0021-target-riscv-rvv-1.0-add-VMA-and-VTA.patch 153 KB

  1. From 2258cdd072fa32063a7a66fef68808cd597afb9d Mon Sep 17 00:00:00 2001
  2. From: Frank Chang <frank.chang@sifive.com>
  3. Date: Thu, 30 Jul 2020 20:42:19 +0800
  4. Subject: [PATCH 021/107] target/riscv: rvv-1.0: add VMA and VTA
  5. Introduce vma and vta fields in vtype register.
  6. According to RVV 1.0 spec (section 3.3.3):
  7. When a set is marked agnostic, the corresponding set of destination
  8. elements in any vector or mask destination operand can either retain
  9. the value they previously held, or are overwritten with 1s.
  10. So, whether vta/vma is set to undisturbed or agnostic, it is legal to
  11. keep the inactive masked-off elements and the tail elements at their
  12. original values. Therefore, besides declaring the vta/vma fields in the
  13. vtype register, this commit also removes all of the tail-element
  14. clearing functions.
  15. Signed-off-by: Frank Chang <frank.chang@sifive.com>
  16. Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
  17. Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
  18. ---
  19. target/riscv/cpu.h | 2 +
  20. target/riscv/vector_helper.c | 1927 ++++++++++++++++------------------
  21. 2 files changed, 891 insertions(+), 1038 deletions(-)
  22. diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
  23. index 33cb265304..3835d22ca1 100644
  24. --- a/target/riscv/cpu.h
  25. +++ b/target/riscv/cpu.h
  26. @@ -107,6 +107,8 @@ typedef struct CPURISCVState CPURISCVState;
  27. FIELD(VTYPE, VLMUL, 0, 3)
  28. FIELD(VTYPE, VSEW, 3, 3)
  29. +FIELD(VTYPE, VTA, 6, 1)
  30. +FIELD(VTYPE, VMA, 7, 1)
  31. FIELD(VTYPE, VEDIV, 8, 2)
  32. FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
  33. FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
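With the two FIELD() additions above, vtype now carries the tail-agnostic and mask-agnostic bits at bit 6 (VTA) and bit 7 (VMA), alongside VSEW (bits 3-5) and VLMUL (bits 0-2). As a minimal standalone sketch (not QEMU's actual accessors, which go through the generic FIELD_EX* helpers), the two bits can be read like this:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: bit positions follow the FIELD() declarations above. */
    static inline bool vtype_vta(uint64_t vtype)
    {
        return (vtype >> 6) & 1;   /* 1 = tail agnostic, 0 = tail undisturbed */
    }

    static inline bool vtype_vma(uint64_t vtype)
    {
        return (vtype >> 7) & 1;   /* 1 = mask agnostic, 0 = mask undisturbed */
    }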
  34. diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
  35. index f28d20a97d..5a142a1f4b 100644
  36. --- a/target/riscv/vector_helper.c
  37. +++ b/target/riscv/vector_helper.c
  38. @@ -146,55 +146,6 @@ static void probe_pages(CPURISCVState *env, target_ulong addr,
  39. }
  40. }
  41. -#ifdef HOST_WORDS_BIGENDIAN
  42. -static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
  43. -{
  44. - /*
  45. - * Split the remaining range to two parts.
  46. - * The first part is in the last uint64_t unit.
  47. - * The second part start from the next uint64_t unit.
  48. - */
  49. - int part1 = 0, part2 = tot - cnt;
  50. - if (cnt % 8) {
  51. - part1 = 8 - (cnt % 8);
  52. - part2 = tot - cnt - part1;
  53. - memset(QEMU_ALIGN_PTR_DOWN(tail, 8), 0, part1);
  54. - memset(QEMU_ALIGN_PTR_UP(tail, 8), 0, part2);
  55. - } else {
  56. - memset(tail, 0, part2);
  57. - }
  58. -}
  59. -#else
  60. -static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
  61. -{
  62. - memset(tail, 0, tot - cnt);
  63. -}
  64. -#endif
  65. -
  66. -static void clearb(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
  67. -{
  68. - int8_t *cur = ((int8_t *)vd + H1(idx));
  69. - vext_clear(cur, cnt, tot);
  70. -}
  71. -
  72. -static void clearh(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
  73. -{
  74. - int16_t *cur = ((int16_t *)vd + H2(idx));
  75. - vext_clear(cur, cnt, tot);
  76. -}
  77. -
  78. -static void clearl(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
  79. -{
  80. - int32_t *cur = ((int32_t *)vd + H4(idx));
  81. - vext_clear(cur, cnt, tot);
  82. -}
  83. -
  84. -static void clearq(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
  85. -{
  86. - int64_t *cur = (int64_t *)vd + idx;
  87. - vext_clear(cur, cnt, tot);
  88. -}
  89. -
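For reference, on a little-endian host the deleted helpers amounted to zeroing every byte of the destination register group past the last active element; a simplified sketch of that behaviour (omitting the big-endian split handled by QEMU_ALIGN_PTR_DOWN/UP above) is:

    #include <stdint.h>
    #include <string.h>

    /* Simplified sketch of the removed tail clearing: zero bytes [cnt, tot)
     * of the destination register group. With RVV 1.0 tail/mask agnostic
     * implemented as "retain the previous value", the helpers no longer
     * need to write anything here, so the call sites below are dropped. */
    static void vext_clear_tail_sketch(void *vd, uint32_t cnt, uint32_t tot)
    {
        memset((uint8_t *)vd + cnt, 0, tot - cnt);
    }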
  90. static inline void vext_set_elem_mask(void *v0, int index,
  91. uint8_t value)
  92. {
  93. @@ -219,7 +170,6 @@ static inline int vext_elem_mask(void *v0, int index)
  94. /* elements operations for load and store */
  95. typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr,
  96. uint32_t idx, void *vd, uintptr_t retaddr);
  97. -typedef void clear_fn(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot);
  98. #define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF) \
  99. static void NAME(CPURISCVState *env, abi_ptr addr, \
  100. @@ -283,7 +233,7 @@ static void
  101. vext_ldst_stride(void *vd, void *v0, target_ulong base,
  102. target_ulong stride, CPURISCVState *env,
  103. uint32_t desc, uint32_t vm,
  104. - vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
  105. + vext_ldst_elem_fn *ldst_elem,
  106. uint32_t esz, uint32_t msz, uintptr_t ra,
  107. MMUAccessType access_type)
  108. {
  109. @@ -310,47 +260,41 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
  110. k++;
  111. }
  112. }
  113. - /* clear tail elements */
  114. - if (clear_elem) {
  115. - for (k = 0; k < nf; k++) {
  116. - clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
  117. - }
  118. - }
  119. }
  120. -#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \
  121. +#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN) \
  122. void HELPER(NAME)(void *vd, void * v0, target_ulong base, \
  123. target_ulong stride, CPURISCVState *env, \
  124. uint32_t desc) \
  125. { \
  126. uint32_t vm = vext_vm(desc); \
  127. vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN, \
  128. - CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \
  129. + sizeof(ETYPE), sizeof(MTYPE), \
  130. GETPC(), MMU_DATA_LOAD); \
  131. }
  132. -GEN_VEXT_LD_STRIDE(vlsb_v_b, int8_t, int8_t, ldb_b, clearb)
  133. -GEN_VEXT_LD_STRIDE(vlsb_v_h, int8_t, int16_t, ldb_h, clearh)
  134. -GEN_VEXT_LD_STRIDE(vlsb_v_w, int8_t, int32_t, ldb_w, clearl)
  135. -GEN_VEXT_LD_STRIDE(vlsb_v_d, int8_t, int64_t, ldb_d, clearq)
  136. -GEN_VEXT_LD_STRIDE(vlsh_v_h, int16_t, int16_t, ldh_h, clearh)
  137. -GEN_VEXT_LD_STRIDE(vlsh_v_w, int16_t, int32_t, ldh_w, clearl)
  138. -GEN_VEXT_LD_STRIDE(vlsh_v_d, int16_t, int64_t, ldh_d, clearq)
  139. -GEN_VEXT_LD_STRIDE(vlsw_v_w, int32_t, int32_t, ldw_w, clearl)
  140. -GEN_VEXT_LD_STRIDE(vlsw_v_d, int32_t, int64_t, ldw_d, clearq)
  141. -GEN_VEXT_LD_STRIDE(vlse_v_b, int8_t, int8_t, lde_b, clearb)
  142. -GEN_VEXT_LD_STRIDE(vlse_v_h, int16_t, int16_t, lde_h, clearh)
  143. -GEN_VEXT_LD_STRIDE(vlse_v_w, int32_t, int32_t, lde_w, clearl)
  144. -GEN_VEXT_LD_STRIDE(vlse_v_d, int64_t, int64_t, lde_d, clearq)
  145. -GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t, uint8_t, ldbu_b, clearb)
  146. -GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t, uint16_t, ldbu_h, clearh)
  147. -GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t, uint32_t, ldbu_w, clearl)
  148. -GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t, uint64_t, ldbu_d, clearq)
  149. -GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
  150. -GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
  151. -GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
  152. -GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
  153. -GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d, clearq)
  154. +GEN_VEXT_LD_STRIDE(vlsb_v_b, int8_t, int8_t, ldb_b)
  155. +GEN_VEXT_LD_STRIDE(vlsb_v_h, int8_t, int16_t, ldb_h)
  156. +GEN_VEXT_LD_STRIDE(vlsb_v_w, int8_t, int32_t, ldb_w)
  157. +GEN_VEXT_LD_STRIDE(vlsb_v_d, int8_t, int64_t, ldb_d)
  158. +GEN_VEXT_LD_STRIDE(vlsh_v_h, int16_t, int16_t, ldh_h)
  159. +GEN_VEXT_LD_STRIDE(vlsh_v_w, int16_t, int32_t, ldh_w)
  160. +GEN_VEXT_LD_STRIDE(vlsh_v_d, int16_t, int64_t, ldh_d)
  161. +GEN_VEXT_LD_STRIDE(vlsw_v_w, int32_t, int32_t, ldw_w)
  162. +GEN_VEXT_LD_STRIDE(vlsw_v_d, int32_t, int64_t, ldw_d)
  163. +GEN_VEXT_LD_STRIDE(vlse_v_b, int8_t, int8_t, lde_b)
  164. +GEN_VEXT_LD_STRIDE(vlse_v_h, int16_t, int16_t, lde_h)
  165. +GEN_VEXT_LD_STRIDE(vlse_v_w, int32_t, int32_t, lde_w)
  166. +GEN_VEXT_LD_STRIDE(vlse_v_d, int64_t, int64_t, lde_d)
  167. +GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t, uint8_t, ldbu_b)
  168. +GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t, uint16_t, ldbu_h)
  169. +GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t, uint32_t, ldbu_w)
  170. +GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t, uint64_t, ldbu_d)
  171. +GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h)
  172. +GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w)
  173. +GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d)
  174. +GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w)
  175. +GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d)
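With clear_fn gone, each generated strided loader is now a thin wrapper over vext_ldst_stride; for instance, GEN_VEXT_LD_STRIDE(vlsb_v_b, int8_t, int8_t, ldb_b) expands to roughly the following (HELPER(vlsb_v_b) becomes helper_vlsb_v_b):

    void helper_vlsb_v_b(void *vd, void *v0, target_ulong base,
                         target_ulong stride, CPURISCVState *env,
                         uint32_t desc)
    {
        uint32_t vm = vext_vm(desc);
        /* Load int8_t memory elements into int8_t vector elements; there is
         * no tail-clearing call any more, elements past vl stay untouched. */
        vext_ldst_stride(vd, v0, base, stride, env, desc, vm, ldb_b,
                         sizeof(int8_t), sizeof(int8_t),
                         GETPC(), MMU_DATA_LOAD);
    }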
  176. #define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN) \
  177. void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
  178. @@ -359,7 +303,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
  179. { \
  180. uint32_t vm = vext_vm(desc); \
  181. vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN, \
  182. - NULL, sizeof(ETYPE), sizeof(MTYPE), \
  183. + sizeof(ETYPE), sizeof(MTYPE), \
  184. GETPC(), MMU_DATA_STORE); \
  185. }
  186. @@ -384,9 +328,8 @@ GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
  187. /* unmasked unit-stride load and store operation*/
  188. static void
  189. vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
  190. - vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
  191. - uint32_t esz, uint32_t msz, uintptr_t ra,
  192. - MMUAccessType access_type)
  193. + vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t msz,
  194. + uintptr_t ra, MMUAccessType access_type)
  195. {
  196. uint32_t i, k;
  197. uint32_t nf = vext_nf(desc);
  198. @@ -403,12 +346,6 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
  199. k++;
  200. }
  201. }
  202. - /* clear tail elements */
  203. - if (clear_elem) {
  204. - for (k = 0; k < nf; k++) {
  205. - clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
  206. - }
  207. - }
  208. }
  209. /*
  210. @@ -416,45 +353,45 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
  211. * stride = NF * sizeof (MTYPE)
  212. */
  213. -#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \
  214. +#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN) \
  215. void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
  216. CPURISCVState *env, uint32_t desc) \
  217. { \
  218. uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \
  219. vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
  220. - CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \
  221. + sizeof(ETYPE), sizeof(MTYPE), \
  222. GETPC(), MMU_DATA_LOAD); \
  223. } \
  224. \
  225. void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
  226. CPURISCVState *env, uint32_t desc) \
  227. { \
  228. - vext_ldst_us(vd, base, env, desc, LOAD_FN, CLEAR_FN, \
  229. + vext_ldst_us(vd, base, env, desc, LOAD_FN, \
  230. sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \
  231. }
  232. -GEN_VEXT_LD_US(vlb_v_b, int8_t, int8_t, ldb_b, clearb)
  233. -GEN_VEXT_LD_US(vlb_v_h, int8_t, int16_t, ldb_h, clearh)
  234. -GEN_VEXT_LD_US(vlb_v_w, int8_t, int32_t, ldb_w, clearl)
  235. -GEN_VEXT_LD_US(vlb_v_d, int8_t, int64_t, ldb_d, clearq)
  236. -GEN_VEXT_LD_US(vlh_v_h, int16_t, int16_t, ldh_h, clearh)
  237. -GEN_VEXT_LD_US(vlh_v_w, int16_t, int32_t, ldh_w, clearl)
  238. -GEN_VEXT_LD_US(vlh_v_d, int16_t, int64_t, ldh_d, clearq)
  239. -GEN_VEXT_LD_US(vlw_v_w, int32_t, int32_t, ldw_w, clearl)
  240. -GEN_VEXT_LD_US(vlw_v_d, int32_t, int64_t, ldw_d, clearq)
  241. -GEN_VEXT_LD_US(vle_v_b, int8_t, int8_t, lde_b, clearb)
  242. -GEN_VEXT_LD_US(vle_v_h, int16_t, int16_t, lde_h, clearh)
  243. -GEN_VEXT_LD_US(vle_v_w, int32_t, int32_t, lde_w, clearl)
  244. -GEN_VEXT_LD_US(vle_v_d, int64_t, int64_t, lde_d, clearq)
  245. -GEN_VEXT_LD_US(vlbu_v_b, uint8_t, uint8_t, ldbu_b, clearb)
  246. -GEN_VEXT_LD_US(vlbu_v_h, uint8_t, uint16_t, ldbu_h, clearh)
  247. -GEN_VEXT_LD_US(vlbu_v_w, uint8_t, uint32_t, ldbu_w, clearl)
  248. -GEN_VEXT_LD_US(vlbu_v_d, uint8_t, uint64_t, ldbu_d, clearq)
  249. -GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
  250. -GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
  251. -GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
  252. -GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
  253. -GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d, clearq)
  254. +GEN_VEXT_LD_US(vlb_v_b, int8_t, int8_t, ldb_b)
  255. +GEN_VEXT_LD_US(vlb_v_h, int8_t, int16_t, ldb_h)
  256. +GEN_VEXT_LD_US(vlb_v_w, int8_t, int32_t, ldb_w)
  257. +GEN_VEXT_LD_US(vlb_v_d, int8_t, int64_t, ldb_d)
  258. +GEN_VEXT_LD_US(vlh_v_h, int16_t, int16_t, ldh_h)
  259. +GEN_VEXT_LD_US(vlh_v_w, int16_t, int32_t, ldh_w)
  260. +GEN_VEXT_LD_US(vlh_v_d, int16_t, int64_t, ldh_d)
  261. +GEN_VEXT_LD_US(vlw_v_w, int32_t, int32_t, ldw_w)
  262. +GEN_VEXT_LD_US(vlw_v_d, int32_t, int64_t, ldw_d)
  263. +GEN_VEXT_LD_US(vle_v_b, int8_t, int8_t, lde_b)
  264. +GEN_VEXT_LD_US(vle_v_h, int16_t, int16_t, lde_h)
  265. +GEN_VEXT_LD_US(vle_v_w, int32_t, int32_t, lde_w)
  266. +GEN_VEXT_LD_US(vle_v_d, int64_t, int64_t, lde_d)
  267. +GEN_VEXT_LD_US(vlbu_v_b, uint8_t, uint8_t, ldbu_b)
  268. +GEN_VEXT_LD_US(vlbu_v_h, uint8_t, uint16_t, ldbu_h)
  269. +GEN_VEXT_LD_US(vlbu_v_w, uint8_t, uint32_t, ldbu_w)
  270. +GEN_VEXT_LD_US(vlbu_v_d, uint8_t, uint64_t, ldbu_d)
  271. +GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h)
  272. +GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w)
  273. +GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d)
  274. +GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w)
  275. +GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d)
  276. #define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN) \
  277. void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
  278. @@ -462,14 +399,14 @@ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
  279. { \
  280. uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \
  281. vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
  282. - NULL, sizeof(ETYPE), sizeof(MTYPE), \
  283. + sizeof(ETYPE), sizeof(MTYPE), \
  284. GETPC(), MMU_DATA_STORE); \
  285. } \
  286. \
  287. void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
  288. CPURISCVState *env, uint32_t desc) \
  289. { \
  290. - vext_ldst_us(vd, base, env, desc, STORE_FN, NULL, \
  291. + vext_ldst_us(vd, base, env, desc, STORE_FN, \
  292. sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\
  293. }
  294. @@ -510,7 +447,6 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
  295. void *vs2, CPURISCVState *env, uint32_t desc,
  296. vext_get_index_addr get_index_addr,
  297. vext_ldst_elem_fn *ldst_elem,
  298. - clear_fn *clear_elem,
  299. uint32_t esz, uint32_t msz, uintptr_t ra,
  300. MMUAccessType access_type)
  301. {
  302. @@ -539,52 +475,46 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
  303. k++;
  304. }
  305. }
  306. - /* clear tail elements */
  307. - if (clear_elem) {
  308. - for (k = 0; k < nf; k++) {
  309. - clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
  310. - }
  311. - }
  312. }
  313. -#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN, CLEAR_FN) \
  314. +#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN) \
  315. void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
  316. void *vs2, CPURISCVState *env, uint32_t desc) \
  317. { \
  318. vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
  319. - LOAD_FN, CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \
  320. + LOAD_FN, sizeof(ETYPE), sizeof(MTYPE), \
  321. GETPC(), MMU_DATA_LOAD); \
  322. }
  323. -GEN_VEXT_LD_INDEX(vlxb_v_b, int8_t, int8_t, idx_b, ldb_b, clearb)
  324. -GEN_VEXT_LD_INDEX(vlxb_v_h, int8_t, int16_t, idx_h, ldb_h, clearh)
  325. -GEN_VEXT_LD_INDEX(vlxb_v_w, int8_t, int32_t, idx_w, ldb_w, clearl)
  326. -GEN_VEXT_LD_INDEX(vlxb_v_d, int8_t, int64_t, idx_d, ldb_d, clearq)
  327. -GEN_VEXT_LD_INDEX(vlxh_v_h, int16_t, int16_t, idx_h, ldh_h, clearh)
  328. -GEN_VEXT_LD_INDEX(vlxh_v_w, int16_t, int32_t, idx_w, ldh_w, clearl)
  329. -GEN_VEXT_LD_INDEX(vlxh_v_d, int16_t, int64_t, idx_d, ldh_d, clearq)
  330. -GEN_VEXT_LD_INDEX(vlxw_v_w, int32_t, int32_t, idx_w, ldw_w, clearl)
  331. -GEN_VEXT_LD_INDEX(vlxw_v_d, int32_t, int64_t, idx_d, ldw_d, clearq)
  332. -GEN_VEXT_LD_INDEX(vlxe_v_b, int8_t, int8_t, idx_b, lde_b, clearb)
  333. -GEN_VEXT_LD_INDEX(vlxe_v_h, int16_t, int16_t, idx_h, lde_h, clearh)
  334. -GEN_VEXT_LD_INDEX(vlxe_v_w, int32_t, int32_t, idx_w, lde_w, clearl)
  335. -GEN_VEXT_LD_INDEX(vlxe_v_d, int64_t, int64_t, idx_d, lde_d, clearq)
  336. -GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t, uint8_t, idx_b, ldbu_b, clearb)
  337. -GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t, uint16_t, idx_h, ldbu_h, clearh)
  338. -GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t, uint32_t, idx_w, ldbu_w, clearl)
  339. -GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t, uint64_t, idx_d, ldbu_d, clearq)
  340. -GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h, clearh)
  341. -GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w, clearl)
  342. -GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d, clearq)
  343. -GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w, clearl)
  344. -GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d, clearq)
  345. +GEN_VEXT_LD_INDEX(vlxb_v_b, int8_t, int8_t, idx_b, ldb_b)
  346. +GEN_VEXT_LD_INDEX(vlxb_v_h, int8_t, int16_t, idx_h, ldb_h)
  347. +GEN_VEXT_LD_INDEX(vlxb_v_w, int8_t, int32_t, idx_w, ldb_w)
  348. +GEN_VEXT_LD_INDEX(vlxb_v_d, int8_t, int64_t, idx_d, ldb_d)
  349. +GEN_VEXT_LD_INDEX(vlxh_v_h, int16_t, int16_t, idx_h, ldh_h)
  350. +GEN_VEXT_LD_INDEX(vlxh_v_w, int16_t, int32_t, idx_w, ldh_w)
  351. +GEN_VEXT_LD_INDEX(vlxh_v_d, int16_t, int64_t, idx_d, ldh_d)
  352. +GEN_VEXT_LD_INDEX(vlxw_v_w, int32_t, int32_t, idx_w, ldw_w)
  353. +GEN_VEXT_LD_INDEX(vlxw_v_d, int32_t, int64_t, idx_d, ldw_d)
  354. +GEN_VEXT_LD_INDEX(vlxe_v_b, int8_t, int8_t, idx_b, lde_b)
  355. +GEN_VEXT_LD_INDEX(vlxe_v_h, int16_t, int16_t, idx_h, lde_h)
  356. +GEN_VEXT_LD_INDEX(vlxe_v_w, int32_t, int32_t, idx_w, lde_w)
  357. +GEN_VEXT_LD_INDEX(vlxe_v_d, int64_t, int64_t, idx_d, lde_d)
  358. +GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t, uint8_t, idx_b, ldbu_b)
  359. +GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t, uint16_t, idx_h, ldbu_h)
  360. +GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t, uint32_t, idx_w, ldbu_w)
  361. +GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t, uint64_t, idx_d, ldbu_d)
  362. +GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h)
  363. +GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w)
  364. +GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d)
  365. +GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w)
  366. +GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d)
  367. #define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\
  368. void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
  369. void *vs2, CPURISCVState *env, uint32_t desc) \
  370. { \
  371. vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
  372. - STORE_FN, NULL, sizeof(ETYPE), sizeof(MTYPE),\
  373. + STORE_FN, sizeof(ETYPE), sizeof(MTYPE), \
  374. GETPC(), MMU_DATA_STORE); \
  375. }
  376. @@ -609,7 +539,6 @@ static inline void
  377. vext_ldff(void *vd, void *v0, target_ulong base,
  378. CPURISCVState *env, uint32_t desc,
  379. vext_ldst_elem_fn *ldst_elem,
  380. - clear_fn *clear_elem,
  381. uint32_t esz, uint32_t msz, uintptr_t ra)
  382. {
  383. void *host;
  384. @@ -671,45 +600,38 @@ ProbeSuccess:
  385. k++;
  386. }
  387. }
  388. - /* clear tail elements */
  389. - if (vl != 0) {
  390. - return;
  391. - }
  392. - for (k = 0; k < nf; k++) {
  393. - clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
  394. - }
  395. }
  396. -#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \
  397. +#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN) \
  398. void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
  399. CPURISCVState *env, uint32_t desc) \
  400. { \
  401. - vext_ldff(vd, v0, base, env, desc, LOAD_FN, CLEAR_FN, \
  402. + vext_ldff(vd, v0, base, env, desc, LOAD_FN, \
  403. sizeof(ETYPE), sizeof(MTYPE), GETPC()); \
  404. }
  405. -GEN_VEXT_LDFF(vlbff_v_b, int8_t, int8_t, ldb_b, clearb)
  406. -GEN_VEXT_LDFF(vlbff_v_h, int8_t, int16_t, ldb_h, clearh)
  407. -GEN_VEXT_LDFF(vlbff_v_w, int8_t, int32_t, ldb_w, clearl)
  408. -GEN_VEXT_LDFF(vlbff_v_d, int8_t, int64_t, ldb_d, clearq)
  409. -GEN_VEXT_LDFF(vlhff_v_h, int16_t, int16_t, ldh_h, clearh)
  410. -GEN_VEXT_LDFF(vlhff_v_w, int16_t, int32_t, ldh_w, clearl)
  411. -GEN_VEXT_LDFF(vlhff_v_d, int16_t, int64_t, ldh_d, clearq)
  412. -GEN_VEXT_LDFF(vlwff_v_w, int32_t, int32_t, ldw_w, clearl)
  413. -GEN_VEXT_LDFF(vlwff_v_d, int32_t, int64_t, ldw_d, clearq)
  414. -GEN_VEXT_LDFF(vleff_v_b, int8_t, int8_t, lde_b, clearb)
  415. -GEN_VEXT_LDFF(vleff_v_h, int16_t, int16_t, lde_h, clearh)
  416. -GEN_VEXT_LDFF(vleff_v_w, int32_t, int32_t, lde_w, clearl)
  417. -GEN_VEXT_LDFF(vleff_v_d, int64_t, int64_t, lde_d, clearq)
  418. -GEN_VEXT_LDFF(vlbuff_v_b, uint8_t, uint8_t, ldbu_b, clearb)
  419. -GEN_VEXT_LDFF(vlbuff_v_h, uint8_t, uint16_t, ldbu_h, clearh)
  420. -GEN_VEXT_LDFF(vlbuff_v_w, uint8_t, uint32_t, ldbu_w, clearl)
  421. -GEN_VEXT_LDFF(vlbuff_v_d, uint8_t, uint64_t, ldbu_d, clearq)
  422. -GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h, clearh)
  423. -GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w, clearl)
  424. -GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d, clearq)
  425. -GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w, clearl)
  426. -GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d, clearq)
  427. +GEN_VEXT_LDFF(vlbff_v_b, int8_t, int8_t, ldb_b)
  428. +GEN_VEXT_LDFF(vlbff_v_h, int8_t, int16_t, ldb_h)
  429. +GEN_VEXT_LDFF(vlbff_v_w, int8_t, int32_t, ldb_w)
  430. +GEN_VEXT_LDFF(vlbff_v_d, int8_t, int64_t, ldb_d)
  431. +GEN_VEXT_LDFF(vlhff_v_h, int16_t, int16_t, ldh_h)
  432. +GEN_VEXT_LDFF(vlhff_v_w, int16_t, int32_t, ldh_w)
  433. +GEN_VEXT_LDFF(vlhff_v_d, int16_t, int64_t, ldh_d)
  434. +GEN_VEXT_LDFF(vlwff_v_w, int32_t, int32_t, ldw_w)
  435. +GEN_VEXT_LDFF(vlwff_v_d, int32_t, int64_t, ldw_d)
  436. +GEN_VEXT_LDFF(vleff_v_b, int8_t, int8_t, lde_b)
  437. +GEN_VEXT_LDFF(vleff_v_h, int16_t, int16_t, lde_h)
  438. +GEN_VEXT_LDFF(vleff_v_w, int32_t, int32_t, lde_w)
  439. +GEN_VEXT_LDFF(vleff_v_d, int64_t, int64_t, lde_d)
  440. +GEN_VEXT_LDFF(vlbuff_v_b, uint8_t, uint8_t, ldbu_b)
  441. +GEN_VEXT_LDFF(vlbuff_v_h, uint8_t, uint16_t, ldbu_h)
  442. +GEN_VEXT_LDFF(vlbuff_v_w, uint8_t, uint32_t, ldbu_w)
  443. +GEN_VEXT_LDFF(vlbuff_v_d, uint8_t, uint64_t, ldbu_d)
  444. +GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h)
  445. +GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w)
  446. +GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d)
  447. +GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w)
  448. +GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d)
  449. /*
  450. *** Vector AMO Operations (Zvamo)
  451. @@ -786,14 +708,12 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
  452. void *vs2, CPURISCVState *env, uint32_t desc,
  453. vext_get_index_addr get_index_addr,
  454. vext_amo_noatomic_fn *noatomic_op,
  455. - clear_fn *clear_elem,
  456. uint32_t esz, uint32_t msz, uintptr_t ra)
  457. {
  458. uint32_t i;
  459. target_long addr;
  460. uint32_t wd = vext_wd(desc);
  461. uint32_t vm = vext_vm(desc);
  462. - uint32_t vlmax = vext_maxsz(desc) / esz;
  463. for (i = 0; i < env->vl; i++) {
  464. if (!vm && !vext_elem_mask(v0, i)) {
  465. @@ -809,48 +729,47 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
  466. addr = get_index_addr(base, i, vs2);
  467. noatomic_op(vs3, addr, wd, i, env, ra);
  468. }
  469. - clear_elem(vs3, env->vl, env->vl * esz, vlmax * esz);
  470. }
  471. -#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN, CLEAR_FN) \
  472. +#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN) \
  473. void HELPER(NAME)(void *vs3, void *v0, target_ulong base, \
  474. void *vs2, CPURISCVState *env, uint32_t desc) \
  475. { \
  476. vext_amo_noatomic(vs3, v0, base, vs2, env, desc, \
  477. INDEX_FN, vext_##NAME##_noatomic_op, \
  478. - CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \
  479. + sizeof(ETYPE), sizeof(MTYPE), \
  480. GETPC()); \
  481. }
  482. #ifdef TARGET_RISCV64
  483. -GEN_VEXT_AMO(vamoswapw_v_d, int32_t, int64_t, idx_d, clearq)
  484. -GEN_VEXT_AMO(vamoswapd_v_d, int64_t, int64_t, idx_d, clearq)
  485. -GEN_VEXT_AMO(vamoaddw_v_d, int32_t, int64_t, idx_d, clearq)
  486. -GEN_VEXT_AMO(vamoaddd_v_d, int64_t, int64_t, idx_d, clearq)
  487. -GEN_VEXT_AMO(vamoxorw_v_d, int32_t, int64_t, idx_d, clearq)
  488. -GEN_VEXT_AMO(vamoxord_v_d, int64_t, int64_t, idx_d, clearq)
  489. -GEN_VEXT_AMO(vamoandw_v_d, int32_t, int64_t, idx_d, clearq)
  490. -GEN_VEXT_AMO(vamoandd_v_d, int64_t, int64_t, idx_d, clearq)
  491. -GEN_VEXT_AMO(vamoorw_v_d, int32_t, int64_t, idx_d, clearq)
  492. -GEN_VEXT_AMO(vamoord_v_d, int64_t, int64_t, idx_d, clearq)
  493. -GEN_VEXT_AMO(vamominw_v_d, int32_t, int64_t, idx_d, clearq)
  494. -GEN_VEXT_AMO(vamomind_v_d, int64_t, int64_t, idx_d, clearq)
  495. -GEN_VEXT_AMO(vamomaxw_v_d, int32_t, int64_t, idx_d, clearq)
  496. -GEN_VEXT_AMO(vamomaxd_v_d, int64_t, int64_t, idx_d, clearq)
  497. -GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq)
  498. -GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq)
  499. -GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq)
  500. -GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq)
  501. +GEN_VEXT_AMO(vamoswapw_v_d, int32_t, int64_t, idx_d)
  502. +GEN_VEXT_AMO(vamoswapd_v_d, int64_t, int64_t, idx_d)
  503. +GEN_VEXT_AMO(vamoaddw_v_d, int32_t, int64_t, idx_d)
  504. +GEN_VEXT_AMO(vamoaddd_v_d, int64_t, int64_t, idx_d)
  505. +GEN_VEXT_AMO(vamoxorw_v_d, int32_t, int64_t, idx_d)
  506. +GEN_VEXT_AMO(vamoxord_v_d, int64_t, int64_t, idx_d)
  507. +GEN_VEXT_AMO(vamoandw_v_d, int32_t, int64_t, idx_d)
  508. +GEN_VEXT_AMO(vamoandd_v_d, int64_t, int64_t, idx_d)
  509. +GEN_VEXT_AMO(vamoorw_v_d, int32_t, int64_t, idx_d)
  510. +GEN_VEXT_AMO(vamoord_v_d, int64_t, int64_t, idx_d)
  511. +GEN_VEXT_AMO(vamominw_v_d, int32_t, int64_t, idx_d)
  512. +GEN_VEXT_AMO(vamomind_v_d, int64_t, int64_t, idx_d)
  513. +GEN_VEXT_AMO(vamomaxw_v_d, int32_t, int64_t, idx_d)
  514. +GEN_VEXT_AMO(vamomaxd_v_d, int64_t, int64_t, idx_d)
  515. +GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d)
  516. +GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d)
  517. +GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d)
  518. +GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d)
  519. #endif
  520. -GEN_VEXT_AMO(vamoswapw_v_w, int32_t, int32_t, idx_w, clearl)
  521. -GEN_VEXT_AMO(vamoaddw_v_w, int32_t, int32_t, idx_w, clearl)
  522. -GEN_VEXT_AMO(vamoxorw_v_w, int32_t, int32_t, idx_w, clearl)
  523. -GEN_VEXT_AMO(vamoandw_v_w, int32_t, int32_t, idx_w, clearl)
  524. -GEN_VEXT_AMO(vamoorw_v_w, int32_t, int32_t, idx_w, clearl)
  525. -GEN_VEXT_AMO(vamominw_v_w, int32_t, int32_t, idx_w, clearl)
  526. -GEN_VEXT_AMO(vamomaxw_v_w, int32_t, int32_t, idx_w, clearl)
  527. -GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w, clearl)
  528. -GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl)
  529. +GEN_VEXT_AMO(vamoswapw_v_w, int32_t, int32_t, idx_w)
  530. +GEN_VEXT_AMO(vamoaddw_v_w, int32_t, int32_t, idx_w)
  531. +GEN_VEXT_AMO(vamoxorw_v_w, int32_t, int32_t, idx_w)
  532. +GEN_VEXT_AMO(vamoandw_v_w, int32_t, int32_t, idx_w)
  533. +GEN_VEXT_AMO(vamoorw_v_w, int32_t, int32_t, idx_w)
  534. +GEN_VEXT_AMO(vamominw_v_w, int32_t, int32_t, idx_w)
  535. +GEN_VEXT_AMO(vamomaxw_v_w, int32_t, int32_t, idx_w)
  536. +GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w)
  537. +GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w)
  538. /*
  539. *** Vector Integer Arithmetic Instructions
  540. @@ -916,9 +835,8 @@ RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB)
  541. static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
  542. CPURISCVState *env, uint32_t desc,
  543. uint32_t esz, uint32_t dsz,
  544. - opivv2_fn *fn, clear_fn *clearfn)
  545. + opivv2_fn *fn)
  546. {
  547. - uint32_t vlmax = vext_maxsz(desc) / esz;
  548. uint32_t vm = vext_vm(desc);
  549. uint32_t vl = env->vl;
  550. uint32_t i;
  551. @@ -929,27 +847,26 @@ static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
  552. }
  553. fn(vd, vs1, vs2, i);
  554. }
  555. - clearfn(vd, vl, vl * dsz, vlmax * dsz);
  556. }
  557. /* generate the helpers for OPIVV */
  558. -#define GEN_VEXT_VV(NAME, ESZ, DSZ, CLEAR_FN) \
  559. +#define GEN_VEXT_VV(NAME, ESZ, DSZ) \
  560. void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  561. void *vs2, CPURISCVState *env, \
  562. uint32_t desc) \
  563. { \
  564. do_vext_vv(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \
  565. - do_##NAME, CLEAR_FN); \
  566. + do_##NAME); \
  567. }
  568. -GEN_VEXT_VV(vadd_vv_b, 1, 1, clearb)
  569. -GEN_VEXT_VV(vadd_vv_h, 2, 2, clearh)
  570. -GEN_VEXT_VV(vadd_vv_w, 4, 4, clearl)
  571. -GEN_VEXT_VV(vadd_vv_d, 8, 8, clearq)
  572. -GEN_VEXT_VV(vsub_vv_b, 1, 1, clearb)
  573. -GEN_VEXT_VV(vsub_vv_h, 2, 2, clearh)
  574. -GEN_VEXT_VV(vsub_vv_w, 4, 4, clearl)
  575. -GEN_VEXT_VV(vsub_vv_d, 8, 8, clearq)
  576. +GEN_VEXT_VV(vadd_vv_b, 1, 1)
  577. +GEN_VEXT_VV(vadd_vv_h, 2, 2)
  578. +GEN_VEXT_VV(vadd_vv_w, 4, 4)
  579. +GEN_VEXT_VV(vadd_vv_d, 8, 8)
  580. +GEN_VEXT_VV(vsub_vv_b, 1, 1)
  581. +GEN_VEXT_VV(vsub_vv_h, 2, 2)
  582. +GEN_VEXT_VV(vsub_vv_w, 4, 4)
  583. +GEN_VEXT_VV(vsub_vv_d, 8, 8)
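The arithmetic helpers follow the same pattern; GEN_VEXT_VV(vadd_vv_b, 1, 1) now generates roughly

    void helper_vadd_vv_b(void *vd, void *v0, void *vs1,
                          void *vs2, CPURISCVState *env,
                          uint32_t desc)
    {
        /* do_vext_vv loops over the first vl elements, skips masked-off
         * ones when vm is clear, and no longer touches anything past vl. */
        do_vext_vv(vd, v0, vs1, vs2, env, desc, 1, 1, do_vadd_vv_b);
    }

with the per-element work done by do_vadd_vv_b from the RVVCALL(OPIVV2, ...) table above.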
  584. typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);
  585. @@ -980,9 +897,8 @@ RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB)
  586. static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
  587. CPURISCVState *env, uint32_t desc,
  588. uint32_t esz, uint32_t dsz,
  589. - opivx2_fn fn, clear_fn *clearfn)
  590. + opivx2_fn fn)
  591. {
  592. - uint32_t vlmax = vext_maxsz(desc) / esz;
  593. uint32_t vm = vext_vm(desc);
  594. uint32_t vl = env->vl;
  595. uint32_t i;
  596. @@ -993,31 +909,30 @@ static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
  597. }
  598. fn(vd, s1, vs2, i);
  599. }
  600. - clearfn(vd, vl, vl * dsz, vlmax * dsz);
  601. }
  602. /* generate the helpers for OPIVX */
  603. -#define GEN_VEXT_VX(NAME, ESZ, DSZ, CLEAR_FN) \
  604. +#define GEN_VEXT_VX(NAME, ESZ, DSZ) \
  605. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
  606. void *vs2, CPURISCVState *env, \
  607. uint32_t desc) \
  608. { \
  609. do_vext_vx(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \
  610. - do_##NAME, CLEAR_FN); \
  611. -}
  612. -
  613. -GEN_VEXT_VX(vadd_vx_b, 1, 1, clearb)
  614. -GEN_VEXT_VX(vadd_vx_h, 2, 2, clearh)
  615. -GEN_VEXT_VX(vadd_vx_w, 4, 4, clearl)
  616. -GEN_VEXT_VX(vadd_vx_d, 8, 8, clearq)
  617. -GEN_VEXT_VX(vsub_vx_b, 1, 1, clearb)
  618. -GEN_VEXT_VX(vsub_vx_h, 2, 2, clearh)
  619. -GEN_VEXT_VX(vsub_vx_w, 4, 4, clearl)
  620. -GEN_VEXT_VX(vsub_vx_d, 8, 8, clearq)
  621. -GEN_VEXT_VX(vrsub_vx_b, 1, 1, clearb)
  622. -GEN_VEXT_VX(vrsub_vx_h, 2, 2, clearh)
  623. -GEN_VEXT_VX(vrsub_vx_w, 4, 4, clearl)
  624. -GEN_VEXT_VX(vrsub_vx_d, 8, 8, clearq)
  625. + do_##NAME); \
  626. +}
  627. +
  628. +GEN_VEXT_VX(vadd_vx_b, 1, 1)
  629. +GEN_VEXT_VX(vadd_vx_h, 2, 2)
  630. +GEN_VEXT_VX(vadd_vx_w, 4, 4)
  631. +GEN_VEXT_VX(vadd_vx_d, 8, 8)
  632. +GEN_VEXT_VX(vsub_vx_b, 1, 1)
  633. +GEN_VEXT_VX(vsub_vx_h, 2, 2)
  634. +GEN_VEXT_VX(vsub_vx_w, 4, 4)
  635. +GEN_VEXT_VX(vsub_vx_d, 8, 8)
  636. +GEN_VEXT_VX(vrsub_vx_b, 1, 1)
  637. +GEN_VEXT_VX(vrsub_vx_h, 2, 2)
  638. +GEN_VEXT_VX(vrsub_vx_w, 4, 4)
  639. +GEN_VEXT_VX(vrsub_vx_d, 8, 8)
  640. void HELPER(vec_rsubs8)(void *d, void *a, uint64_t b, uint32_t desc)
  641. {
  642. @@ -1096,30 +1011,30 @@ RVVCALL(OPIVV2, vwadd_wv_w, WOP_WSSS_W, H8, H4, H4, DO_ADD)
  643. RVVCALL(OPIVV2, vwsub_wv_b, WOP_WSSS_B, H2, H1, H1, DO_SUB)
  644. RVVCALL(OPIVV2, vwsub_wv_h, WOP_WSSS_H, H4, H2, H2, DO_SUB)
  645. RVVCALL(OPIVV2, vwsub_wv_w, WOP_WSSS_W, H8, H4, H4, DO_SUB)
  646. -GEN_VEXT_VV(vwaddu_vv_b, 1, 2, clearh)
  647. -GEN_VEXT_VV(vwaddu_vv_h, 2, 4, clearl)
  648. -GEN_VEXT_VV(vwaddu_vv_w, 4, 8, clearq)
  649. -GEN_VEXT_VV(vwsubu_vv_b, 1, 2, clearh)
  650. -GEN_VEXT_VV(vwsubu_vv_h, 2, 4, clearl)
  651. -GEN_VEXT_VV(vwsubu_vv_w, 4, 8, clearq)
  652. -GEN_VEXT_VV(vwadd_vv_b, 1, 2, clearh)
  653. -GEN_VEXT_VV(vwadd_vv_h, 2, 4, clearl)
  654. -GEN_VEXT_VV(vwadd_vv_w, 4, 8, clearq)
  655. -GEN_VEXT_VV(vwsub_vv_b, 1, 2, clearh)
  656. -GEN_VEXT_VV(vwsub_vv_h, 2, 4, clearl)
  657. -GEN_VEXT_VV(vwsub_vv_w, 4, 8, clearq)
  658. -GEN_VEXT_VV(vwaddu_wv_b, 1, 2, clearh)
  659. -GEN_VEXT_VV(vwaddu_wv_h, 2, 4, clearl)
  660. -GEN_VEXT_VV(vwaddu_wv_w, 4, 8, clearq)
  661. -GEN_VEXT_VV(vwsubu_wv_b, 1, 2, clearh)
  662. -GEN_VEXT_VV(vwsubu_wv_h, 2, 4, clearl)
  663. -GEN_VEXT_VV(vwsubu_wv_w, 4, 8, clearq)
  664. -GEN_VEXT_VV(vwadd_wv_b, 1, 2, clearh)
  665. -GEN_VEXT_VV(vwadd_wv_h, 2, 4, clearl)
  666. -GEN_VEXT_VV(vwadd_wv_w, 4, 8, clearq)
  667. -GEN_VEXT_VV(vwsub_wv_b, 1, 2, clearh)
  668. -GEN_VEXT_VV(vwsub_wv_h, 2, 4, clearl)
  669. -GEN_VEXT_VV(vwsub_wv_w, 4, 8, clearq)
  670. +GEN_VEXT_VV(vwaddu_vv_b, 1, 2)
  671. +GEN_VEXT_VV(vwaddu_vv_h, 2, 4)
  672. +GEN_VEXT_VV(vwaddu_vv_w, 4, 8)
  673. +GEN_VEXT_VV(vwsubu_vv_b, 1, 2)
  674. +GEN_VEXT_VV(vwsubu_vv_h, 2, 4)
  675. +GEN_VEXT_VV(vwsubu_vv_w, 4, 8)
  676. +GEN_VEXT_VV(vwadd_vv_b, 1, 2)
  677. +GEN_VEXT_VV(vwadd_vv_h, 2, 4)
  678. +GEN_VEXT_VV(vwadd_vv_w, 4, 8)
  679. +GEN_VEXT_VV(vwsub_vv_b, 1, 2)
  680. +GEN_VEXT_VV(vwsub_vv_h, 2, 4)
  681. +GEN_VEXT_VV(vwsub_vv_w, 4, 8)
  682. +GEN_VEXT_VV(vwaddu_wv_b, 1, 2)
  683. +GEN_VEXT_VV(vwaddu_wv_h, 2, 4)
  684. +GEN_VEXT_VV(vwaddu_wv_w, 4, 8)
  685. +GEN_VEXT_VV(vwsubu_wv_b, 1, 2)
  686. +GEN_VEXT_VV(vwsubu_wv_h, 2, 4)
  687. +GEN_VEXT_VV(vwsubu_wv_w, 4, 8)
  688. +GEN_VEXT_VV(vwadd_wv_b, 1, 2)
  689. +GEN_VEXT_VV(vwadd_wv_h, 2, 4)
  690. +GEN_VEXT_VV(vwadd_wv_w, 4, 8)
  691. +GEN_VEXT_VV(vwsub_wv_b, 1, 2)
  692. +GEN_VEXT_VV(vwsub_wv_h, 2, 4)
  693. +GEN_VEXT_VV(vwsub_wv_w, 4, 8)
  694. RVVCALL(OPIVX2, vwaddu_vx_b, WOP_UUU_B, H2, H1, DO_ADD)
  695. RVVCALL(OPIVX2, vwaddu_vx_h, WOP_UUU_H, H4, H2, DO_ADD)
  696. @@ -1145,42 +1060,40 @@ RVVCALL(OPIVX2, vwadd_wx_w, WOP_WSSS_W, H8, H4, DO_ADD)
  697. RVVCALL(OPIVX2, vwsub_wx_b, WOP_WSSS_B, H2, H1, DO_SUB)
  698. RVVCALL(OPIVX2, vwsub_wx_h, WOP_WSSS_H, H4, H2, DO_SUB)
  699. RVVCALL(OPIVX2, vwsub_wx_w, WOP_WSSS_W, H8, H4, DO_SUB)
  700. -GEN_VEXT_VX(vwaddu_vx_b, 1, 2, clearh)
  701. -GEN_VEXT_VX(vwaddu_vx_h, 2, 4, clearl)
  702. -GEN_VEXT_VX(vwaddu_vx_w, 4, 8, clearq)
  703. -GEN_VEXT_VX(vwsubu_vx_b, 1, 2, clearh)
  704. -GEN_VEXT_VX(vwsubu_vx_h, 2, 4, clearl)
  705. -GEN_VEXT_VX(vwsubu_vx_w, 4, 8, clearq)
  706. -GEN_VEXT_VX(vwadd_vx_b, 1, 2, clearh)
  707. -GEN_VEXT_VX(vwadd_vx_h, 2, 4, clearl)
  708. -GEN_VEXT_VX(vwadd_vx_w, 4, 8, clearq)
  709. -GEN_VEXT_VX(vwsub_vx_b, 1, 2, clearh)
  710. -GEN_VEXT_VX(vwsub_vx_h, 2, 4, clearl)
  711. -GEN_VEXT_VX(vwsub_vx_w, 4, 8, clearq)
  712. -GEN_VEXT_VX(vwaddu_wx_b, 1, 2, clearh)
  713. -GEN_VEXT_VX(vwaddu_wx_h, 2, 4, clearl)
  714. -GEN_VEXT_VX(vwaddu_wx_w, 4, 8, clearq)
  715. -GEN_VEXT_VX(vwsubu_wx_b, 1, 2, clearh)
  716. -GEN_VEXT_VX(vwsubu_wx_h, 2, 4, clearl)
  717. -GEN_VEXT_VX(vwsubu_wx_w, 4, 8, clearq)
  718. -GEN_VEXT_VX(vwadd_wx_b, 1, 2, clearh)
  719. -GEN_VEXT_VX(vwadd_wx_h, 2, 4, clearl)
  720. -GEN_VEXT_VX(vwadd_wx_w, 4, 8, clearq)
  721. -GEN_VEXT_VX(vwsub_wx_b, 1, 2, clearh)
  722. -GEN_VEXT_VX(vwsub_wx_h, 2, 4, clearl)
  723. -GEN_VEXT_VX(vwsub_wx_w, 4, 8, clearq)
  724. +GEN_VEXT_VX(vwaddu_vx_b, 1, 2)
  725. +GEN_VEXT_VX(vwaddu_vx_h, 2, 4)
  726. +GEN_VEXT_VX(vwaddu_vx_w, 4, 8)
  727. +GEN_VEXT_VX(vwsubu_vx_b, 1, 2)
  728. +GEN_VEXT_VX(vwsubu_vx_h, 2, 4)
  729. +GEN_VEXT_VX(vwsubu_vx_w, 4, 8)
  730. +GEN_VEXT_VX(vwadd_vx_b, 1, 2)
  731. +GEN_VEXT_VX(vwadd_vx_h, 2, 4)
  732. +GEN_VEXT_VX(vwadd_vx_w, 4, 8)
  733. +GEN_VEXT_VX(vwsub_vx_b, 1, 2)
  734. +GEN_VEXT_VX(vwsub_vx_h, 2, 4)
  735. +GEN_VEXT_VX(vwsub_vx_w, 4, 8)
  736. +GEN_VEXT_VX(vwaddu_wx_b, 1, 2)
  737. +GEN_VEXT_VX(vwaddu_wx_h, 2, 4)
  738. +GEN_VEXT_VX(vwaddu_wx_w, 4, 8)
  739. +GEN_VEXT_VX(vwsubu_wx_b, 1, 2)
  740. +GEN_VEXT_VX(vwsubu_wx_h, 2, 4)
  741. +GEN_VEXT_VX(vwsubu_wx_w, 4, 8)
  742. +GEN_VEXT_VX(vwadd_wx_b, 1, 2)
  743. +GEN_VEXT_VX(vwadd_wx_h, 2, 4)
  744. +GEN_VEXT_VX(vwadd_wx_w, 4, 8)
  745. +GEN_VEXT_VX(vwsub_wx_b, 1, 2)
  746. +GEN_VEXT_VX(vwsub_wx_h, 2, 4)
  747. +GEN_VEXT_VX(vwsub_wx_w, 4, 8)
  748. /* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
  749. #define DO_VADC(N, M, C) (N + M + C)
  750. #define DO_VSBC(N, M, C) (N - M - C)
  751. -#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \
  752. +#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP) \
  753. void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  754. CPURISCVState *env, uint32_t desc) \
  755. { \
  756. uint32_t vl = env->vl; \
  757. - uint32_t esz = sizeof(ETYPE); \
  758. - uint32_t vlmax = vext_maxsz(desc) / esz; \
  759. uint32_t i; \
  760. \
  761. for (i = 0; i < vl; i++) { \
  762. @@ -1190,26 +1103,23 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  763. \
  764. *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry); \
  765. } \
  766. - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
  767. }
  768. -GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC, clearb)
  769. -GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC, clearh)
  770. -GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC, clearl)
  771. -GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC, clearq)
  772. +GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC)
  773. +GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC)
  774. +GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC)
  775. +GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC)
  776. -GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t, H1, DO_VSBC, clearb)
  777. -GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC, clearh)
  778. -GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC, clearl)
  779. -GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC, clearq)
  780. +GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t, H1, DO_VSBC)
  781. +GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC)
  782. +GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC)
  783. +GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC)
  784. -#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \
  785. +#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP) \
  786. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  787. CPURISCVState *env, uint32_t desc) \
  788. { \
  789. uint32_t vl = env->vl; \
  790. - uint32_t esz = sizeof(ETYPE); \
  791. - uint32_t vlmax = vext_maxsz(desc) / esz; \
  792. uint32_t i; \
  793. \
  794. for (i = 0; i < vl; i++) { \
  795. @@ -1218,18 +1128,17 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  796. \
  797. *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
  798. } \
  799. - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
  800. }
  801. -GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC, clearb)
  802. -GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC, clearh)
  803. -GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC, clearl)
  804. -GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC, clearq)
  805. +GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC)
  806. +GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC)
  807. +GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC)
  808. +GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC)
  809. -GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t, H1, DO_VSBC, clearb)
  810. -GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC, clearh)
  811. -GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC, clearl)
  812. -GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq)
  813. +GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t, H1, DO_VSBC)
  814. +GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC)
  815. +GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC)
  816. +GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC)
  817. #define DO_MADC(N, M, C) (C ? (__typeof(N))(N + M + 1) <= N : \
  818. (__typeof(N))(N + M) < N)
  819. @@ -1308,18 +1217,18 @@ RVVCALL(OPIVV2, vxor_vv_b, OP_SSS_B, H1, H1, H1, DO_XOR)
  820. RVVCALL(OPIVV2, vxor_vv_h, OP_SSS_H, H2, H2, H2, DO_XOR)
  821. RVVCALL(OPIVV2, vxor_vv_w, OP_SSS_W, H4, H4, H4, DO_XOR)
  822. RVVCALL(OPIVV2, vxor_vv_d, OP_SSS_D, H8, H8, H8, DO_XOR)
  823. -GEN_VEXT_VV(vand_vv_b, 1, 1, clearb)
  824. -GEN_VEXT_VV(vand_vv_h, 2, 2, clearh)
  825. -GEN_VEXT_VV(vand_vv_w, 4, 4, clearl)
  826. -GEN_VEXT_VV(vand_vv_d, 8, 8, clearq)
  827. -GEN_VEXT_VV(vor_vv_b, 1, 1, clearb)
  828. -GEN_VEXT_VV(vor_vv_h, 2, 2, clearh)
  829. -GEN_VEXT_VV(vor_vv_w, 4, 4, clearl)
  830. -GEN_VEXT_VV(vor_vv_d, 8, 8, clearq)
  831. -GEN_VEXT_VV(vxor_vv_b, 1, 1, clearb)
  832. -GEN_VEXT_VV(vxor_vv_h, 2, 2, clearh)
  833. -GEN_VEXT_VV(vxor_vv_w, 4, 4, clearl)
  834. -GEN_VEXT_VV(vxor_vv_d, 8, 8, clearq)
  835. +GEN_VEXT_VV(vand_vv_b, 1, 1)
  836. +GEN_VEXT_VV(vand_vv_h, 2, 2)
  837. +GEN_VEXT_VV(vand_vv_w, 4, 4)
  838. +GEN_VEXT_VV(vand_vv_d, 8, 8)
  839. +GEN_VEXT_VV(vor_vv_b, 1, 1)
  840. +GEN_VEXT_VV(vor_vv_h, 2, 2)
  841. +GEN_VEXT_VV(vor_vv_w, 4, 4)
  842. +GEN_VEXT_VV(vor_vv_d, 8, 8)
  843. +GEN_VEXT_VV(vxor_vv_b, 1, 1)
  844. +GEN_VEXT_VV(vxor_vv_h, 2, 2)
  845. +GEN_VEXT_VV(vxor_vv_w, 4, 4)
  846. +GEN_VEXT_VV(vxor_vv_d, 8, 8)
  847. RVVCALL(OPIVX2, vand_vx_b, OP_SSS_B, H1, H1, DO_AND)
  848. RVVCALL(OPIVX2, vand_vx_h, OP_SSS_H, H2, H2, DO_AND)
  849. @@ -1333,32 +1242,30 @@ RVVCALL(OPIVX2, vxor_vx_b, OP_SSS_B, H1, H1, DO_XOR)
  850. RVVCALL(OPIVX2, vxor_vx_h, OP_SSS_H, H2, H2, DO_XOR)
  851. RVVCALL(OPIVX2, vxor_vx_w, OP_SSS_W, H4, H4, DO_XOR)
  852. RVVCALL(OPIVX2, vxor_vx_d, OP_SSS_D, H8, H8, DO_XOR)
  853. -GEN_VEXT_VX(vand_vx_b, 1, 1, clearb)
  854. -GEN_VEXT_VX(vand_vx_h, 2, 2, clearh)
  855. -GEN_VEXT_VX(vand_vx_w, 4, 4, clearl)
  856. -GEN_VEXT_VX(vand_vx_d, 8, 8, clearq)
  857. -GEN_VEXT_VX(vor_vx_b, 1, 1, clearb)
  858. -GEN_VEXT_VX(vor_vx_h, 2, 2, clearh)
  859. -GEN_VEXT_VX(vor_vx_w, 4, 4, clearl)
  860. -GEN_VEXT_VX(vor_vx_d, 8, 8, clearq)
  861. -GEN_VEXT_VX(vxor_vx_b, 1, 1, clearb)
  862. -GEN_VEXT_VX(vxor_vx_h, 2, 2, clearh)
  863. -GEN_VEXT_VX(vxor_vx_w, 4, 4, clearl)
  864. -GEN_VEXT_VX(vxor_vx_d, 8, 8, clearq)
  865. +GEN_VEXT_VX(vand_vx_b, 1, 1)
  866. +GEN_VEXT_VX(vand_vx_h, 2, 2)
  867. +GEN_VEXT_VX(vand_vx_w, 4, 4)
  868. +GEN_VEXT_VX(vand_vx_d, 8, 8)
  869. +GEN_VEXT_VX(vor_vx_b, 1, 1)
  870. +GEN_VEXT_VX(vor_vx_h, 2, 2)
  871. +GEN_VEXT_VX(vor_vx_w, 4, 4)
  872. +GEN_VEXT_VX(vor_vx_d, 8, 8)
  873. +GEN_VEXT_VX(vxor_vx_b, 1, 1)
  874. +GEN_VEXT_VX(vxor_vx_h, 2, 2)
  875. +GEN_VEXT_VX(vxor_vx_w, 4, 4)
  876. +GEN_VEXT_VX(vxor_vx_d, 8, 8)
  877. /* Vector Single-Width Bit Shift Instructions */
  878. #define DO_SLL(N, M) (N << (M))
  879. #define DO_SRL(N, M) (N >> (M))
  880. /* generate the helpers for shift instructions with two vector operators */
  881. -#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK, CLEAR_FN) \
  882. +#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK) \
  883. void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  884. void *vs2, CPURISCVState *env, uint32_t desc) \
  885. { \
  886. uint32_t vm = vext_vm(desc); \
  887. uint32_t vl = env->vl; \
  888. - uint32_t esz = sizeof(TS1); \
  889. - uint32_t vlmax = vext_maxsz(desc) / esz; \
  890. uint32_t i; \
  891. \
  892. for (i = 0; i < vl; i++) { \
  893. @@ -1369,73 +1276,69 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  894. TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
  895. *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK); \
  896. } \
  897. - CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
  898. }
-GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7, clearb)
-GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf, clearh)
-GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f, clearl)
-GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f, clearq)
+GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7)
+GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf)
+GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f)
+GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f)
-GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
-GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
-GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
-GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)
+GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7)
+GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf)
+GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f)
+GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f)
-GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb)
-GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
-GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
-GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
+GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t, int8_t, H1, H1, DO_SRL, 0x7)
+GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf)
+GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f)
+GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f)
/* generate the helpers for shift instructions with one vector and one scalar */
-#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK, CLEAR_FN) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
- void *vs2, CPURISCVState *env, uint32_t desc) \
-{ \
- uint32_t vm = vext_vm(desc); \
- uint32_t vl = env->vl; \
- uint32_t esz = sizeof(TD); \
- uint32_t vlmax = vext_maxsz(desc) / esz; \
- uint32_t i; \
- \
- for (i = 0; i < vl; i++) { \
- if (!vm && !vext_elem_mask(v0, i)) { \
- continue; \
- } \
- TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
- *((TD *)vd + HD(i)) = OP(s2, s1 & MASK); \
- } \
- CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
-}
-
-GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7, clearb)
-GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf, clearh)
-GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f, clearl)
-GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f, clearq)
-
-GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
-GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
-GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
-GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)
-
-GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb)
-GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
-GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
-GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
+#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ continue; \
+ } \
+ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, s1 & MASK); \
+ } \
+}
+
+GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7)
+GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf)
+GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f)
+GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f)
+
+GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7)
+GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf)
+GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f)
+GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f)
+
+GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7)
+GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf)
+GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f)
+GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f)
/* Vector Narrowing Integer Right Shift Instructions */
-GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
-GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
-GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
-GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
-GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
-GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
-GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
-GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
-GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
-GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
-GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
-GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
+GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf)
+GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
+GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
+GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf)
+GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f)
+GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f)
+GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf)
+GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
+GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
+GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf)
+GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f)
+GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f)
/* Vector Integer Comparison Instructions */
#define DO_MSEQ(N, M) (N == M)
@@ -1575,22 +1478,22 @@ RVVCALL(OPIVV2, vmax_vv_b, OP_SSS_B, H1, H1, H1, DO_MAX)
RVVCALL(OPIVV2, vmax_vv_h, OP_SSS_H, H2, H2, H2, DO_MAX)
RVVCALL(OPIVV2, vmax_vv_w, OP_SSS_W, H4, H4, H4, DO_MAX)
RVVCALL(OPIVV2, vmax_vv_d, OP_SSS_D, H8, H8, H8, DO_MAX)
-GEN_VEXT_VV(vminu_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vminu_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vminu_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vminu_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vmin_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vmin_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vmin_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vmin_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vmaxu_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vmaxu_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vmaxu_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vmaxu_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vmax_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vmax_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vmax_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vmax_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vminu_vv_b, 1, 1)
+GEN_VEXT_VV(vminu_vv_h, 2, 2)
+GEN_VEXT_VV(vminu_vv_w, 4, 4)
+GEN_VEXT_VV(vminu_vv_d, 8, 8)
+GEN_VEXT_VV(vmin_vv_b, 1, 1)
+GEN_VEXT_VV(vmin_vv_h, 2, 2)
+GEN_VEXT_VV(vmin_vv_w, 4, 4)
+GEN_VEXT_VV(vmin_vv_d, 8, 8)
+GEN_VEXT_VV(vmaxu_vv_b, 1, 1)
+GEN_VEXT_VV(vmaxu_vv_h, 2, 2)
+GEN_VEXT_VV(vmaxu_vv_w, 4, 4)
+GEN_VEXT_VV(vmaxu_vv_d, 8, 8)
+GEN_VEXT_VV(vmax_vv_b, 1, 1)
+GEN_VEXT_VV(vmax_vv_h, 2, 2)
+GEN_VEXT_VV(vmax_vv_w, 4, 4)
+GEN_VEXT_VV(vmax_vv_d, 8, 8)
RVVCALL(OPIVX2, vminu_vx_b, OP_UUU_B, H1, H1, DO_MIN)
RVVCALL(OPIVX2, vminu_vx_h, OP_UUU_H, H2, H2, DO_MIN)
@@ -1608,22 +1511,22 @@ RVVCALL(OPIVX2, vmax_vx_b, OP_SSS_B, H1, H1, DO_MAX)
RVVCALL(OPIVX2, vmax_vx_h, OP_SSS_H, H2, H2, DO_MAX)
RVVCALL(OPIVX2, vmax_vx_w, OP_SSS_W, H4, H4, DO_MAX)
RVVCALL(OPIVX2, vmax_vx_d, OP_SSS_D, H8, H8, DO_MAX)
-GEN_VEXT_VX(vminu_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vminu_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vminu_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vminu_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vmin_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vmin_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vmin_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vmin_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vmaxu_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vmaxu_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vmaxu_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vmaxu_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vmax_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vmax_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vmax_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vmax_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vminu_vx_b, 1, 1)
+GEN_VEXT_VX(vminu_vx_h, 2, 2)
+GEN_VEXT_VX(vminu_vx_w, 4, 4)
+GEN_VEXT_VX(vminu_vx_d, 8, 8)
+GEN_VEXT_VX(vmin_vx_b, 1, 1)
+GEN_VEXT_VX(vmin_vx_h, 2, 2)
+GEN_VEXT_VX(vmin_vx_w, 4, 4)
+GEN_VEXT_VX(vmin_vx_d, 8, 8)
+GEN_VEXT_VX(vmaxu_vx_b, 1, 1)
+GEN_VEXT_VX(vmaxu_vx_h, 2, 2)
+GEN_VEXT_VX(vmaxu_vx_w, 4, 4)
+GEN_VEXT_VX(vmaxu_vx_d, 8, 8)
+GEN_VEXT_VX(vmax_vx_b, 1, 1)
+GEN_VEXT_VX(vmax_vx_h, 2, 2)
+GEN_VEXT_VX(vmax_vx_w, 4, 4)
+GEN_VEXT_VX(vmax_vx_d, 8, 8)
/* Vector Single-Width Integer Multiply Instructions */
#define DO_MUL(N, M) (N * M)
@@ -1631,10 +1534,10 @@ RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL)
RVVCALL(OPIVV2, vmul_vv_h, OP_SSS_H, H2, H2, H2, DO_MUL)
RVVCALL(OPIVV2, vmul_vv_w, OP_SSS_W, H4, H4, H4, DO_MUL)
RVVCALL(OPIVV2, vmul_vv_d, OP_SSS_D, H8, H8, H8, DO_MUL)
-GEN_VEXT_VV(vmul_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vmul_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vmul_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vmul_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vmul_vv_b, 1, 1)
+GEN_VEXT_VV(vmul_vv_h, 2, 2)
+GEN_VEXT_VV(vmul_vv_w, 4, 4)
+GEN_VEXT_VV(vmul_vv_d, 8, 8)
static int8_t do_mulh_b(int8_t s2, int8_t s1)
{
@@ -1738,18 +1641,18 @@ RVVCALL(OPIVV2, vmulhsu_vv_b, OP_SUS_B, H1, H1, H1, do_mulhsu_b)
RVVCALL(OPIVV2, vmulhsu_vv_h, OP_SUS_H, H2, H2, H2, do_mulhsu_h)
RVVCALL(OPIVV2, vmulhsu_vv_w, OP_SUS_W, H4, H4, H4, do_mulhsu_w)
RVVCALL(OPIVV2, vmulhsu_vv_d, OP_SUS_D, H8, H8, H8, do_mulhsu_d)
-GEN_VEXT_VV(vmulh_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vmulh_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vmulh_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vmulh_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vmulhu_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vmulhu_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vmulhu_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vmulhu_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vmulhsu_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vmulhsu_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vmulhsu_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vmulhsu_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vmulh_vv_b, 1, 1)
+GEN_VEXT_VV(vmulh_vv_h, 2, 2)
+GEN_VEXT_VV(vmulh_vv_w, 4, 4)
+GEN_VEXT_VV(vmulh_vv_d, 8, 8)
+GEN_VEXT_VV(vmulhu_vv_b, 1, 1)
+GEN_VEXT_VV(vmulhu_vv_h, 2, 2)
+GEN_VEXT_VV(vmulhu_vv_w, 4, 4)
+GEN_VEXT_VV(vmulhu_vv_d, 8, 8)
+GEN_VEXT_VV(vmulhsu_vv_b, 1, 1)
+GEN_VEXT_VV(vmulhsu_vv_h, 2, 2)
+GEN_VEXT_VV(vmulhsu_vv_w, 4, 4)
+GEN_VEXT_VV(vmulhsu_vv_d, 8, 8)
RVVCALL(OPIVX2, vmul_vx_b, OP_SSS_B, H1, H1, DO_MUL)
RVVCALL(OPIVX2, vmul_vx_h, OP_SSS_H, H2, H2, DO_MUL)
@@ -1767,22 +1670,22 @@ RVVCALL(OPIVX2, vmulhsu_vx_b, OP_SUS_B, H1, H1, do_mulhsu_b)
RVVCALL(OPIVX2, vmulhsu_vx_h, OP_SUS_H, H2, H2, do_mulhsu_h)
RVVCALL(OPIVX2, vmulhsu_vx_w, OP_SUS_W, H4, H4, do_mulhsu_w)
RVVCALL(OPIVX2, vmulhsu_vx_d, OP_SUS_D, H8, H8, do_mulhsu_d)
-GEN_VEXT_VX(vmul_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vmul_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vmul_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vmul_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vmulh_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vmulh_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vmulh_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vmulh_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vmulhu_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vmulhu_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vmulhu_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vmulhu_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vmulhsu_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vmulhsu_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vmulhsu_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vmulhsu_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vmul_vx_b, 1, 1)
+GEN_VEXT_VX(vmul_vx_h, 2, 2)
+GEN_VEXT_VX(vmul_vx_w, 4, 4)
+GEN_VEXT_VX(vmul_vx_d, 8, 8)
+GEN_VEXT_VX(vmulh_vx_b, 1, 1)
+GEN_VEXT_VX(vmulh_vx_h, 2, 2)
+GEN_VEXT_VX(vmulh_vx_w, 4, 4)
+GEN_VEXT_VX(vmulh_vx_d, 8, 8)
+GEN_VEXT_VX(vmulhu_vx_b, 1, 1)
+GEN_VEXT_VX(vmulhu_vx_h, 2, 2)
+GEN_VEXT_VX(vmulhu_vx_w, 4, 4)
+GEN_VEXT_VX(vmulhu_vx_d, 8, 8)
+GEN_VEXT_VX(vmulhsu_vx_b, 1, 1)
+GEN_VEXT_VX(vmulhsu_vx_h, 2, 2)
+GEN_VEXT_VX(vmulhsu_vx_w, 4, 4)
+GEN_VEXT_VX(vmulhsu_vx_d, 8, 8)
/* Vector Integer Divide Instructions */
#define DO_DIVU(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : N / M)
@@ -1808,22 +1711,22 @@ RVVCALL(OPIVV2, vrem_vv_b, OP_SSS_B, H1, H1, H1, DO_REM)
RVVCALL(OPIVV2, vrem_vv_h, OP_SSS_H, H2, H2, H2, DO_REM)
RVVCALL(OPIVV2, vrem_vv_w, OP_SSS_W, H4, H4, H4, DO_REM)
RVVCALL(OPIVV2, vrem_vv_d, OP_SSS_D, H8, H8, H8, DO_REM)
-GEN_VEXT_VV(vdivu_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vdivu_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vdivu_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vdivu_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vdiv_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vdiv_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vdiv_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vdiv_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vremu_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vremu_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vremu_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vremu_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vrem_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vrem_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vrem_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vrem_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vdivu_vv_b, 1, 1)
+GEN_VEXT_VV(vdivu_vv_h, 2, 2)
+GEN_VEXT_VV(vdivu_vv_w, 4, 4)
+GEN_VEXT_VV(vdivu_vv_d, 8, 8)
+GEN_VEXT_VV(vdiv_vv_b, 1, 1)
+GEN_VEXT_VV(vdiv_vv_h, 2, 2)
+GEN_VEXT_VV(vdiv_vv_w, 4, 4)
+GEN_VEXT_VV(vdiv_vv_d, 8, 8)
+GEN_VEXT_VV(vremu_vv_b, 1, 1)
+GEN_VEXT_VV(vremu_vv_h, 2, 2)
+GEN_VEXT_VV(vremu_vv_w, 4, 4)
+GEN_VEXT_VV(vremu_vv_d, 8, 8)
+GEN_VEXT_VV(vrem_vv_b, 1, 1)
+GEN_VEXT_VV(vrem_vv_h, 2, 2)
+GEN_VEXT_VV(vrem_vv_w, 4, 4)
+GEN_VEXT_VV(vrem_vv_d, 8, 8)
RVVCALL(OPIVX2, vdivu_vx_b, OP_UUU_B, H1, H1, DO_DIVU)
RVVCALL(OPIVX2, vdivu_vx_h, OP_UUU_H, H2, H2, DO_DIVU)
@@ -1841,22 +1744,22 @@ RVVCALL(OPIVX2, vrem_vx_b, OP_SSS_B, H1, H1, DO_REM)
RVVCALL(OPIVX2, vrem_vx_h, OP_SSS_H, H2, H2, DO_REM)
RVVCALL(OPIVX2, vrem_vx_w, OP_SSS_W, H4, H4, DO_REM)
RVVCALL(OPIVX2, vrem_vx_d, OP_SSS_D, H8, H8, DO_REM)
-GEN_VEXT_VX(vdivu_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vdivu_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vdivu_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vdivu_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vdiv_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vdiv_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vdiv_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vdiv_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vremu_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vremu_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vremu_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vremu_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vrem_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vrem_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vrem_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vrem_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vdivu_vx_b, 1, 1)
+GEN_VEXT_VX(vdivu_vx_h, 2, 2)
+GEN_VEXT_VX(vdivu_vx_w, 4, 4)
+GEN_VEXT_VX(vdivu_vx_d, 8, 8)
+GEN_VEXT_VX(vdiv_vx_b, 1, 1)
+GEN_VEXT_VX(vdiv_vx_h, 2, 2)
+GEN_VEXT_VX(vdiv_vx_w, 4, 4)
+GEN_VEXT_VX(vdiv_vx_d, 8, 8)
+GEN_VEXT_VX(vremu_vx_b, 1, 1)
+GEN_VEXT_VX(vremu_vx_h, 2, 2)
+GEN_VEXT_VX(vremu_vx_w, 4, 4)
+GEN_VEXT_VX(vremu_vx_d, 8, 8)
+GEN_VEXT_VX(vrem_vx_b, 1, 1)
+GEN_VEXT_VX(vrem_vx_h, 2, 2)
+GEN_VEXT_VX(vrem_vx_w, 4, 4)
+GEN_VEXT_VX(vrem_vx_d, 8, 8)
/* Vector Widening Integer Multiply Instructions */
RVVCALL(OPIVV2, vwmul_vv_b, WOP_SSS_B, H2, H1, H1, DO_MUL)
@@ -1868,15 +1771,15 @@ RVVCALL(OPIVV2, vwmulu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MUL)
RVVCALL(OPIVV2, vwmulsu_vv_b, WOP_SUS_B, H2, H1, H1, DO_MUL)
RVVCALL(OPIVV2, vwmulsu_vv_h, WOP_SUS_H, H4, H2, H2, DO_MUL)
RVVCALL(OPIVV2, vwmulsu_vv_w, WOP_SUS_W, H8, H4, H4, DO_MUL)
-GEN_VEXT_VV(vwmul_vv_b, 1, 2, clearh)
-GEN_VEXT_VV(vwmul_vv_h, 2, 4, clearl)
-GEN_VEXT_VV(vwmul_vv_w, 4, 8, clearq)
-GEN_VEXT_VV(vwmulu_vv_b, 1, 2, clearh)
-GEN_VEXT_VV(vwmulu_vv_h, 2, 4, clearl)
-GEN_VEXT_VV(vwmulu_vv_w, 4, 8, clearq)
-GEN_VEXT_VV(vwmulsu_vv_b, 1, 2, clearh)
-GEN_VEXT_VV(vwmulsu_vv_h, 2, 4, clearl)
-GEN_VEXT_VV(vwmulsu_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwmul_vv_b, 1, 2)
+GEN_VEXT_VV(vwmul_vv_h, 2, 4)
+GEN_VEXT_VV(vwmul_vv_w, 4, 8)
+GEN_VEXT_VV(vwmulu_vv_b, 1, 2)
+GEN_VEXT_VV(vwmulu_vv_h, 2, 4)
+GEN_VEXT_VV(vwmulu_vv_w, 4, 8)
+GEN_VEXT_VV(vwmulsu_vv_b, 1, 2)
+GEN_VEXT_VV(vwmulsu_vv_h, 2, 4)
+GEN_VEXT_VV(vwmulsu_vv_w, 4, 8)
RVVCALL(OPIVX2, vwmul_vx_b, WOP_SSS_B, H2, H1, DO_MUL)
RVVCALL(OPIVX2, vwmul_vx_h, WOP_SSS_H, H4, H2, DO_MUL)
@@ -1887,15 +1790,15 @@ RVVCALL(OPIVX2, vwmulu_vx_w, WOP_UUU_W, H8, H4, DO_MUL)
RVVCALL(OPIVX2, vwmulsu_vx_b, WOP_SUS_B, H2, H1, DO_MUL)
RVVCALL(OPIVX2, vwmulsu_vx_h, WOP_SUS_H, H4, H2, DO_MUL)
RVVCALL(OPIVX2, vwmulsu_vx_w, WOP_SUS_W, H8, H4, DO_MUL)
-GEN_VEXT_VX(vwmul_vx_b, 1, 2, clearh)
-GEN_VEXT_VX(vwmul_vx_h, 2, 4, clearl)
-GEN_VEXT_VX(vwmul_vx_w, 4, 8, clearq)
-GEN_VEXT_VX(vwmulu_vx_b, 1, 2, clearh)
-GEN_VEXT_VX(vwmulu_vx_h, 2, 4, clearl)
-GEN_VEXT_VX(vwmulu_vx_w, 4, 8, clearq)
-GEN_VEXT_VX(vwmulsu_vx_b, 1, 2, clearh)
-GEN_VEXT_VX(vwmulsu_vx_h, 2, 4, clearl)
-GEN_VEXT_VX(vwmulsu_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwmul_vx_b, 1, 2)
+GEN_VEXT_VX(vwmul_vx_h, 2, 4)
+GEN_VEXT_VX(vwmul_vx_w, 4, 8)
+GEN_VEXT_VX(vwmulu_vx_b, 1, 2)
+GEN_VEXT_VX(vwmulu_vx_h, 2, 4)
+GEN_VEXT_VX(vwmulu_vx_w, 4, 8)
+GEN_VEXT_VX(vwmulsu_vx_b, 1, 2)
+GEN_VEXT_VX(vwmulsu_vx_h, 2, 4)
+GEN_VEXT_VX(vwmulsu_vx_w, 4, 8)
/* Vector Single-Width Integer Multiply-Add Instructions */
#define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
@@ -1927,22 +1830,22 @@ RVVCALL(OPIVV3, vnmsub_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSUB)
RVVCALL(OPIVV3, vnmsub_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSUB)
RVVCALL(OPIVV3, vnmsub_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSUB)
RVVCALL(OPIVV3, vnmsub_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSUB)
-GEN_VEXT_VV(vmacc_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vmacc_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vmacc_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vmacc_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vnmsac_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vnmsac_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vnmsac_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vnmsac_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vmadd_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vmadd_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vmadd_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vmadd_vv_d, 8, 8, clearq)
-GEN_VEXT_VV(vnmsub_vv_b, 1, 1, clearb)
-GEN_VEXT_VV(vnmsub_vv_h, 2, 2, clearh)
-GEN_VEXT_VV(vnmsub_vv_w, 4, 4, clearl)
-GEN_VEXT_VV(vnmsub_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vmacc_vv_b, 1, 1)
+GEN_VEXT_VV(vmacc_vv_h, 2, 2)
+GEN_VEXT_VV(vmacc_vv_w, 4, 4)
+GEN_VEXT_VV(vmacc_vv_d, 8, 8)
+GEN_VEXT_VV(vnmsac_vv_b, 1, 1)
+GEN_VEXT_VV(vnmsac_vv_h, 2, 2)
+GEN_VEXT_VV(vnmsac_vv_w, 4, 4)
+GEN_VEXT_VV(vnmsac_vv_d, 8, 8)
+GEN_VEXT_VV(vmadd_vv_b, 1, 1)
+GEN_VEXT_VV(vmadd_vv_h, 2, 2)
+GEN_VEXT_VV(vmadd_vv_w, 4, 4)
+GEN_VEXT_VV(vmadd_vv_d, 8, 8)
+GEN_VEXT_VV(vnmsub_vv_b, 1, 1)
+GEN_VEXT_VV(vnmsub_vv_h, 2, 2)
+GEN_VEXT_VV(vnmsub_vv_w, 4, 4)
+GEN_VEXT_VV(vnmsub_vv_d, 8, 8)
#define OPIVX3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
@@ -1968,22 +1871,22 @@ RVVCALL(OPIVX3, vnmsub_vx_b, OP_SSS_B, H1, H1, DO_NMSUB)
RVVCALL(OPIVX3, vnmsub_vx_h, OP_SSS_H, H2, H2, DO_NMSUB)
RVVCALL(OPIVX3, vnmsub_vx_w, OP_SSS_W, H4, H4, DO_NMSUB)
RVVCALL(OPIVX3, vnmsub_vx_d, OP_SSS_D, H8, H8, DO_NMSUB)
-GEN_VEXT_VX(vmacc_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vmacc_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vmacc_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vmacc_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vnmsac_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vnmsac_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vnmsac_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vnmsac_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vmadd_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vmadd_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vmadd_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vmadd_vx_d, 8, 8, clearq)
-GEN_VEXT_VX(vnmsub_vx_b, 1, 1, clearb)
-GEN_VEXT_VX(vnmsub_vx_h, 2, 2, clearh)
-GEN_VEXT_VX(vnmsub_vx_w, 4, 4, clearl)
-GEN_VEXT_VX(vnmsub_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vmacc_vx_b, 1, 1)
+GEN_VEXT_VX(vmacc_vx_h, 2, 2)
+GEN_VEXT_VX(vmacc_vx_w, 4, 4)
+GEN_VEXT_VX(vmacc_vx_d, 8, 8)
+GEN_VEXT_VX(vnmsac_vx_b, 1, 1)
+GEN_VEXT_VX(vnmsac_vx_h, 2, 2)
+GEN_VEXT_VX(vnmsac_vx_w, 4, 4)
+GEN_VEXT_VX(vnmsac_vx_d, 8, 8)
+GEN_VEXT_VX(vmadd_vx_b, 1, 1)
+GEN_VEXT_VX(vmadd_vx_h, 2, 2)
+GEN_VEXT_VX(vmadd_vx_w, 4, 4)
+GEN_VEXT_VX(vmadd_vx_d, 8, 8)
+GEN_VEXT_VX(vnmsub_vx_b, 1, 1)
+GEN_VEXT_VX(vnmsub_vx_h, 2, 2)
+GEN_VEXT_VX(vnmsub_vx_w, 4, 4)
+GEN_VEXT_VX(vnmsub_vx_d, 8, 8)
/* Vector Widening Integer Multiply-Add Instructions */
RVVCALL(OPIVV3, vwmaccu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MACC)
@@ -1995,15 +1898,15 @@ RVVCALL(OPIVV3, vwmacc_vv_w, WOP_SSS_W, H8, H4, H4, DO_MACC)
RVVCALL(OPIVV3, vwmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, DO_MACC)
RVVCALL(OPIVV3, vwmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, DO_MACC)
RVVCALL(OPIVV3, vwmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, DO_MACC)
-GEN_VEXT_VV(vwmaccu_vv_b, 1, 2, clearh)
-GEN_VEXT_VV(vwmaccu_vv_h, 2, 4, clearl)
-GEN_VEXT_VV(vwmaccu_vv_w, 4, 8, clearq)
-GEN_VEXT_VV(vwmacc_vv_b, 1, 2, clearh)
-GEN_VEXT_VV(vwmacc_vv_h, 2, 4, clearl)
-GEN_VEXT_VV(vwmacc_vv_w, 4, 8, clearq)
-GEN_VEXT_VV(vwmaccsu_vv_b, 1, 2, clearh)
-GEN_VEXT_VV(vwmaccsu_vv_h, 2, 4, clearl)
-GEN_VEXT_VV(vwmaccsu_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwmaccu_vv_b, 1, 2)
+GEN_VEXT_VV(vwmaccu_vv_h, 2, 4)
+GEN_VEXT_VV(vwmaccu_vv_w, 4, 8)
+GEN_VEXT_VV(vwmacc_vv_b, 1, 2)
+GEN_VEXT_VV(vwmacc_vv_h, 2, 4)
+GEN_VEXT_VV(vwmacc_vv_w, 4, 8)
+GEN_VEXT_VV(vwmaccsu_vv_b, 1, 2)
+GEN_VEXT_VV(vwmaccsu_vv_h, 2, 4)
+GEN_VEXT_VV(vwmaccsu_vv_w, 4, 8)
RVVCALL(OPIVX3, vwmaccu_vx_b, WOP_UUU_B, H2, H1, DO_MACC)
RVVCALL(OPIVX3, vwmaccu_vx_h, WOP_UUU_H, H4, H2, DO_MACC)
@@ -2017,89 +1920,78 @@ RVVCALL(OPIVX3, vwmaccsu_vx_w, WOP_SSU_W, H8, H4, DO_MACC)
RVVCALL(OPIVX3, vwmaccus_vx_b, WOP_SUS_B, H2, H1, DO_MACC)
RVVCALL(OPIVX3, vwmaccus_vx_h, WOP_SUS_H, H4, H2, DO_MACC)
RVVCALL(OPIVX3, vwmaccus_vx_w, WOP_SUS_W, H8, H4, DO_MACC)
-GEN_VEXT_VX(vwmaccu_vx_b, 1, 2, clearh)
-GEN_VEXT_VX(vwmaccu_vx_h, 2, 4, clearl)
-GEN_VEXT_VX(vwmaccu_vx_w, 4, 8, clearq)
-GEN_VEXT_VX(vwmacc_vx_b, 1, 2, clearh)
-GEN_VEXT_VX(vwmacc_vx_h, 2, 4, clearl)
-GEN_VEXT_VX(vwmacc_vx_w, 4, 8, clearq)
-GEN_VEXT_VX(vwmaccsu_vx_b, 1, 2, clearh)
-GEN_VEXT_VX(vwmaccsu_vx_h, 2, 4, clearl)
-GEN_VEXT_VX(vwmaccsu_vx_w, 4, 8, clearq)
-GEN_VEXT_VX(vwmaccus_vx_b, 1, 2, clearh)
-GEN_VEXT_VX(vwmaccus_vx_h, 2, 4, clearl)
-GEN_VEXT_VX(vwmaccus_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwmaccu_vx_b, 1, 2)
+GEN_VEXT_VX(vwmaccu_vx_h, 2, 4)
+GEN_VEXT_VX(vwmaccu_vx_w, 4, 8)
+GEN_VEXT_VX(vwmacc_vx_b, 1, 2)
+GEN_VEXT_VX(vwmacc_vx_h, 2, 4)
+GEN_VEXT_VX(vwmacc_vx_w, 4, 8)
+GEN_VEXT_VX(vwmaccsu_vx_b, 1, 2)
+GEN_VEXT_VX(vwmaccsu_vx_h, 2, 4)
+GEN_VEXT_VX(vwmaccsu_vx_w, 4, 8)
+GEN_VEXT_VX(vwmaccus_vx_b, 1, 2)
+GEN_VEXT_VX(vwmaccus_vx_h, 2, 4)
+GEN_VEXT_VX(vwmaccus_vx_w, 4, 8)
/* Vector Integer Merge and Move Instructions */
-#define GEN_VEXT_VMV_VV(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VMV_VV(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
uint32_t desc) \
{ \
uint32_t vl = env->vl; \
- uint32_t esz = sizeof(ETYPE); \
- uint32_t vlmax = vext_maxsz(desc) / esz; \
uint32_t i; \
\
for (i = 0; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
*((ETYPE *)vd + H(i)) = s1; \
} \
- CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
}
-GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1, clearb)
-GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2, clearh)
-GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4, clearl)
-GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8, clearq)
+GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1)
+GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2)
+GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4)
+GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8)
-#define GEN_VEXT_VMV_VX(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VMV_VX(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
uint32_t desc) \
{ \
uint32_t vl = env->vl; \
- uint32_t esz = sizeof(ETYPE); \
- uint32_t vlmax = vext_maxsz(desc) / esz; \
uint32_t i; \
\
for (i = 0; i < vl; i++) { \
*((ETYPE *)vd + H(i)) = (ETYPE)s1; \
} \
- CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
}
-GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1, clearb)
-GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2, clearh)
-GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4, clearl)
-GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8, clearq)
+GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1)
+GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2)
+GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4)
+GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8)
-#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
uint32_t vl = env->vl; \
- uint32_t esz = sizeof(ETYPE); \
- uint32_t vlmax = vext_maxsz(desc) / esz; \
uint32_t i; \
\
for (i = 0; i < vl; i++) { \
ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
*((ETYPE *)vd + H(i)) = *(vt + H(i)); \
} \
- CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
}
-GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1, clearb)
-GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2, clearh)
-GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4, clearl)
-GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8, clearq)
+GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1)
+GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2)
+GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4)
+GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8)
-#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
uint32_t vl = env->vl; \
- uint32_t esz = sizeof(ETYPE); \
- uint32_t vlmax = vext_maxsz(desc) / esz; \
uint32_t i; \
\
for (i = 0; i < vl; i++) { \
@@ -2108,13 +2000,12 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
(ETYPE)(target_long)s1); \
*((ETYPE *)vd + H(i)) = d; \
} \
- CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
}
-GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1, clearb)
-GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2, clearh)
-GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4, clearl)
-GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8, clearq)
+GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1)
+GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2)
+GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4)
+GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8)
/*
*** Vector Fixed-Point Arithmetic Instructions
@@ -2157,9 +2048,8 @@ static inline void
vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
CPURISCVState *env,
uint32_t desc, uint32_t esz, uint32_t dsz,
- opivv2_rm_fn *fn, clear_fn *clearfn)
+ opivv2_rm_fn *fn)
{
- uint32_t vlmax = vext_maxsz(desc) / esz;
uint32_t vm = vext_vm(desc);
uint32_t vl = env->vl;
@@ -2181,17 +2071,15 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
env, vl, vm, 3, fn);
break;
}
-
- clearfn(vd, vl, vl * dsz, vlmax * dsz);
}
/* generate helpers for fixed point instructions with OPIVV format */
-#define GEN_VEXT_VV_RM(NAME, ESZ, DSZ, CLEAR_FN) \
+#define GEN_VEXT_VV_RM(NAME, ESZ, DSZ) \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \
- do_##NAME, CLEAR_FN); \
+ do_##NAME); \
}
static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
@@ -2241,10 +2129,10 @@ RVVCALL(OPIVV2_RM, vsaddu_vv_b, OP_UUU_B, H1, H1, H1, saddu8)
RVVCALL(OPIVV2_RM, vsaddu_vv_h, OP_UUU_H, H2, H2, H2, saddu16)
RVVCALL(OPIVV2_RM, vsaddu_vv_w, OP_UUU_W, H4, H4, H4, saddu32)
RVVCALL(OPIVV2_RM, vsaddu_vv_d, OP_UUU_D, H8, H8, H8, saddu64)
-GEN_VEXT_VV_RM(vsaddu_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vsaddu_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vsaddu_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_RM(vsaddu_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_RM(vsaddu_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vsaddu_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vsaddu_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vsaddu_vv_d, 8, 8)
typedef void opivx2_rm_fn(void *vd, target_long s1, void *vs2, int i,
CPURISCVState *env, int vxrm);
@@ -2276,9 +2164,8 @@ static inline void
vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
CPURISCVState *env,
uint32_t desc, uint32_t esz, uint32_t dsz,
- opivx2_rm_fn *fn, clear_fn *clearfn)
+ opivx2_rm_fn *fn)
{
- uint32_t vlmax = vext_maxsz(desc) / esz;
uint32_t vm = vext_vm(desc);
uint32_t vl = env->vl;
@@ -2300,27 +2187,25 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
env, vl, vm, 3, fn);
break;
}
-
- clearfn(vd, vl, vl * dsz, vlmax * dsz);
}
/* generate helpers for fixed point instructions with OPIVX format */
-#define GEN_VEXT_VX_RM(NAME, ESZ, DSZ, CLEAR_FN) \
+#define GEN_VEXT_VX_RM(NAME, ESZ, DSZ) \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
vext_vx_rm_2(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \
- do_##NAME, CLEAR_FN); \
+ do_##NAME); \
}
RVVCALL(OPIVX2_RM, vsaddu_vx_b, OP_UUU_B, H1, H1, saddu8)
RVVCALL(OPIVX2_RM, vsaddu_vx_h, OP_UUU_H, H2, H2, saddu16)
RVVCALL(OPIVX2_RM, vsaddu_vx_w, OP_UUU_W, H4, H4, saddu32)
RVVCALL(OPIVX2_RM, vsaddu_vx_d, OP_UUU_D, H8, H8, saddu64)
-GEN_VEXT_VX_RM(vsaddu_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4, clearl)
-GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8, clearq)
+GEN_VEXT_VX_RM(vsaddu_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8)
static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{
@@ -2366,19 +2251,19 @@ RVVCALL(OPIVV2_RM, vsadd_vv_b, OP_SSS_B, H1, H1, H1, sadd8)
RVVCALL(OPIVV2_RM, vsadd_vv_h, OP_SSS_H, H2, H2, H2, sadd16)
RVVCALL(OPIVV2_RM, vsadd_vv_w, OP_SSS_W, H4, H4, H4, sadd32)
RVVCALL(OPIVV2_RM, vsadd_vv_d, OP_SSS_D, H8, H8, H8, sadd64)
-GEN_VEXT_VV_RM(vsadd_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vsadd_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vsadd_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_RM(vsadd_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_RM(vsadd_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vsadd_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vsadd_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vsadd_vv_d, 8, 8)
RVVCALL(OPIVX2_RM, vsadd_vx_b, OP_SSS_B, H1, H1, sadd8)
RVVCALL(OPIVX2_RM, vsadd_vx_h, OP_SSS_H, H2, H2, sadd16)
RVVCALL(OPIVX2_RM, vsadd_vx_w, OP_SSS_W, H4, H4, sadd32)
RVVCALL(OPIVX2_RM, vsadd_vx_d, OP_SSS_D, H8, H8, sadd64)
-GEN_VEXT_VX_RM(vsadd_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4, clearl)
-GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8, clearq)
+GEN_VEXT_VX_RM(vsadd_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8)
static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
{
@@ -2427,19 +2312,19 @@ RVVCALL(OPIVV2_RM, vssubu_vv_b, OP_UUU_B, H1, H1, H1, ssubu8)
RVVCALL(OPIVV2_RM, vssubu_vv_h, OP_UUU_H, H2, H2, H2, ssubu16)
RVVCALL(OPIVV2_RM, vssubu_vv_w, OP_UUU_W, H4, H4, H4, ssubu32)
RVVCALL(OPIVV2_RM, vssubu_vv_d, OP_UUU_D, H8, H8, H8, ssubu64)
-GEN_VEXT_VV_RM(vssubu_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vssubu_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vssubu_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_RM(vssubu_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_RM(vssubu_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vssubu_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vssubu_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vssubu_vv_d, 8, 8)
RVVCALL(OPIVX2_RM, vssubu_vx_b, OP_UUU_B, H1, H1, ssubu8)
RVVCALL(OPIVX2_RM, vssubu_vx_h, OP_UUU_H, H2, H2, ssubu16)
RVVCALL(OPIVX2_RM, vssubu_vx_w, OP_UUU_W, H4, H4, ssubu32)
RVVCALL(OPIVX2_RM, vssubu_vx_d, OP_UUU_D, H8, H8, ssubu64)
-GEN_VEXT_VX_RM(vssubu_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4, clearl)
-GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8, clearq)
+GEN_VEXT_VX_RM(vssubu_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8)
static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{
@@ -2485,19 +2370,19 @@ RVVCALL(OPIVV2_RM, vssub_vv_b, OP_SSS_B, H1, H1, H1, ssub8)
RVVCALL(OPIVV2_RM, vssub_vv_h, OP_SSS_H, H2, H2, H2, ssub16)
RVVCALL(OPIVV2_RM, vssub_vv_w, OP_SSS_W, H4, H4, H4, ssub32)
RVVCALL(OPIVV2_RM, vssub_vv_d, OP_SSS_D, H8, H8, H8, ssub64)
-GEN_VEXT_VV_RM(vssub_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vssub_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vssub_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_RM(vssub_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_RM(vssub_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vssub_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vssub_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vssub_vv_d, 8, 8)
RVVCALL(OPIVX2_RM, vssub_vx_b, OP_SSS_B, H1, H1, ssub8)
RVVCALL(OPIVX2_RM, vssub_vx_h, OP_SSS_H, H2, H2, ssub16)
RVVCALL(OPIVX2_RM, vssub_vx_w, OP_SSS_W, H4, H4, ssub32)
RVVCALL(OPIVX2_RM, vssub_vx_d, OP_SSS_D, H8, H8, ssub64)
-GEN_VEXT_VX_RM(vssub_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vssub_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vssub_vx_w, 4, 4, clearl)
-GEN_VEXT_VX_RM(vssub_vx_d, 8, 8, clearq)
+GEN_VEXT_VX_RM(vssub_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vssub_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vssub_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vssub_vx_d, 8, 8)
/* Vector Single-Width Averaging Add and Subtract */
static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift)
@@ -2549,19 +2434,19 @@ RVVCALL(OPIVV2_RM, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64)
-GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8)
RVVCALL(OPIVX2_RM, vaadd_vx_b, OP_SSS_B, H1, H1, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_h, OP_SSS_H, H2, H2, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64)
-GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4, clearl)
-GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8, clearq)
+GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8)
static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
@@ -2585,19 +2470,19 @@ RVVCALL(OPIVV2_RM, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64)
-GEN_VEXT_VV_RM(vasub_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vasub_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vasub_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_RM(vasub_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_RM(vasub_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vasub_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vasub_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vasub_vv_d, 8, 8)
RVVCALL(OPIVX2_RM, vasub_vx_b, OP_SSS_B, H1, H1, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_h, OP_SSS_H, H2, H2, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_w, OP_SSS_W, H4, H4, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_d, OP_SSS_D, H8, H8, asub64)
-GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl)
-GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq)
+GEN_VEXT_VX_RM(vasub_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vasub_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vasub_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vasub_vx_d, 8, 8)
/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
@@ -2692,19 +2577,19 @@ RVVCALL(OPIVV2_RM, vsmul_vv_b, OP_SSS_B, H1, H1, H1, vsmul8)
RVVCALL(OPIVV2_RM, vsmul_vv_h, OP_SSS_H, H2, H2, H2, vsmul16)
RVVCALL(OPIVV2_RM, vsmul_vv_w, OP_SSS_W, H4, H4, H4, vsmul32)
RVVCALL(OPIVV2_RM, vsmul_vv_d, OP_SSS_D, H8, H8, H8, vsmul64)
-GEN_VEXT_VV_RM(vsmul_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vsmul_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vsmul_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_RM(vsmul_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_RM(vsmul_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vsmul_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vsmul_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vsmul_vv_d, 8, 8)
RVVCALL(OPIVX2_RM, vsmul_vx_b, OP_SSS_B, H1, H1, vsmul8)
RVVCALL(OPIVX2_RM, vsmul_vx_h, OP_SSS_H, H2, H2, vsmul16)
RVVCALL(OPIVX2_RM, vsmul_vx_w, OP_SSS_W, H4, H4, vsmul32)
RVVCALL(OPIVX2_RM, vsmul_vx_d, OP_SSS_D, H8, H8, vsmul64)
-GEN_VEXT_VX_RM(vsmul_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4, clearl)
-GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8, clearq)
+GEN_VEXT_VX_RM(vsmul_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8)
/* Vector Widening Saturating Scaled Multiply-Add */
static inline uint16_t
@@ -2757,9 +2642,9 @@ do_##NAME(void *vd, void *vs1, void *vs2, int i, \
RVVCALL(OPIVV3_RM, vwsmaccu_vv_b, WOP_UUU_B, H2, H1, H1, vwsmaccu8)
RVVCALL(OPIVV3_RM, vwsmaccu_vv_h, WOP_UUU_H, H4, H2, H2, vwsmaccu16)
RVVCALL(OPIVV3_RM, vwsmaccu_vv_w, WOP_UUU_W, H8, H4, H4, vwsmaccu32)
-GEN_VEXT_VV_RM(vwsmaccu_vv_b, 1, 2, clearh)
-GEN_VEXT_VV_RM(vwsmaccu_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_RM(vwsmaccu_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_RM(vwsmaccu_vv_b, 1, 2)
+GEN_VEXT_VV_RM(vwsmaccu_vv_h, 2, 4)
+GEN_VEXT_VV_RM(vwsmaccu_vv_w, 4, 8)
#define OPIVX3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
static inline void \
@@ -2774,9 +2659,9 @@ do_##NAME(void *vd, target_long s1, void *vs2, int i, \
RVVCALL(OPIVX3_RM, vwsmaccu_vx_b, WOP_UUU_B, H2, H1, vwsmaccu8)
RVVCALL(OPIVX3_RM, vwsmaccu_vx_h, WOP_UUU_H, H4, H2, vwsmaccu16)
RVVCALL(OPIVX3_RM, vwsmaccu_vx_w, WOP_UUU_W, H8, H4, vwsmaccu32)
-GEN_VEXT_VX_RM(vwsmaccu_vx_b, 1, 2, clearh)
-GEN_VEXT_VX_RM(vwsmaccu_vx_h, 2, 4, clearl)
-GEN_VEXT_VX_RM(vwsmaccu_vx_w, 4, 8, clearq)
+GEN_VEXT_VX_RM(vwsmaccu_vx_b, 1, 2)
+GEN_VEXT_VX_RM(vwsmaccu_vx_h, 2, 4)
+GEN_VEXT_VX_RM(vwsmaccu_vx_w, 4, 8)
static inline int16_t
vwsmacc8(CPURISCVState *env, int vxrm, int8_t a, int8_t b, int16_t c)
@@ -2815,15 +2700,15 @@ vwsmacc32(CPURISCVState *env, int vxrm, int32_t a, int32_t b, int64_t c)
RVVCALL(OPIVV3_RM, vwsmacc_vv_b, WOP_SSS_B, H2, H1, H1, vwsmacc8)
RVVCALL(OPIVV3_RM, vwsmacc_vv_h, WOP_SSS_H, H4, H2, H2, vwsmacc16)
RVVCALL(OPIVV3_RM, vwsmacc_vv_w, WOP_SSS_W, H8, H4, H4, vwsmacc32)
-GEN_VEXT_VV_RM(vwsmacc_vv_b, 1, 2, clearh)
-GEN_VEXT_VV_RM(vwsmacc_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_RM(vwsmacc_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_RM(vwsmacc_vv_b, 1, 2)
+GEN_VEXT_VV_RM(vwsmacc_vv_h, 2, 4)
+GEN_VEXT_VV_RM(vwsmacc_vv_w, 4, 8)
RVVCALL(OPIVX3_RM, vwsmacc_vx_b, WOP_SSS_B, H2, H1, vwsmacc8)
RVVCALL(OPIVX3_RM, vwsmacc_vx_h, WOP_SSS_H, H4, H2, vwsmacc16)
RVVCALL(OPIVX3_RM, vwsmacc_vx_w, WOP_SSS_W, H8, H4, vwsmacc32)
-GEN_VEXT_VX_RM(vwsmacc_vx_b, 1, 2, clearh)
-GEN_VEXT_VX_RM(vwsmacc_vx_h, 2, 4, clearl)
-GEN_VEXT_VX_RM(vwsmacc_vx_w, 4, 8, clearq)
+GEN_VEXT_VX_RM(vwsmacc_vx_b, 1, 2)
+GEN_VEXT_VX_RM(vwsmacc_vx_h, 2, 4)
+GEN_VEXT_VX_RM(vwsmacc_vx_w, 4, 8)
static inline int16_t
vwsmaccsu8(CPURISCVState *env, int vxrm, uint8_t a, int8_t b, int16_t c)
@@ -2861,15 +2746,15 @@ vwsmaccsu32(CPURISCVState *env, int vxrm, uint32_t a, int32_t b, int64_t c)
RVVCALL(OPIVV3_RM, vwsmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, vwsmaccsu8)
RVVCALL(OPIVV3_RM, vwsmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, vwsmaccsu16)
RVVCALL(OPIVV3_RM, vwsmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, vwsmaccsu32)
-GEN_VEXT_VV_RM(vwsmaccsu_vv_b, 1, 2, clearh)
-GEN_VEXT_VV_RM(vwsmaccsu_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_RM(vwsmaccsu_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_RM(vwsmaccsu_vv_b, 1, 2)
+GEN_VEXT_VV_RM(vwsmaccsu_vv_h, 2, 4)
+GEN_VEXT_VV_RM(vwsmaccsu_vv_w, 4, 8)
RVVCALL(OPIVX3_RM, vwsmaccsu_vx_b, WOP_SSU_B, H2, H1, vwsmaccsu8)
RVVCALL(OPIVX3_RM, vwsmaccsu_vx_h, WOP_SSU_H, H4, H2, vwsmaccsu16)
RVVCALL(OPIVX3_RM, vwsmaccsu_vx_w, WOP_SSU_W, H8, H4, vwsmaccsu32)
-GEN_VEXT_VX_RM(vwsmaccsu_vx_b, 1, 2, clearh)
-GEN_VEXT_VX_RM(vwsmaccsu_vx_h, 2, 4, clearl)
-GEN_VEXT_VX_RM(vwsmaccsu_vx_w, 4, 8, clearq)
+GEN_VEXT_VX_RM(vwsmaccsu_vx_b, 1, 2)
+GEN_VEXT_VX_RM(vwsmaccsu_vx_h, 2, 4)
+GEN_VEXT_VX_RM(vwsmaccsu_vx_w, 4, 8)
static inline int16_t
vwsmaccus8(CPURISCVState *env, int vxrm, int8_t a, uint8_t b, int16_t c)
@@ -2907,9 +2792,9 @@ vwsmaccus32(CPURISCVState *env, int vxrm, int32_t a, uint32_t b, int64_t c)
RVVCALL(OPIVX3_RM, vwsmaccus_vx_b, WOP_SUS_B, H2, H1, vwsmaccus8)
RVVCALL(OPIVX3_RM, vwsmaccus_vx_h, WOP_SUS_H, H4, H2, vwsmaccus16)
RVVCALL(OPIVX3_RM, vwsmaccus_vx_w, WOP_SUS_W, H8, H4, vwsmaccus32)
-GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2, clearh)
-GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4, clearl)
-GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8, clearq)
+GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2)
+GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4)
+GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8)
/* Vector Single-Width Scaling Shift Instructions */
static inline uint8_t
@@ -2956,19 +2841,19 @@ RVVCALL(OPIVV2_RM, vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8)
RVVCALL(OPIVV2_RM, vssrl_vv_h, OP_UUU_H, H2, H2, H2, vssrl16)
RVVCALL(OPIVV2_RM, vssrl_vv_w, OP_UUU_W, H4, H4, H4, vssrl32)
RVVCALL(OPIVV2_RM, vssrl_vv_d, OP_UUU_D, H8, H8, H8, vssrl64)
-GEN_VEXT_VV_RM(vssrl_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vssrl_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vssrl_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_RM(vssrl_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_RM(vssrl_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vssrl_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vssrl_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vssrl_vv_d, 8, 8)
RVVCALL(OPIVX2_RM, vssrl_vx_b, OP_UUU_B, H1, H1, vssrl8)
RVVCALL(OPIVX2_RM, vssrl_vx_h, OP_UUU_H, H2, H2, vssrl16)
RVVCALL(OPIVX2_RM, vssrl_vx_w, OP_UUU_W, H4, H4, vssrl32)
RVVCALL(OPIVX2_RM, vssrl_vx_d, OP_UUU_D, H8, H8, vssrl64)
-GEN_VEXT_VX_RM(vssrl_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4, clearl)
-GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8, clearq)
+GEN_VEXT_VX_RM(vssrl_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8)
static inline int8_t
vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
@@ -3015,19 +2900,19 @@ RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8)
RVVCALL(OPIVV2_RM, vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16)
RVVCALL(OPIVV2_RM, vssra_vv_w, OP_SSS_W, H4, H4, H4, vssra32)
RVVCALL(OPIVV2_RM, vssra_vv_d, OP_SSS_D, H8, H8, H8, vssra64)
-GEN_VEXT_VV_RM(vssra_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vssra_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vssra_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_RM(vssra_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_RM(vssra_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vssra_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vssra_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vssra_vv_d, 8, 8)
RVVCALL(OPIVX2_RM, vssra_vx_b, OP_SSS_B, H1, H1, vssra8)
RVVCALL(OPIVX2_RM, vssra_vx_h, OP_SSS_H, H2, H2, vssra16)
RVVCALL(OPIVX2_RM, vssra_vx_w, OP_SSS_W, H4, H4, vssra32)
RVVCALL(OPIVX2_RM, vssra_vx_d, OP_SSS_D, H8, H8, vssra64)
-GEN_VEXT_VX_RM(vssra_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vssra_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vssra_vx_w, 4, 4, clearl)
-GEN_VEXT_VX_RM(vssra_vx_d, 8, 8, clearq)
+GEN_VEXT_VX_RM(vssra_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vssra_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vssra_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vssra_vx_d, 8, 8)
/* Vector Narrowing Fixed-Point Clip Instructions */
static inline int8_t
@@ -3090,16 +2975,16 @@ vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
RVVCALL(OPIVV2_RM, vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8)
RVVCALL(OPIVV2_RM, vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16)
RVVCALL(OPIVV2_RM, vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32)
-GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4)
RVVCALL(OPIVX2_RM, vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8)
RVVCALL(OPIVX2_RM, vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16)
RVVCALL(OPIVX2_RM, vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32)
-GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4)
static inline uint8_t
vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
@@ -3152,16 +3037,16 @@ vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
RVVCALL(OPIVV2_RM, vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8)
RVVCALL(OPIVV2_RM, vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, vnclipu16)
RVVCALL(OPIVV2_RM, vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
-GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1, clearb)
-GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4)
RVVCALL(OPIVX2_RM, vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8)
RVVCALL(OPIVX2_RM, vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16)
RVVCALL(OPIVX2_RM, vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32)
-GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1, clearb)
-GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2, clearh)
-GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4)
  1934. /*
  1935. *** Vector Float Point Arithmetic Instructions
  1936. @@ -3176,12 +3061,11 @@ static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \
  1937. *((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status); \
  1938. }
  1939. -#define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ, CLEAR_FN) \
  1940. +#define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ) \
  1941. void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  1942. void *vs2, CPURISCVState *env, \
  1943. uint32_t desc) \
  1944. { \
  1945. - uint32_t vlmax = vext_maxsz(desc) / ESZ; \
  1946. uint32_t vm = vext_vm(desc); \
  1947. uint32_t vl = env->vl; \
  1948. uint32_t i; \
  1949. @@ -3192,15 +3076,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  1950. } \
  1951. do_##NAME(vd, vs1, vs2, i, env); \
  1952. } \
  1953. - CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
  1954. }
  1955. RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add)
RVVCALL(OPFVV2, vfadd_vv_w, OP_UUU_W, H4, H4, H4, float32_add)
RVVCALL(OPFVV2, vfadd_vv_d, OP_UUU_D, H8, H8, H8, float64_add)
-GEN_VEXT_VV_ENV(vfadd_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfadd_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfadd_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfadd_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfadd_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfadd_vv_d, 8, 8)
#define OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
@@ -3210,12 +3093,11 @@ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
*((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\
}
-#define GEN_VEXT_VF(NAME, ESZ, DSZ, CLEAR_FN) \
+#define GEN_VEXT_VF(NAME, ESZ, DSZ) \
void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
void *vs2, CPURISCVState *env, \
uint32_t desc) \
{ \
- uint32_t vlmax = vext_maxsz(desc) / ESZ; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t i; \
@@ -3226,28 +3108,27 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
} \
do_##NAME(vd, s1, vs2, i, env); \
} \
- CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
}
RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add)
RVVCALL(OPFVF2, vfadd_vf_w, OP_UUU_W, H4, H4, float32_add)
RVVCALL(OPFVF2, vfadd_vf_d, OP_UUU_D, H8, H8, float64_add)
-GEN_VEXT_VF(vfadd_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfadd_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfadd_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfadd_vf_h, 2, 2)
+GEN_VEXT_VF(vfadd_vf_w, 4, 4)
+GEN_VEXT_VF(vfadd_vf_d, 8, 8)
RVVCALL(OPFVV2, vfsub_vv_h, OP_UUU_H, H2, H2, H2, float16_sub)
RVVCALL(OPFVV2, vfsub_vv_w, OP_UUU_W, H4, H4, H4, float32_sub)
RVVCALL(OPFVV2, vfsub_vv_d, OP_UUU_D, H8, H8, H8, float64_sub)
-GEN_VEXT_VV_ENV(vfsub_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfsub_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfsub_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfsub_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfsub_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfsub_vv_d, 8, 8)
RVVCALL(OPFVF2, vfsub_vf_h, OP_UUU_H, H2, H2, float16_sub)
RVVCALL(OPFVF2, vfsub_vf_w, OP_UUU_W, H4, H4, float32_sub)
RVVCALL(OPFVF2, vfsub_vf_d, OP_UUU_D, H8, H8, float64_sub)
-GEN_VEXT_VF(vfsub_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfsub_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfsub_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfsub_vf_h, 2, 2)
+GEN_VEXT_VF(vfsub_vf_w, 4, 4)
+GEN_VEXT_VF(vfsub_vf_d, 8, 8)
static uint16_t float16_rsub(uint16_t a, uint16_t b, float_status *s)
{
@@ -3267,9 +3148,9 @@ static uint64_t float64_rsub(uint64_t a, uint64_t b, float_status *s)
RVVCALL(OPFVF2, vfrsub_vf_h, OP_UUU_H, H2, H2, float16_rsub)
RVVCALL(OPFVF2, vfrsub_vf_w, OP_UUU_W, H4, H4, float32_rsub)
RVVCALL(OPFVF2, vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub)
-GEN_VEXT_VF(vfrsub_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfrsub_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfrsub_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfrsub_vf_h, 2, 2)
+GEN_VEXT_VF(vfrsub_vf_w, 4, 4)
+GEN_VEXT_VF(vfrsub_vf_d, 8, 8)
/* Vector Widening Floating-Point Add/Subtract Instructions */
static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s)
@@ -3287,12 +3168,12 @@ static uint64_t vfwadd32(uint32_t a, uint32_t b, float_status *s)
RVVCALL(OPFVV2, vfwadd_vv_h, WOP_UUU_H, H4, H2, H2, vfwadd16)
RVVCALL(OPFVV2, vfwadd_vv_w, WOP_UUU_W, H8, H4, H4, vfwadd32)
-GEN_VEXT_VV_ENV(vfwadd_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_ENV(vfwadd_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_ENV(vfwadd_vv_h, 2, 4)
+GEN_VEXT_VV_ENV(vfwadd_vv_w, 4, 8)
RVVCALL(OPFVF2, vfwadd_vf_h, WOP_UUU_H, H4, H2, vfwadd16)
RVVCALL(OPFVF2, vfwadd_vf_w, WOP_UUU_W, H8, H4, vfwadd32)
-GEN_VEXT_VF(vfwadd_vf_h, 2, 4, clearl)
-GEN_VEXT_VF(vfwadd_vf_w, 4, 8, clearq)
+GEN_VEXT_VF(vfwadd_vf_h, 2, 4)
+GEN_VEXT_VF(vfwadd_vf_w, 4, 8)
static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s)
{
@@ -3309,12 +3190,12 @@ static uint64_t vfwsub32(uint32_t a, uint32_t b, float_status *s)
RVVCALL(OPFVV2, vfwsub_vv_h, WOP_UUU_H, H4, H2, H2, vfwsub16)
RVVCALL(OPFVV2, vfwsub_vv_w, WOP_UUU_W, H8, H4, H4, vfwsub32)
-GEN_VEXT_VV_ENV(vfwsub_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_ENV(vfwsub_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_ENV(vfwsub_vv_h, 2, 4)
+GEN_VEXT_VV_ENV(vfwsub_vv_w, 4, 8)
RVVCALL(OPFVF2, vfwsub_vf_h, WOP_UUU_H, H4, H2, vfwsub16)
RVVCALL(OPFVF2, vfwsub_vf_w, WOP_UUU_W, H8, H4, vfwsub32)
-GEN_VEXT_VF(vfwsub_vf_h, 2, 4, clearl)
-GEN_VEXT_VF(vfwsub_vf_w, 4, 8, clearq)
+GEN_VEXT_VF(vfwsub_vf_h, 2, 4)
+GEN_VEXT_VF(vfwsub_vf_w, 4, 8)
static uint32_t vfwaddw16(uint32_t a, uint16_t b, float_status *s)
{
@@ -3328,12 +3209,12 @@ static uint64_t vfwaddw32(uint64_t a, uint32_t b, float_status *s)
RVVCALL(OPFVV2, vfwadd_wv_h, WOP_WUUU_H, H4, H2, H2, vfwaddw16)
RVVCALL(OPFVV2, vfwadd_wv_w, WOP_WUUU_W, H8, H4, H4, vfwaddw32)
-GEN_VEXT_VV_ENV(vfwadd_wv_h, 2, 4, clearl)
-GEN_VEXT_VV_ENV(vfwadd_wv_w, 4, 8, clearq)
+GEN_VEXT_VV_ENV(vfwadd_wv_h, 2, 4)
+GEN_VEXT_VV_ENV(vfwadd_wv_w, 4, 8)
RVVCALL(OPFVF2, vfwadd_wf_h, WOP_WUUU_H, H4, H2, vfwaddw16)
RVVCALL(OPFVF2, vfwadd_wf_w, WOP_WUUU_W, H8, H4, vfwaddw32)
-GEN_VEXT_VF(vfwadd_wf_h, 2, 4, clearl)
-GEN_VEXT_VF(vfwadd_wf_w, 4, 8, clearq)
+GEN_VEXT_VF(vfwadd_wf_h, 2, 4)
+GEN_VEXT_VF(vfwadd_wf_w, 4, 8)
static uint32_t vfwsubw16(uint32_t a, uint16_t b, float_status *s)
{
@@ -3347,39 +3228,39 @@ static uint64_t vfwsubw32(uint64_t a, uint32_t b, float_status *s)
RVVCALL(OPFVV2, vfwsub_wv_h, WOP_WUUU_H, H4, H2, H2, vfwsubw16)
RVVCALL(OPFVV2, vfwsub_wv_w, WOP_WUUU_W, H8, H4, H4, vfwsubw32)
-GEN_VEXT_VV_ENV(vfwsub_wv_h, 2, 4, clearl)
-GEN_VEXT_VV_ENV(vfwsub_wv_w, 4, 8, clearq)
+GEN_VEXT_VV_ENV(vfwsub_wv_h, 2, 4)
+GEN_VEXT_VV_ENV(vfwsub_wv_w, 4, 8)
RVVCALL(OPFVF2, vfwsub_wf_h, WOP_WUUU_H, H4, H2, vfwsubw16)
RVVCALL(OPFVF2, vfwsub_wf_w, WOP_WUUU_W, H8, H4, vfwsubw32)
-GEN_VEXT_VF(vfwsub_wf_h, 2, 4, clearl)
-GEN_VEXT_VF(vfwsub_wf_w, 4, 8, clearq)
+GEN_VEXT_VF(vfwsub_wf_h, 2, 4)
+GEN_VEXT_VF(vfwsub_wf_w, 4, 8)
/* Vector Single-Width Floating-Point Multiply/Divide Instructions */
RVVCALL(OPFVV2, vfmul_vv_h, OP_UUU_H, H2, H2, H2, float16_mul)
RVVCALL(OPFVV2, vfmul_vv_w, OP_UUU_W, H4, H4, H4, float32_mul)
RVVCALL(OPFVV2, vfmul_vv_d, OP_UUU_D, H8, H8, H8, float64_mul)
-GEN_VEXT_VV_ENV(vfmul_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfmul_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfmul_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfmul_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfmul_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfmul_vv_d, 8, 8)
RVVCALL(OPFVF2, vfmul_vf_h, OP_UUU_H, H2, H2, float16_mul)
RVVCALL(OPFVF2, vfmul_vf_w, OP_UUU_W, H4, H4, float32_mul)
RVVCALL(OPFVF2, vfmul_vf_d, OP_UUU_D, H8, H8, float64_mul)
-GEN_VEXT_VF(vfmul_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfmul_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfmul_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfmul_vf_h, 2, 2)
+GEN_VEXT_VF(vfmul_vf_w, 4, 4)
+GEN_VEXT_VF(vfmul_vf_d, 8, 8)
RVVCALL(OPFVV2, vfdiv_vv_h, OP_UUU_H, H2, H2, H2, float16_div)
RVVCALL(OPFVV2, vfdiv_vv_w, OP_UUU_W, H4, H4, H4, float32_div)
RVVCALL(OPFVV2, vfdiv_vv_d, OP_UUU_D, H8, H8, H8, float64_div)
-GEN_VEXT_VV_ENV(vfdiv_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfdiv_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfdiv_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfdiv_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfdiv_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfdiv_vv_d, 8, 8)
RVVCALL(OPFVF2, vfdiv_vf_h, OP_UUU_H, H2, H2, float16_div)
RVVCALL(OPFVF2, vfdiv_vf_w, OP_UUU_W, H4, H4, float32_div)
RVVCALL(OPFVF2, vfdiv_vf_d, OP_UUU_D, H8, H8, float64_div)
-GEN_VEXT_VF(vfdiv_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfdiv_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfdiv_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfdiv_vf_h, 2, 2)
+GEN_VEXT_VF(vfdiv_vf_w, 4, 4)
+GEN_VEXT_VF(vfdiv_vf_d, 8, 8)
static uint16_t float16_rdiv(uint16_t a, uint16_t b, float_status *s)
{
@@ -3399,9 +3280,9 @@ static uint64_t float64_rdiv(uint64_t a, uint64_t b, float_status *s)
RVVCALL(OPFVF2, vfrdiv_vf_h, OP_UUU_H, H2, H2, float16_rdiv)
RVVCALL(OPFVF2, vfrdiv_vf_w, OP_UUU_W, H4, H4, float32_rdiv)
RVVCALL(OPFVF2, vfrdiv_vf_d, OP_UUU_D, H8, H8, float64_rdiv)
-GEN_VEXT_VF(vfrdiv_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfrdiv_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfrdiv_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfrdiv_vf_h, 2, 2)
+GEN_VEXT_VF(vfrdiv_vf_w, 4, 4)
+GEN_VEXT_VF(vfrdiv_vf_d, 8, 8)
/* Vector Widening Floating-Point Multiply */
static uint32_t vfwmul16(uint16_t a, uint16_t b, float_status *s)
@@ -3418,12 +3299,12 @@ static uint64_t vfwmul32(uint32_t a, uint32_t b, float_status *s)
}
RVVCALL(OPFVV2, vfwmul_vv_h, WOP_UUU_H, H4, H2, H2, vfwmul16)
RVVCALL(OPFVV2, vfwmul_vv_w, WOP_UUU_W, H8, H4, H4, vfwmul32)
-GEN_VEXT_VV_ENV(vfwmul_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_ENV(vfwmul_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_ENV(vfwmul_vv_h, 2, 4)
+GEN_VEXT_VV_ENV(vfwmul_vv_w, 4, 8)
RVVCALL(OPFVF2, vfwmul_vf_h, WOP_UUU_H, H4, H2, vfwmul16)
RVVCALL(OPFVF2, vfwmul_vf_w, WOP_UUU_W, H8, H4, vfwmul32)
-GEN_VEXT_VF(vfwmul_vf_h, 2, 4, clearl)
-GEN_VEXT_VF(vfwmul_vf_w, 4, 8, clearq)
+GEN_VEXT_VF(vfwmul_vf_h, 2, 4)
+GEN_VEXT_VF(vfwmul_vf_w, 4, 8)
/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
#define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
@@ -3454,9 +3335,9 @@ static uint64_t fmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfmacc_vv_h, OP_UUU_H, H2, H2, H2, fmacc16)
RVVCALL(OPFVV3, vfmacc_vv_w, OP_UUU_W, H4, H4, H4, fmacc32)
RVVCALL(OPFVV3, vfmacc_vv_d, OP_UUU_D, H8, H8, H8, fmacc64)
-GEN_VEXT_VV_ENV(vfmacc_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfmacc_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfmacc_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfmacc_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfmacc_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfmacc_vv_d, 8, 8)
#define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
@@ -3470,9 +3351,9 @@ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
RVVCALL(OPFVF3, vfmacc_vf_h, OP_UUU_H, H2, H2, fmacc16)
RVVCALL(OPFVF3, vfmacc_vf_w, OP_UUU_W, H4, H4, fmacc32)
RVVCALL(OPFVF3, vfmacc_vf_d, OP_UUU_D, H8, H8, fmacc64)
-GEN_VEXT_VF(vfmacc_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfmacc_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfmacc_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfmacc_vf_h, 2, 2)
+GEN_VEXT_VF(vfmacc_vf_w, 4, 4)
+GEN_VEXT_VF(vfmacc_vf_d, 8, 8)
static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
@@ -3495,15 +3376,15 @@ static uint64_t fnmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16)
RVVCALL(OPFVV3, vfnmacc_vv_w, OP_UUU_W, H4, H4, H4, fnmacc32)
RVVCALL(OPFVV3, vfnmacc_vv_d, OP_UUU_D, H8, H8, H8, fnmacc64)
-GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8, 8)
RVVCALL(OPFVF3, vfnmacc_vf_h, OP_UUU_H, H2, H2, fnmacc16)
RVVCALL(OPFVF3, vfnmacc_vf_w, OP_UUU_W, H4, H4, fnmacc32)
RVVCALL(OPFVF3, vfnmacc_vf_d, OP_UUU_D, H8, H8, fnmacc64)
-GEN_VEXT_VF(vfnmacc_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfnmacc_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfnmacc_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfnmacc_vf_h, 2, 2)
+GEN_VEXT_VF(vfnmacc_vf_w, 4, 4)
+GEN_VEXT_VF(vfnmacc_vf_d, 8, 8)
static uint16_t fmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
@@ -3523,15 +3404,15 @@ static uint64_t fmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfmsac_vv_h, OP_UUU_H, H2, H2, H2, fmsac16)
RVVCALL(OPFVV3, vfmsac_vv_w, OP_UUU_W, H4, H4, H4, fmsac32)
RVVCALL(OPFVV3, vfmsac_vv_d, OP_UUU_D, H8, H8, H8, fmsac64)
-GEN_VEXT_VV_ENV(vfmsac_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfmsac_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfmsac_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfmsac_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfmsac_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfmsac_vv_d, 8, 8)
RVVCALL(OPFVF3, vfmsac_vf_h, OP_UUU_H, H2, H2, fmsac16)
RVVCALL(OPFVF3, vfmsac_vf_w, OP_UUU_W, H4, H4, fmsac32)
RVVCALL(OPFVF3, vfmsac_vf_d, OP_UUU_D, H8, H8, fmsac64)
-GEN_VEXT_VF(vfmsac_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfmsac_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfmsac_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfmsac_vf_h, 2, 2)
+GEN_VEXT_VF(vfmsac_vf_w, 4, 4)
+GEN_VEXT_VF(vfmsac_vf_d, 8, 8)
static uint16_t fnmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
@@ -3551,15 +3432,15 @@ static uint64_t fnmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfnmsac_vv_h, OP_UUU_H, H2, H2, H2, fnmsac16)
RVVCALL(OPFVV3, vfnmsac_vv_w, OP_UUU_W, H4, H4, H4, fnmsac32)
RVVCALL(OPFVV3, vfnmsac_vv_d, OP_UUU_D, H8, H8, H8, fnmsac64)
-GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8, 8)
RVVCALL(OPFVF3, vfnmsac_vf_h, OP_UUU_H, H2, H2, fnmsac16)
RVVCALL(OPFVF3, vfnmsac_vf_w, OP_UUU_W, H4, H4, fnmsac32)
RVVCALL(OPFVF3, vfnmsac_vf_d, OP_UUU_D, H8, H8, fnmsac64)
-GEN_VEXT_VF(vfnmsac_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfnmsac_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfnmsac_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfnmsac_vf_h, 2, 2)
+GEN_VEXT_VF(vfnmsac_vf_w, 4, 4)
+GEN_VEXT_VF(vfnmsac_vf_d, 8, 8)
static uint16_t fmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
@@ -3579,15 +3460,15 @@ static uint64_t fmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfmadd_vv_h, OP_UUU_H, H2, H2, H2, fmadd16)
RVVCALL(OPFVV3, vfmadd_vv_w, OP_UUU_W, H4, H4, H4, fmadd32)
RVVCALL(OPFVV3, vfmadd_vv_d, OP_UUU_D, H8, H8, H8, fmadd64)
-GEN_VEXT_VV_ENV(vfmadd_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfmadd_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfmadd_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfmadd_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfmadd_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfmadd_vv_d, 8, 8)
RVVCALL(OPFVF3, vfmadd_vf_h, OP_UUU_H, H2, H2, fmadd16)
RVVCALL(OPFVF3, vfmadd_vf_w, OP_UUU_W, H4, H4, fmadd32)
RVVCALL(OPFVF3, vfmadd_vf_d, OP_UUU_D, H8, H8, fmadd64)
-GEN_VEXT_VF(vfmadd_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfmadd_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfmadd_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfmadd_vf_h, 2, 2)
+GEN_VEXT_VF(vfmadd_vf_w, 4, 4)
+GEN_VEXT_VF(vfmadd_vf_d, 8, 8)
static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
@@ -3610,15 +3491,15 @@ static uint64_t fnmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16)
RVVCALL(OPFVV3, vfnmadd_vv_w, OP_UUU_W, H4, H4, H4, fnmadd32)
RVVCALL(OPFVV3, vfnmadd_vv_d, OP_UUU_D, H8, H8, H8, fnmadd64)
-GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8, 8)
RVVCALL(OPFVF3, vfnmadd_vf_h, OP_UUU_H, H2, H2, fnmadd16)
RVVCALL(OPFVF3, vfnmadd_vf_w, OP_UUU_W, H4, H4, fnmadd32)
RVVCALL(OPFVF3, vfnmadd_vf_d, OP_UUU_D, H8, H8, fnmadd64)
-GEN_VEXT_VF(vfnmadd_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfnmadd_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfnmadd_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfnmadd_vf_h, 2, 2)
+GEN_VEXT_VF(vfnmadd_vf_w, 4, 4)
+GEN_VEXT_VF(vfnmadd_vf_d, 8, 8)
static uint16_t fmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
@@ -3638,15 +3519,15 @@ static uint64_t fmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfmsub_vv_h, OP_UUU_H, H2, H2, H2, fmsub16)
RVVCALL(OPFVV3, vfmsub_vv_w, OP_UUU_W, H4, H4, H4, fmsub32)
RVVCALL(OPFVV3, vfmsub_vv_d, OP_UUU_D, H8, H8, H8, fmsub64)
-GEN_VEXT_VV_ENV(vfmsub_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfmsub_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfmsub_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfmsub_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfmsub_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfmsub_vv_d, 8, 8)
RVVCALL(OPFVF3, vfmsub_vf_h, OP_UUU_H, H2, H2, fmsub16)
RVVCALL(OPFVF3, vfmsub_vf_w, OP_UUU_W, H4, H4, fmsub32)
RVVCALL(OPFVF3, vfmsub_vf_d, OP_UUU_D, H8, H8, fmsub64)
-GEN_VEXT_VF(vfmsub_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfmsub_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfmsub_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfmsub_vf_h, 2, 2)
+GEN_VEXT_VF(vfmsub_vf_w, 4, 4)
+GEN_VEXT_VF(vfmsub_vf_d, 8, 8)
static uint16_t fnmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
@@ -3666,15 +3547,15 @@ static uint64_t fnmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfnmsub_vv_h, OP_UUU_H, H2, H2, H2, fnmsub16)
RVVCALL(OPFVV3, vfnmsub_vv_w, OP_UUU_W, H4, H4, H4, fnmsub32)
RVVCALL(OPFVV3, vfnmsub_vv_d, OP_UUU_D, H8, H8, H8, fnmsub64)
-GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8, 8)
RVVCALL(OPFVF3, vfnmsub_vf_h, OP_UUU_H, H2, H2, fnmsub16)
RVVCALL(OPFVF3, vfnmsub_vf_w, OP_UUU_W, H4, H4, fnmsub32)
RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64)
-GEN_VEXT_VF(vfnmsub_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfnmsub_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfnmsub_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfnmsub_vf_h, 2, 2)
+GEN_VEXT_VF(vfnmsub_vf_w, 4, 4)
+GEN_VEXT_VF(vfnmsub_vf_d, 8, 8)
/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
@@ -3691,12 +3572,12 @@ static uint64_t fwmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16)
RVVCALL(OPFVV3, vfwmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwmacc32)
-GEN_VEXT_VV_ENV(vfwmacc_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_ENV(vfwmacc_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_ENV(vfwmacc_vv_h, 2, 4)
+GEN_VEXT_VV_ENV(vfwmacc_vv_w, 4, 8)
RVVCALL(OPFVF3, vfwmacc_vf_h, WOP_UUU_H, H4, H2, fwmacc16)
RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32)
-GEN_VEXT_VF(vfwmacc_vf_h, 2, 4, clearl)
-GEN_VEXT_VF(vfwmacc_vf_w, 4, 8, clearq)
+GEN_VEXT_VF(vfwmacc_vf_h, 2, 4)
+GEN_VEXT_VF(vfwmacc_vf_w, 4, 8)
static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
{
@@ -3714,12 +3595,12 @@ static uint64_t fwnmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16)
RVVCALL(OPFVV3, vfwnmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwnmacc32)
-GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 2, 4)
+GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 4, 8)
RVVCALL(OPFVF3, vfwnmacc_vf_h, WOP_UUU_H, H4, H2, fwnmacc16)
RVVCALL(OPFVF3, vfwnmacc_vf_w, WOP_UUU_W, H8, H4, fwnmacc32)
-GEN_VEXT_VF(vfwnmacc_vf_h, 2, 4, clearl)
-GEN_VEXT_VF(vfwnmacc_vf_w, 4, 8, clearq)
+GEN_VEXT_VF(vfwnmacc_vf_h, 2, 4)
+GEN_VEXT_VF(vfwnmacc_vf_w, 4, 8)
static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
{
@@ -3737,12 +3618,12 @@ static uint64_t fwmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16)
RVVCALL(OPFVV3, vfwmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwmsac32)
-GEN_VEXT_VV_ENV(vfwmsac_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_ENV(vfwmsac_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_ENV(vfwmsac_vv_h, 2, 4)
+GEN_VEXT_VV_ENV(vfwmsac_vv_w, 4, 8)
RVVCALL(OPFVF3, vfwmsac_vf_h, WOP_UUU_H, H4, H2, fwmsac16)
RVVCALL(OPFVF3, vfwmsac_vf_w, WOP_UUU_W, H8, H4, fwmsac32)
-GEN_VEXT_VF(vfwmsac_vf_h, 2, 4, clearl)
-GEN_VEXT_VF(vfwmsac_vf_w, 4, 8, clearq)
+GEN_VEXT_VF(vfwmsac_vf_h, 2, 4)
+GEN_VEXT_VF(vfwmsac_vf_w, 4, 8)
static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
{
@@ -3760,12 +3641,12 @@ static uint64_t fwnmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16)
RVVCALL(OPFVV3, vfwnmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwnmsac32)
-GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 2, 4, clearl)
-GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 4, 8, clearq)
+GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 2, 4)
+GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 4, 8)
RVVCALL(OPFVF3, vfwnmsac_vf_h, WOP_UUU_H, H4, H2, fwnmsac16)
RVVCALL(OPFVF3, vfwnmsac_vf_w, WOP_UUU_W, H8, H4, fwnmsac32)
-GEN_VEXT_VF(vfwnmsac_vf_h, 2, 4, clearl)
-GEN_VEXT_VF(vfwnmsac_vf_w, 4, 8, clearq)
+GEN_VEXT_VF(vfwnmsac_vf_h, 2, 4)
+GEN_VEXT_VF(vfwnmsac_vf_w, 4, 8)
/* Vector Floating-Point Square-Root Instruction */
/* (TD, T2, TX2) */
@@ -3781,11 +3662,10 @@ static void do_##NAME(void *vd, void *vs2, int i, \
*((TD *)vd + HD(i)) = OP(s2, &env->fp_status); \
}
-#define GEN_VEXT_V_ENV(NAME, ESZ, DSZ, CLEAR_FN) \
+#define GEN_VEXT_V_ENV(NAME, ESZ, DSZ) \
void HELPER(NAME)(void *vd, void *v0, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = vext_maxsz(desc) / ESZ; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t i; \
@@ -3799,42 +3679,41 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
} \
do_##NAME(vd, vs2, i, env); \
} \
- CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
}
RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt)
RVVCALL(OPFVV1, vfsqrt_v_w, OP_UU_W, H4, H4, float32_sqrt)
RVVCALL(OPFVV1, vfsqrt_v_d, OP_UU_D, H8, H8, float64_sqrt)
-GEN_VEXT_V_ENV(vfsqrt_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4, clearl)
-GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8, clearq)
+GEN_VEXT_V_ENV(vfsqrt_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4)
+GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8)
/* Vector Floating-Point MIN/MAX Instructions */
RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum)
RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum)
RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minnum)
-GEN_VEXT_VV_ENV(vfmin_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfmin_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfmin_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfmin_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfmin_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfmin_vv_d, 8, 8)
RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minnum)
RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minnum)
RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minnum)
-GEN_VEXT_VF(vfmin_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfmin_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfmin_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfmin_vf_h, 2, 2)
+GEN_VEXT_VF(vfmin_vf_w, 4, 4)
+GEN_VEXT_VF(vfmin_vf_d, 8, 8)
RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maxnum)
RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maxnum)
RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maxnum)
-GEN_VEXT_VV_ENV(vfmax_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfmax_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfmax_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfmax_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfmax_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfmax_vv_d, 8, 8)
RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maxnum)
RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maxnum)
RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maxnum)
-GEN_VEXT_VF(vfmax_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfmax_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfmax_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfmax_vf_h, 2, 2)
+GEN_VEXT_VF(vfmax_vf_w, 4, 4)
+GEN_VEXT_VF(vfmax_vf_d, 8, 8)
/* Vector Floating-Point Sign-Injection Instructions */
static uint16_t fsgnj16(uint16_t a, uint16_t b, float_status *s)
@@ -3855,15 +3734,15 @@ static uint64_t fsgnj64(uint64_t a, uint64_t b, float_status *s)
RVVCALL(OPFVV2, vfsgnj_vv_h, OP_UUU_H, H2, H2, H2, fsgnj16)
RVVCALL(OPFVV2, vfsgnj_vv_w, OP_UUU_W, H4, H4, H4, fsgnj32)
RVVCALL(OPFVV2, vfsgnj_vv_d, OP_UUU_D, H8, H8, H8, fsgnj64)
-GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8, 8)
RVVCALL(OPFVF2, vfsgnj_vf_h, OP_UUU_H, H2, H2, fsgnj16)
RVVCALL(OPFVF2, vfsgnj_vf_w, OP_UUU_W, H4, H4, fsgnj32)
RVVCALL(OPFVF2, vfsgnj_vf_d, OP_UUU_D, H8, H8, fsgnj64)
-GEN_VEXT_VF(vfsgnj_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfsgnj_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfsgnj_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfsgnj_vf_h, 2, 2)
+GEN_VEXT_VF(vfsgnj_vf_w, 4, 4)
+GEN_VEXT_VF(vfsgnj_vf_d, 8, 8)
static uint16_t fsgnjn16(uint16_t a, uint16_t b, float_status *s)
{
@@ -3883,15 +3762,15 @@ static uint64_t fsgnjn64(uint64_t a, uint64_t b, float_status *s)
RVVCALL(OPFVV2, vfsgnjn_vv_h, OP_UUU_H, H2, H2, H2, fsgnjn16)
RVVCALL(OPFVV2, vfsgnjn_vv_w, OP_UUU_W, H4, H4, H4, fsgnjn32)
RVVCALL(OPFVV2, vfsgnjn_vv_d, OP_UUU_D, H8, H8, H8, fsgnjn64)
-GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8, 8)
RVVCALL(OPFVF2, vfsgnjn_vf_h, OP_UUU_H, H2, H2, fsgnjn16)
RVVCALL(OPFVF2, vfsgnjn_vf_w, OP_UUU_W, H4, H4, fsgnjn32)
RVVCALL(OPFVF2, vfsgnjn_vf_d, OP_UUU_D, H8, H8, fsgnjn64)
-GEN_VEXT_VF(vfsgnjn_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfsgnjn_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfsgnjn_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfsgnjn_vf_h, 2, 2)
+GEN_VEXT_VF(vfsgnjn_vf_w, 4, 4)
+GEN_VEXT_VF(vfsgnjn_vf_d, 8, 8)
static uint16_t fsgnjx16(uint16_t a, uint16_t b, float_status *s)
{
@@ -3911,15 +3790,15 @@ static uint64_t fsgnjx64(uint64_t a, uint64_t b, float_status *s)
RVVCALL(OPFVV2, vfsgnjx_vv_h, OP_UUU_H, H2, H2, H2, fsgnjx16)
RVVCALL(OPFVV2, vfsgnjx_vv_w, OP_UUU_W, H4, H4, H4, fsgnjx32)
RVVCALL(OPFVV2, vfsgnjx_vv_d, OP_UUU_D, H8, H8, H8, fsgnjx64)
-GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2, 2, clearh)
-GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4, 4, clearl)
-GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8, 8, clearq)
+GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2, 2)
+GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4, 4)
+GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8, 8)
RVVCALL(OPFVF2, vfsgnjx_vf_h, OP_UUU_H, H2, H2, fsgnjx16)
RVVCALL(OPFVF2, vfsgnjx_vf_w, OP_UUU_W, H4, H4, fsgnjx32)
RVVCALL(OPFVF2, vfsgnjx_vf_d, OP_UUU_D, H8, H8, fsgnjx64)
-GEN_VEXT_VF(vfsgnjx_vf_h, 2, 2, clearh)
-GEN_VEXT_VF(vfsgnjx_vf_w, 4, 4, clearl)
-GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8, clearq)
+GEN_VEXT_VF(vfsgnjx_vf_h, 2, 2)
+GEN_VEXT_VF(vfsgnjx_vf_w, 4, 4)
+GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8)
/* Vector Floating-Point Compare Instructions */
#define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP) \
@@ -4076,11 +3955,10 @@ static void do_##NAME(void *vd, void *vs2, int i) \
*((TD *)vd + HD(i)) = OP(s2); \
}
-#define GEN_VEXT_V(NAME, ESZ, DSZ, CLEAR_FN) \
+#define GEN_VEXT_V(NAME, ESZ, DSZ) \
void HELPER(NAME)(void *vd, void *v0, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = vext_maxsz(desc) / ESZ; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t i; \
@@ -4091,7 +3969,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
} \
do_##NAME(vd, vs2, i); \
} \
- CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
}
target_ulong fclass_h(uint64_t frs1)
@@ -4154,19 +4031,17 @@ target_ulong fclass_d(uint64_t frs1)
RVVCALL(OPIVV1, vfclass_v_h, OP_UU_H, H2, H2, fclass_h)
RVVCALL(OPIVV1, vfclass_v_w, OP_UU_W, H4, H4, fclass_s)
RVVCALL(OPIVV1, vfclass_v_d, OP_UU_D, H8, H8, fclass_d)
-GEN_VEXT_V(vfclass_v_h, 2, 2, clearh)
-GEN_VEXT_V(vfclass_v_w, 4, 4, clearl)
-GEN_VEXT_V(vfclass_v_d, 8, 8, clearq)
+GEN_VEXT_V(vfclass_v_h, 2, 2)
+GEN_VEXT_V(vfclass_v_w, 4, 4)
+GEN_VEXT_V(vfclass_v_d, 8, 8)
/* Vector Floating-Point Merge Instruction */
-#define GEN_VFMERGE_VF(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VFMERGE_VF(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
- uint32_t esz = sizeof(ETYPE); \
- uint32_t vlmax = vext_maxsz(desc) / esz; \
uint32_t i; \
\
for (i = 0; i < vl; i++) { \
@@ -4174,45 +4049,44 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
*((ETYPE *)vd + H(i)) \
= (!vm && !vext_elem_mask(v0, i) ? s2 : s1); \
} \
- CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
}
-GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2, clearh)
-GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4, clearl)
-GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8, clearq)
+GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2)
+GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4)
+GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8)
/* Single-Width Floating-Point/Integer Type-Convert Instructions */
/* vfcvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
RVVCALL(OPFVV1, vfcvt_xu_f_v_h, OP_UU_H, H2, H2, float16_to_uint16)
RVVCALL(OPFVV1, vfcvt_xu_f_v_w, OP_UU_W, H4, H4, float32_to_uint32)
RVVCALL(OPFVV1, vfcvt_xu_f_v_d, OP_UU_D, H8, H8, float64_to_uint64)
-GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4, 4, clearl)
-GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8, 8, clearq)
+GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4, 4)
+GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8, 8)
/* vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. */
RVVCALL(OPFVV1, vfcvt_x_f_v_h, OP_UU_H, H2, H2, float16_to_int16)
RVVCALL(OPFVV1, vfcvt_x_f_v_w, OP_UU_W, H4, H4, float32_to_int32)
RVVCALL(OPFVV1, vfcvt_x_f_v_d, OP_UU_D, H8, H8, float64_to_int64)
-GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4, 4, clearl)
-GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8, 8, clearq)
+GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4, 4)
+GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8, 8)
/* vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. */
RVVCALL(OPFVV1, vfcvt_f_xu_v_h, OP_UU_H, H2, H2, uint16_to_float16)
RVVCALL(OPFVV1, vfcvt_f_xu_v_w, OP_UU_W, H4, H4, uint32_to_float32)
RVVCALL(OPFVV1, vfcvt_f_xu_v_d, OP_UU_D, H8, H8, uint64_to_float64)
-GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4, 4, clearl)
-GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8, 8, clearq)
+GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4, 4)
+GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8, 8)
/* vfcvt.f.x.v vd, vs2, vm # Convert integer to float. */
RVVCALL(OPFVV1, vfcvt_f_x_v_h, OP_UU_H, H2, H2, int16_to_float16)
RVVCALL(OPFVV1, vfcvt_f_x_v_w, OP_UU_W, H4, H4, int32_to_float32)
RVVCALL(OPFVV1, vfcvt_f_x_v_d, OP_UU_D, H8, H8, int64_to_float64)
-GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4, 4, clearl)
-GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8, clearq)
+GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4, 4)
+GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8)
/* Widening Floating-Point/Integer Type-Convert Instructions */
/* (TD, T2, TX2) */
@@ -4221,26 +4095,26 @@ GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8, clearq)
/* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer.*/
RVVCALL(OPFVV1, vfwcvt_xu_f_v_h, WOP_UU_H, H4, H2, float16_to_uint32)
RVVCALL(OPFVV1, vfwcvt_xu_f_v_w, WOP_UU_W, H8, H4, float32_to_uint64)
-GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 2, 4, clearl)
-GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 4, 8, clearq)
+GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 2, 4)
+GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 4, 8)
/* vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. */
RVVCALL(OPFVV1, vfwcvt_x_f_v_h, WOP_UU_H, H4, H2, float16_to_int32)
RVVCALL(OPFVV1, vfwcvt_x_f_v_w, WOP_UU_W, H8, H4, float32_to_int64)
-GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 2, 4, clearl)
-GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 4, 8, clearq)
+GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 2, 4)
+GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 4, 8)
/* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float */
RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32)
RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64)
-GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 2, 4, clearl)
-GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 4, 8, clearq)
+GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 2, 4)
+GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 4, 8)
/* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */
RVVCALL(OPFVV1, vfwcvt_f_x_v_h, WOP_UU_H, H4, H2, int16_to_float32)
RVVCALL(OPFVV1, vfwcvt_f_x_v_w, WOP_UU_W, H8, H4, int32_to_float64)
-GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 2, 4, clearl)
-GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 4, 8, clearq)
+GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 2, 4)
+GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 4, 8)
/*
* vfwcvt.f.f.v vd, vs2, vm #
@@ -4253,8 +4127,8 @@ static uint32_t vfwcvtffv16(uint16_t a, float_status *s)
RVVCALL(OPFVV1, vfwcvt_f_f_v_h, WOP_UU_H, H4, H2, vfwcvtffv16)
RVVCALL(OPFVV1, vfwcvt_f_f_v_w, WOP_UU_W, H8, H4, float32_to_float64)
-GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 2, 4, clearl)
-GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8, clearq)
+GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 2, 4)
+GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8)
/* Narrowing Floating-Point/Integer Type-Convert Instructions */
/* (TD, T2, TX2) */
@@ -4263,26 +4137,26 @@ GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8, clearq)
/* vfncvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
RVVCALL(OPFVV1, vfncvt_xu_f_v_h, NOP_UU_H, H2, H4, float32_to_uint16)
RVVCALL(OPFVV1, vfncvt_xu_f_v_w, NOP_UU_W, H4, H8, float64_to_uint32)
-GEN_VEXT_V_ENV(vfncvt_xu_f_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfncvt_xu_f_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfncvt_xu_f_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfncvt_xu_f_v_w, 4, 4)
/* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. */
RVVCALL(OPFVV1, vfncvt_x_f_v_h, NOP_UU_H, H2, H4, float32_to_int16)
RVVCALL(OPFVV1, vfncvt_x_f_v_w, NOP_UU_W, H4, H8, float64_to_int32)
-GEN_VEXT_V_ENV(vfncvt_x_f_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfncvt_x_f_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfncvt_x_f_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfncvt_x_f_v_w, 4, 4)
/* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float */
RVVCALL(OPFVV1, vfncvt_f_xu_v_h, NOP_UU_H, H2, H4, uint32_to_float16)
RVVCALL(OPFVV1, vfncvt_f_xu_v_w, NOP_UU_W, H4, H8, uint64_to_float32)
-GEN_VEXT_V_ENV(vfncvt_f_xu_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfncvt_f_xu_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfncvt_f_xu_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfncvt_f_xu_v_w, 4, 4)
/* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. */
RVVCALL(OPFVV1, vfncvt_f_x_v_h, NOP_UU_H, H2, H4, int32_to_float16)
RVVCALL(OPFVV1, vfncvt_f_x_v_w, NOP_UU_W, H4, H8, int64_to_float32)
-GEN_VEXT_V_ENV(vfncvt_f_x_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfncvt_f_x_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfncvt_f_x_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfncvt_f_x_v_w, 4, 4)
/* vfncvt.f.f.v vd, vs2, vm # Convert double float to single-width float. */
static uint16_t vfncvtffv16(uint32_t a, float_status *s)
@@ -4292,21 +4166,20 @@ static uint16_t vfncvtffv16(uint32_t a, float_status *s)
RVVCALL(OPFVV1, vfncvt_f_f_v_h, NOP_UU_H, H2, H4, vfncvtffv16)
RVVCALL(OPFVV1, vfncvt_f_f_v_w, NOP_UU_W, H4, H8, float64_to_float32)
-GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2, clearh)
-GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2)
+GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4)
/*
*** Vector Reduction Operations
*/
/* Vector Single-Width Integer Reduction Instructions */
-#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\
+#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP) \
void HELPER(NAME)(void *vd, void *v0, void *vs1, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t i; \
- uint32_t tot = env_archcpu(env)->cfg.vlen / 8; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
for (i = 0; i < vl; i++) { \
@@ -4317,70 +4190,69 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
s1 = OP(s1, (TD)s2); \
} \
*((TD *)vd + HD(0)) = s1; \
- CLEAR_FN(vd, 1, sizeof(TD), tot); \
}
/* vd[0] = sum(vs1[0], vs2[*]) */
-GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD, clearb)
-GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD, clearh)
-GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD, clearl)
-GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD, clearq)
+GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD)
+GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD)
+GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD)
+GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD)
/* vd[0] = maxu(vs1[0], vs2[*]) */
-GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX, clearb)
-GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX, clearh)
-GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX, clearl)
-GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX, clearq)
+GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX)
+GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX)
+GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX)
+GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX)
/* vd[0] = max(vs1[0], vs2[*]) */
-GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX, clearb)
-GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX, clearh)
-GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX, clearl)
-GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX, clearq)
+GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX)
+GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX)
+GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX)
+GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX)
/* vd[0] = minu(vs1[0], vs2[*]) */
-GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN, clearb)
-GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN, clearh)
-GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN, clearl)
-GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN, clearq)
+GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN)
+GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN)
+GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN)
+GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN)
/* vd[0] = min(vs1[0], vs2[*]) */
-GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN, clearb)
-GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN, clearh)
-GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN, clearl)
-GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN, clearq)
+GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN)
+GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN)
+GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN)
+GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN)
/* vd[0] = and(vs1[0], vs2[*]) */
-GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND, clearb)
-GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND, clearh)
-GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND, clearl)
-GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND, clearq)
+GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND)
+GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND)
+GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND)
+GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND)
/* vd[0] = or(vs1[0], vs2[*]) */
-GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR, clearb)
-GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR, clearh)
-GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR, clearl)
-GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR, clearq)
+GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR)
+GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR)
+GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR)
+GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR)
/* vd[0] = xor(vs1[0], vs2[*]) */
-GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR, clearb)
-GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR, clearh)
-GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR, clearl)
-GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR, clearq)
+GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR)
+GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR)
+GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR)
+GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR)
/* Vector Widening Integer Reduction Instructions */
/* signed sum reduction into double-width accumulator */
-GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t, H2, H1, DO_ADD, clearh)
-GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD, clearl)
-GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD, clearq)
+GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t, H2, H1, DO_ADD)
+GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD)
+GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD)
/* Unsigned sum reduction into double-width accumulator */
-GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t, H2, H1, DO_ADD, clearh)
-GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD, clearl)
-GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD, clearq)
+GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t, H2, H1, DO_ADD)
+GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD)
+GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD)
/* Vector Single-Width Floating-Point Reduction Instructions */
-#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\
+#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP) \
void HELPER(NAME)(void *vd, void *v0, void *vs1, \
void *vs2, CPURISCVState *env, \
uint32_t desc) \
@@ -4388,7 +4260,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t i; \
- uint32_t tot = env_archcpu(env)->cfg.vlen / 8; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
for (i = 0; i < vl; i++) { \
@@ -4399,23 +4270,22 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
s1 = OP(s1, (TD)s2, &env->fp_status); \
} \
*((TD *)vd + HD(0)) = s1; \
- CLEAR_FN(vd, 1, sizeof(TD), tot); \
}
/* Unordered sum */
-GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add, clearh)
-GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add, clearl)
-GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add, clearq)
+GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
+GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
+GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
/* Maximum value */
-GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum, clearh)
-GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum, clearl)
-GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum, clearq)
+GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum)
+GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum)
+GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum)
/* Minimum value */
-GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum, clearh)
-GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum, clearl)
-GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum, clearq)
+GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum)
+GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum)
+GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum)
/* Vector Widening Floating-Point Reduction Instructions */
/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
@@ -4425,7 +4295,6 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
uint32_t vm = vext_vm(desc);
uint32_t vl = env->vl;
uint32_t i;
- uint32_t tot = env_archcpu(env)->cfg.vlen / 8;
uint32_t s1 = *((uint32_t *)vs1 + H4(0));
for (i = 0; i < vl; i++) {
@@ -4437,7 +4306,6 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
&env->fp_status);
}
*((uint32_t *)vd + H4(0)) = s1;
- clearl(vd, 1, sizeof(uint32_t), tot);
}
void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
@@ -4446,7 +4314,6 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
uint32_t vm = vext_vm(desc);
uint32_t vl = env->vl;
uint32_t i;
- uint32_t tot = env_archcpu(env)->cfg.vlen / 8;
uint64_t s1 = *((uint64_t *)vs1);
for (i = 0; i < vl; i++) {
@@ -4458,7 +4325,6 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
&env->fp_status);
}
*((uint64_t *)vd) = s1;
- clearq(vd, 1, sizeof(uint64_t), tot);
}
/*
@@ -4600,11 +4466,10 @@ void HELPER(vmsof_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
}
/* Vector Iota Instruction */
-#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
uint32_t desc) \
{ \
- uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t sum = 0; \
@@ -4619,19 +4484,17 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
sum++; \
} \
} \
- CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
}
-GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1, clearb)
-GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2, clearh)
-GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4, clearl)
-GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8, clearq)
+GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1)
+GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2)
+GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4)
+GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8)
/* Vector Element Index Instruction */
-#define GEN_VEXT_VID_V(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VID_V(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
int i; \
@@ -4642,24 +4505,22 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
} \
*((ETYPE *)vd + H(i)) = i; \
} \
- CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
}
-GEN_VEXT_VID_V(vid_v_b, uint8_t, H1, clearb)
-GEN_VEXT_VID_V(vid_v_h, uint16_t, H2, clearh)
-GEN_VEXT_VID_V(vid_v_w, uint32_t, H4, clearl)
-GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq)
+GEN_VEXT_VID_V(vid_v_b, uint8_t, H1)
+GEN_VEXT_VID_V(vid_v_h, uint16_t, H2)
+GEN_VEXT_VID_V(vid_v_w, uint32_t, H4)
+GEN_VEXT_VID_V(vid_v_d, uint64_t, H8)
/*
*** Vector Permutation Instructions
*/
/* Vector Slide Instructions */
-#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
target_ulong offset = s1, i; \
@@ -4670,16 +4531,15 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
} \
- CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
}
/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
-GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1, clearb)
-GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2, clearh)
-GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4, clearl)
-GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8)
-#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
@@ -4695,20 +4555,18 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
*((ETYPE *)vd + H(i)) = j >= vlmax ? 0 : *((ETYPE *)vs2 + H(j)); \
} \
- CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
}
/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
-GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1, clearb)
-GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2, clearh)
-GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4, clearl)
-GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8)
-#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t i; \
@@ -4723,20 +4581,18 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \
} \
} \
- CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
}
/* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1, clearb)
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2, clearh)
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4, clearl)
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8)
-#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t i; \
@@ -4751,17 +4607,16 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \
} \
} \
- CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
}
/* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1, clearb)
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2, clearh)
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4, clearl)
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
/* Vector Register Gather Instruction */
-#define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
@@ -4782,16 +4637,15 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
} \
} \
- CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
}
/* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
-GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, H1, clearb)
-GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2, clearh)
-GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4, clearl)
-GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8, clearq)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, H1)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8)
-#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
@@ -4811,21 +4665,19 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
} \
} \
- CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
}
/* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
-GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1, clearb)
-GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2, clearh)
-GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4, clearl)
-GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8, clearq)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8)
/* Vector Compress Instruction */
-#define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H, CLEAR_FN) \
+#define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H) \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
uint32_t vl = env->vl; \
uint32_t num = 0, i; \
\
@@ -4836,11 +4688,10 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
*((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i)); \
num++; \
} \
- CLEAR_FN(vd, num, num * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
}
/* Compress into vd elements of vs2 where vs1 is enabled */
-GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1, clearb)
-GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2, clearh)
-GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4, clearl)
-GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8, clearq)
+GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1)
+GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2)
+GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4)
+GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8)
--
2.33.1