0026-target-riscv-rvv-1.0-stride-load-and-store-instructi.patch 40 KB

From f2069daac8bc968e9c0d6781eab445f16b6f4bc9 Mon Sep 17 00:00:00 2001
From: Frank Chang <frank.chang@sifive.com>
Date: Fri, 14 Aug 2020 18:07:19 +0800
Subject: [PATCH 026/107] target/riscv: rvv-1.0: stride load and store
 instructions

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/helper.h                   | 129 +++-----------
 target/riscv/insn32.decode              |  43 +++--
 target/riscv/insn_trans/trans_rvv.c.inc | 227 +++++++++++-------------
 target/riscv/vector_helper.c            | 190 ++++++--------------
 4 files changed, 194 insertions(+), 395 deletions(-)
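
Not part of the patch itself, but a reviewer aid: below is a minimal standalone
sketch of the EMUL arithmetic that the new vext_check_store()/vext_check_load()
checks in this patch perform (the load check additionally requires that a
masked load's destination does not overlap v0). The function check_ldst() and
the main() driver are illustrative only, not QEMU code; the log2 convention
(eew/sew in 0..3, lmul in -3..3) follows the fields used in the patch.

/*
 * Standalone sketch (not QEMU code) of the EMUL arithmetic used by the
 * vext_check_store()/vext_check_load() functions added below.  All values
 * are log2: eew/sew are 0..3 (i.e. MO_8..MO_64 for 8/16/32/64-bit),
 * lmul is -3..3 (LMUL = 1/8 .. 8).
 */
#include <stdbool.h>
#include <stdio.h>

static bool check_ldst(int vd, int nf, int eew, int sew, int lmul)
{
    /* EMUL = (EEW / SEW) * LMUL, i.e. emul = eew - sew + lmul in log2. */
    int emul = eew - sew + lmul;

    /* Rule 1: 1/8 <= EMUL <= 8. */
    if (emul < -3 || emul > 3) {
        return false;
    }
    /* Rule 2: vd must be a multiple of EMUL (only matters for EMUL > 1). */
    if (emul > 0 && (vd % (1 << emul)) != 0) {
        return false;
    }
    /* Rules 3 and 4: EMUL * NFIELDS <= 8 and the group must not pass v31. */
    int regs = nf * (emul > 0 ? (1 << emul) : 1);
    return regs <= 8 && vd + regs <= 32;
}

int main(void)
{
    /* vle64.v with SEW=32, LMUL=1: EMUL = 2, so v4 is a legal destination, */
    printf("%d\n", check_ldst(4, 1, 3, 2, 0));   /* prints 1 */
    /* but v3 is rejected because it is not a multiple of EMUL = 2.         */
    printf("%d\n", check_ldst(3, 1, 3, 2, 0));   /* prints 0 */
    return 0;
}

Note also that MO_8..MO_64 evaluate to 0..3, which is why the translation code
can pass the EEW constant straight through GEN_VEXT_TRANS() and use it directly
as the index into the four-entry helper tables.
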
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 1104a3540a..3f4d460054 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -113,111 +113,30 @@ DEF_HELPER_2(hyp_hlvx_wu, tl, env, tl)
/* Vector functions */
DEF_HELPER_3(vsetvl, tl, env, tl, tl)
-DEF_HELPER_5(vlb_v_b, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlb_v_b_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlb_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlb_v_h_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlb_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlb_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlb_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlb_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlh_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlh_v_h_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlh_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlh_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlh_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlh_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlw_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlw_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlw_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlw_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vle_v_b, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vle_v_b_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vle_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vle_v_h_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vle_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vle_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vle_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vle_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbu_v_b, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbu_v_b_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbu_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbu_v_h_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbu_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbu_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbu_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbu_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhu_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhu_v_h_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhu_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhu_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhu_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhu_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlwu_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlwu_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlwu_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlwu_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsb_v_b, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsb_v_b_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsb_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsb_v_h_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsb_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsb_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsb_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsb_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsh_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsh_v_h_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsh_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsh_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsh_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsh_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsw_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsw_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsw_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vsw_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vse_v_b, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vse_v_b_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vse_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vse_v_h_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vse_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vse_v_w_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vse_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vse_v_d_mask, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_6(vlsb_v_b, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsb_v_h, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsb_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsb_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsh_v_h, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsh_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsh_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsw_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsw_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlse_v_b, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlse_v_h, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlse_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlse_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsbu_v_b, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsbu_v_h, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsbu_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlsbu_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlshu_v_h, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlshu_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlshu_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlswu_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlswu_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vssb_v_b, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vssb_v_h, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vssb_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vssb_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vssh_v_h, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vssh_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vssh_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vssw_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vssw_v_d, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vsse_v_b, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vsse_v_h, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vsse_v_w, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vsse_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_5(vle8_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle16_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle32_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle64_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle8_v_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle16_v_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle32_v_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle64_v_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse8_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse16_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse32_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse64_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse8_v_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse16_v_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse32_v_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse64_v_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_6(vlse8_v, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlse16_v, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlse32_v, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlse64_v, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vsse8_v, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vsse16_v, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vsse32_v, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vsse64_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 8d9064a7a0..03a1f6e53e 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -240,13 +240,26 @@ hfence_vvma 0010001 ..... ..... 000 00000 1110011 @hfence_vvma
# *** RV32V Extension ***
# *** Vector loads and stores are encoded within LOADFP/STORE-FP ***
-vlb_v ... 100 . 00000 ..... 000 ..... 0000111 @r2_nfvm
-vlh_v ... 100 . 00000 ..... 101 ..... 0000111 @r2_nfvm
-vlw_v ... 100 . 00000 ..... 110 ..... 0000111 @r2_nfvm
-vle_v ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm
-vlbu_v ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm
-vlhu_v ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm
-vlwu_v ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm
+# Vector unit-stride load/store insns.
+vle8_v ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm
+vle16_v ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm
+vle32_v ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm
+vle64_v ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm
+vse8_v ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm
+vse16_v ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm
+vse32_v ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm
+vse64_v ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm
+
+# Vector strided insns.
+vlse8_v ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm
+vlse16_v ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm
+vlse32_v ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm
+vlse64_v ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm
+vsse8_v ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm
+vsse16_v ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
+vsse32_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
+vsse64_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm
+
vlbff_v ... 100 . 10000 ..... 000 ..... 0000111 @r2_nfvm
vlhff_v ... 100 . 10000 ..... 101 ..... 0000111 @r2_nfvm
vlwff_v ... 100 . 10000 ..... 110 ..... 0000111 @r2_nfvm
@@ -254,22 +267,6 @@ vleff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
vlbuff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
vlhuff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
vlwuff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
-vsb_v ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm
-vsh_v ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm
-vsw_v ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm
-vse_v ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm
-
-vlsb_v ... 110 . ..... ..... 000 ..... 0000111 @r_nfvm
-vlsh_v ... 110 . ..... ..... 101 ..... 0000111 @r_nfvm
-vlsw_v ... 110 . ..... ..... 110 ..... 0000111 @r_nfvm
-vlse_v ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm
-vlsbu_v ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm
-vlshu_v ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm
-vlswu_v ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm
-vssb_v ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm
-vssh_v ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
-vssw_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
-vsse_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm
vlxb_v ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm
vlxh_v ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index fcb01d1b5f..e4c83cf74d 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -188,9 +188,42 @@ static uint32_t vreg_ofs(DisasContext *s, int reg)
/* check functions */
/*
- * In cpu_get_tb_cpu_state(), set VILL if RVV was not present.
- * So RVV is also be checked in this function.
+ * Vector unit-stride, strided, unit-stride segment, strided segment
+ * store check function.
+ *
+ * Rules to be checked here:
+ * 1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
+ * 2. Destination vector register number is a multiple of EMUL.
+ * (Section 3.3.2, 7.3)
+ * 3. The EMUL setting must be such that EMUL * NFIELDS <= 8. (Section 7.8)
+ * 4. Vector register numbers accessed by the segment load or store
+ * cannot increment past 31. (Section 7.8)
+ */
+static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
+{
+ int8_t emul = eew - s->sew + s->lmul;
+ return (emul >= -3 && emul <= 3) &&
+ require_align(vd, emul) &&
+ require_nf(vd, nf, emul);
+}
+
+/*
+ * Vector unit-stride, strided, unit-stride segment, strided segment
+ * load check function.
+ *
+ * Rules to be checked here:
+ * 1. All rules that apply to store instructions also apply
+ * to load instructions.
+ * 2. Destination vector register group for a masked vector
+ * instruction cannot overlap the source mask register (v0).
+ * (Section 5.3)
*/
+static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
+ uint8_t eew)
+{
+ return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
+}
+
static bool vext_check_isa_ill(DisasContext *s)
{
return !s->vill;
@@ -437,13 +470,13 @@ static bool vext_check_isa_ill(DisasContext *s)
}
/* common translation macro */
-#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK) \
-static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
-{ \
- if (CHECK(s, a)) { \
- return OP(s, a, SEQ); \
- } \
- return false; \
+#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
+{ \
+ if (CHECK(s, a, EEW)) { \
+ return OP(s, a, EEW); \
+ } \
+ return false; \
}
/*
@@ -493,44 +526,20 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
return true;
}
-static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
uint32_t data = 0;
gen_helper_ldst_us *fn;
- static gen_helper_ldst_us * const fns[2][7][4] = {
+ static gen_helper_ldst_us * const fns[2][4] = {
/* masked unit stride load */
- { { gen_helper_vlb_v_b_mask, gen_helper_vlb_v_h_mask,
- gen_helper_vlb_v_w_mask, gen_helper_vlb_v_d_mask },
- { NULL, gen_helper_vlh_v_h_mask,
- gen_helper_vlh_v_w_mask, gen_helper_vlh_v_d_mask },
- { NULL, NULL,
- gen_helper_vlw_v_w_mask, gen_helper_vlw_v_d_mask },
- { gen_helper_vle_v_b_mask, gen_helper_vle_v_h_mask,
- gen_helper_vle_v_w_mask, gen_helper_vle_v_d_mask },
- { gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask,
- gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask },
- { NULL, gen_helper_vlhu_v_h_mask,
- gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask },
- { NULL, NULL,
- gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } },
+ { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
+ gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
/* unmasked unit stride load */
- { { gen_helper_vlb_v_b, gen_helper_vlb_v_h,
- gen_helper_vlb_v_w, gen_helper_vlb_v_d },
- { NULL, gen_helper_vlh_v_h,
- gen_helper_vlh_v_w, gen_helper_vlh_v_d },
- { NULL, NULL,
- gen_helper_vlw_v_w, gen_helper_vlw_v_d },
- { gen_helper_vle_v_b, gen_helper_vle_v_h,
- gen_helper_vle_v_w, gen_helper_vle_v_d },
- { gen_helper_vlbu_v_b, gen_helper_vlbu_v_h,
- gen_helper_vlbu_v_w, gen_helper_vlbu_v_d },
- { NULL, gen_helper_vlhu_v_h,
- gen_helper_vlhu_v_w, gen_helper_vlhu_v_d },
- { NULL, NULL,
- gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } }
+ { gen_helper_vle8_v, gen_helper_vle16_v,
+ gen_helper_vle32_v, gen_helper_vle64_v }
};
- fn = fns[a->vm][seq][s->sew];
+ fn = fns[a->vm][eew];
if (fn == NULL) {
return false;
}
@@ -541,48 +550,32 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}
-static bool ld_us_check(DisasContext *s, arg_r2nfvm* a)
+static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
- return (vext_check_isa_ill(s) &&
- vext_check_overlap_mask(s, a->rd, a->vm, false) &&
- vext_check_reg(s, a->rd, false) &&
- vext_check_nf(s, a->nf));
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_load(s, a->rd, a->nf, a->vm, eew);
}
-GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
-GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check)
-GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check)
-GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check)
-GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check)
-GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check)
-GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vle8_v, MO_8, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)
-static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
uint32_t data = 0;
gen_helper_ldst_us *fn;
- static gen_helper_ldst_us * const fns[2][4][4] = {
- /* masked unit stride load and store */
- { { gen_helper_vsb_v_b_mask, gen_helper_vsb_v_h_mask,
- gen_helper_vsb_v_w_mask, gen_helper_vsb_v_d_mask },
- { NULL, gen_helper_vsh_v_h_mask,
- gen_helper_vsh_v_w_mask, gen_helper_vsh_v_d_mask },
- { NULL, NULL,
- gen_helper_vsw_v_w_mask, gen_helper_vsw_v_d_mask },
- { gen_helper_vse_v_b_mask, gen_helper_vse_v_h_mask,
- gen_helper_vse_v_w_mask, gen_helper_vse_v_d_mask } },
+ static gen_helper_ldst_us * const fns[2][4] = {
+ /* masked unit stride store */
+ { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
+ gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
/* unmasked unit stride store */
- { { gen_helper_vsb_v_b, gen_helper_vsb_v_h,
- gen_helper_vsb_v_w, gen_helper_vsb_v_d },
- { NULL, gen_helper_vsh_v_h,
- gen_helper_vsh_v_w, gen_helper_vsh_v_d },
- { NULL, NULL,
- gen_helper_vsw_v_w, gen_helper_vsw_v_d },
- { gen_helper_vse_v_b, gen_helper_vse_v_h,
- gen_helper_vse_v_w, gen_helper_vse_v_d } }
+ { gen_helper_vse8_v, gen_helper_vse16_v,
+ gen_helper_vse32_v, gen_helper_vse64_v }
};
- fn = fns[a->vm][seq][s->sew];
+ fn = fns[a->vm][eew];
if (fn == NULL) {
return false;
}
@@ -593,17 +586,17 @@ static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}
-static bool st_us_check(DisasContext *s, arg_r2nfvm* a)
+static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
- return (vext_check_isa_ill(s) &&
- vext_check_reg(s, a->rd, false) &&
- vext_check_nf(s, a->nf));
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_store(s, a->rd, a->nf, eew);
}
-GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check)
-GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check)
-GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check)
-GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check)
+GEN_VEXT_TRANS(vse8_v, MO_8, r2nfvm, st_us_op, st_us_check)
+GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
+GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
+GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)
/*
*** stride load and store
@@ -647,28 +640,16 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
return true;
}
-static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
- static gen_helper_ldst_stride * const fns[7][4] = {
- { gen_helper_vlsb_v_b, gen_helper_vlsb_v_h,
- gen_helper_vlsb_v_w, gen_helper_vlsb_v_d },
- { NULL, gen_helper_vlsh_v_h,
- gen_helper_vlsh_v_w, gen_helper_vlsh_v_d },
- { NULL, NULL,
- gen_helper_vlsw_v_w, gen_helper_vlsw_v_d },
- { gen_helper_vlse_v_b, gen_helper_vlse_v_h,
- gen_helper_vlse_v_w, gen_helper_vlse_v_d },
- { gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h,
- gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d },
- { NULL, gen_helper_vlshu_v_h,
- gen_helper_vlshu_v_w, gen_helper_vlshu_v_d },
- { NULL, NULL,
- gen_helper_vlswu_v_w, gen_helper_vlswu_v_d },
+ static gen_helper_ldst_stride * const fns[4] = {
+ gen_helper_vlse8_v, gen_helper_vlse16_v,
+ gen_helper_vlse32_v, gen_helper_vlse64_v
};
- fn = fns[seq][s->sew];
+ fn = fns[eew];
if (fn == NULL) {
return false;
}
@@ -679,42 +660,32 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}
-static bool ld_stride_check(DisasContext *s, arg_rnfvm* a)
+static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
- return (vext_check_isa_ill(s) &&
- vext_check_overlap_mask(s, a->rd, a->vm, false) &&
- vext_check_reg(s, a->rd, false) &&
- vext_check_nf(s, a->nf));
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_load(s, a->rd, a->nf, a->vm, eew);
}
-GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check)
-GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check)
-GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check)
-GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check)
-GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check)
-GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check)
-GEN_VEXT_TRANS(vlswu_v, 6, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlse8_v, MO_8, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
-static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
- static gen_helper_ldst_stride * const fns[4][4] = {
+ static gen_helper_ldst_stride * const fns[4] = {
/* masked stride store */
- { gen_helper_vssb_v_b, gen_helper_vssb_v_h,
- gen_helper_vssb_v_w, gen_helper_vssb_v_d },
- { NULL, gen_helper_vssh_v_h,
- gen_helper_vssh_v_w, gen_helper_vssh_v_d },
- { NULL, NULL,
- gen_helper_vssw_v_w, gen_helper_vssw_v_d },
- { gen_helper_vsse_v_b, gen_helper_vsse_v_h,
- gen_helper_vsse_v_w, gen_helper_vsse_v_d }
+ gen_helper_vsse8_v, gen_helper_vsse16_v,
+ gen_helper_vsse32_v, gen_helper_vsse64_v
};
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
- fn = fns[seq][s->sew];
+ fn = fns[eew];
if (fn == NULL) {
return false;
}
@@ -722,17 +693,17 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
}
-static bool st_stride_check(DisasContext *s, arg_rnfvm* a)
+static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
- return (vext_check_isa_ill(s) &&
- vext_check_reg(s, a->rd, false) &&
- vext_check_nf(s, a->nf));
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_store(s, a->rd, a->nf, eew);
}
-GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check)
-GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check)
-GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check)
-GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check)
+GEN_VEXT_TRANS(vsse8_v, MO_8, rnfvm, st_stride_op, st_stride_check)
+GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
+GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
+GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)
/*
*** index load and store
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index e8912ee8fe..ad45dd9006 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -183,38 +183,18 @@ static inline int vext_elem_mask(void *v0, int index)
typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr,
uint32_t idx, void *vd, uintptr_t retaddr);
-#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF) \
+#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \
static void NAME(CPURISCVState *env, abi_ptr addr, \
uint32_t idx, void *vd, uintptr_t retaddr)\
{ \
- MTYPE data; \
ETYPE *cur = ((ETYPE *)vd + H(idx)); \
- data = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
- *cur = data; \
+ *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
} \
-GEN_VEXT_LD_ELEM(ldb_b, int8_t, int8_t, H1, ldsb)
-GEN_VEXT_LD_ELEM(ldb_h, int8_t, int16_t, H2, ldsb)
-GEN_VEXT_LD_ELEM(ldb_w, int8_t, int32_t, H4, ldsb)
-GEN_VEXT_LD_ELEM(ldb_d, int8_t, int64_t, H8, ldsb)
-GEN_VEXT_LD_ELEM(ldh_h, int16_t, int16_t, H2, ldsw)
-GEN_VEXT_LD_ELEM(ldh_w, int16_t, int32_t, H4, ldsw)
-GEN_VEXT_LD_ELEM(ldh_d, int16_t, int64_t, H8, ldsw)
-GEN_VEXT_LD_ELEM(ldw_w, int32_t, int32_t, H4, ldl)
-GEN_VEXT_LD_ELEM(ldw_d, int32_t, int64_t, H8, ldl)
-GEN_VEXT_LD_ELEM(lde_b, int8_t, int8_t, H1, ldsb)
-GEN_VEXT_LD_ELEM(lde_h, int16_t, int16_t, H2, ldsw)
-GEN_VEXT_LD_ELEM(lde_w, int32_t, int32_t, H4, ldl)
-GEN_VEXT_LD_ELEM(lde_d, int64_t, int64_t, H8, ldq)
-GEN_VEXT_LD_ELEM(ldbu_b, uint8_t, uint8_t, H1, ldub)
-GEN_VEXT_LD_ELEM(ldbu_h, uint8_t, uint16_t, H2, ldub)
-GEN_VEXT_LD_ELEM(ldbu_w, uint8_t, uint32_t, H4, ldub)
-GEN_VEXT_LD_ELEM(ldbu_d, uint8_t, uint64_t, H8, ldub)
-GEN_VEXT_LD_ELEM(ldhu_h, uint16_t, uint16_t, H2, lduw)
-GEN_VEXT_LD_ELEM(ldhu_w, uint16_t, uint32_t, H4, lduw)
-GEN_VEXT_LD_ELEM(ldhu_d, uint16_t, uint64_t, H8, lduw)
-GEN_VEXT_LD_ELEM(ldwu_w, uint32_t, uint32_t, H4, ldl)
-GEN_VEXT_LD_ELEM(ldwu_d, uint32_t, uint64_t, H8, ldl)
+GEN_VEXT_LD_ELEM(lde_b, int8_t, H1, ldsb)
+GEN_VEXT_LD_ELEM(lde_h, int16_t, H2, ldsw)
+GEN_VEXT_LD_ELEM(lde_w, int32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(lde_d, int64_t, H8, ldq)
#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
static void NAME(CPURISCVState *env, abi_ptr addr, \
@@ -224,15 +204,6 @@ static void NAME(CPURISCVState *env, abi_ptr addr, \
cpu_##STSUF##_data_ra(env, addr, data, retaddr); \
}
-GEN_VEXT_ST_ELEM(stb_b, int8_t, H1, stb)
-GEN_VEXT_ST_ELEM(stb_h, int16_t, H2, stb)
-GEN_VEXT_ST_ELEM(stb_w, int32_t, H4, stb)
-GEN_VEXT_ST_ELEM(stb_d, int64_t, H8, stb)
-GEN_VEXT_ST_ELEM(sth_h, int16_t, H2, stw)
-GEN_VEXT_ST_ELEM(sth_w, int32_t, H4, stw)
-GEN_VEXT_ST_ELEM(sth_d, int64_t, H8, stw)
-GEN_VEXT_ST_ELEM(stw_w, int32_t, H4, stl)
-GEN_VEXT_ST_ELEM(stw_d, int64_t, H8, stl)
GEN_VEXT_ST_ELEM(ste_b, int8_t, H1, stb)
GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
@@ -246,8 +217,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
target_ulong stride, CPURISCVState *env,
uint32_t desc, uint32_t vm,
vext_ldst_elem_fn *ldst_elem,
- uint32_t esz, uint32_t msz, uintptr_t ra,
- MMUAccessType access_type)
+ uint32_t esz, uintptr_t ra, MMUAccessType access_type)
{
uint32_t i, k;
uint32_t nf = vext_nf(desc);
@@ -258,7 +228,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
if (!vm && !vext_elem_mask(v0, i)) {
continue;
}
- probe_pages(env, base + stride * i, nf * msz, ra, access_type);
+ probe_pages(env, base + stride * i, nf * esz, ra, access_type);
}
/* do real access */
for (i = 0; i < env->vl; i++) {
@@ -267,71 +237,42 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
continue;
}
while (k < nf) {
- target_ulong addr = base + stride * i + k * msz;
+ target_ulong addr = base + stride * i + k * esz;
ldst_elem(env, addr, i + k * vlmax, vd, ra);
k++;
}
}
}
-#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN) \
+#define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \
void HELPER(NAME)(void *vd, void * v0, target_ulong base, \
target_ulong stride, CPURISCVState *env, \
uint32_t desc) \
{ \
uint32_t vm = vext_vm(desc); \
vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN, \
- sizeof(ETYPE), sizeof(MTYPE), \
- GETPC(), MMU_DATA_LOAD); \
-}
-
-GEN_VEXT_LD_STRIDE(vlsb_v_b, int8_t, int8_t, ldb_b)
-GEN_VEXT_LD_STRIDE(vlsb_v_h, int8_t, int16_t, ldb_h)
-GEN_VEXT_LD_STRIDE(vlsb_v_w, int8_t, int32_t, ldb_w)
-GEN_VEXT_LD_STRIDE(vlsb_v_d, int8_t, int64_t, ldb_d)
-GEN_VEXT_LD_STRIDE(vlsh_v_h, int16_t, int16_t, ldh_h)
-GEN_VEXT_LD_STRIDE(vlsh_v_w, int16_t, int32_t, ldh_w)
-GEN_VEXT_LD_STRIDE(vlsh_v_d, int16_t, int64_t, ldh_d)
-GEN_VEXT_LD_STRIDE(vlsw_v_w, int32_t, int32_t, ldw_w)
-GEN_VEXT_LD_STRIDE(vlsw_v_d, int32_t, int64_t, ldw_d)
-GEN_VEXT_LD_STRIDE(vlse_v_b, int8_t, int8_t, lde_b)
-GEN_VEXT_LD_STRIDE(vlse_v_h, int16_t, int16_t, lde_h)
-GEN_VEXT_LD_STRIDE(vlse_v_w, int32_t, int32_t, lde_w)
-GEN_VEXT_LD_STRIDE(vlse_v_d, int64_t, int64_t, lde_d)
-GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t, uint8_t, ldbu_b)
-GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t, uint16_t, ldbu_h)
-GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t, uint32_t, ldbu_w)
-GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t, uint64_t, ldbu_d)
-GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h)
-GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w)
-GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d)
-GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w)
-GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d)
-
-#define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN) \
+ sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+}
+
+GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b)
+GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h)
+GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w)
+GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d)
+
+#define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
target_ulong stride, CPURISCVState *env, \
uint32_t desc) \
{ \
uint32_t vm = vext_vm(desc); \
vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN, \
- sizeof(ETYPE), sizeof(MTYPE), \
- GETPC(), MMU_DATA_STORE); \
-}
-
-GEN_VEXT_ST_STRIDE(vssb_v_b, int8_t, int8_t, stb_b)
-GEN_VEXT_ST_STRIDE(vssb_v_h, int8_t, int16_t, stb_h)
-GEN_VEXT_ST_STRIDE(vssb_v_w, int8_t, int32_t, stb_w)
-GEN_VEXT_ST_STRIDE(vssb_v_d, int8_t, int64_t, stb_d)
-GEN_VEXT_ST_STRIDE(vssh_v_h, int16_t, int16_t, sth_h)
-GEN_VEXT_ST_STRIDE(vssh_v_w, int16_t, int32_t, sth_w)
-GEN_VEXT_ST_STRIDE(vssh_v_d, int16_t, int64_t, sth_d)
-GEN_VEXT_ST_STRIDE(vssw_v_w, int32_t, int32_t, stw_w)
-GEN_VEXT_ST_STRIDE(vssw_v_d, int32_t, int64_t, stw_d)
-GEN_VEXT_ST_STRIDE(vsse_v_b, int8_t, int8_t, ste_b)
-GEN_VEXT_ST_STRIDE(vsse_v_h, int16_t, int16_t, ste_h)
-GEN_VEXT_ST_STRIDE(vsse_v_w, int32_t, int32_t, ste_w)
-GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
+ sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+}
+
+GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b)
+GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h)
+GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w)
+GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)
/*
*** unit-stride: access elements stored contiguously in memory
@@ -340,20 +281,20 @@ GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
/* unmasked unit-stride load and store operation*/
static void
vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t msz,
- uintptr_t ra, MMUAccessType access_type)
+ vext_ldst_elem_fn *ldst_elem,
+ uint32_t esz, uintptr_t ra, MMUAccessType access_type)
{
uint32_t i, k;
uint32_t nf = vext_nf(desc);
uint32_t vlmax = vext_maxsz(desc) / esz;
/* probe every access */
- probe_pages(env, base, env->vl * nf * msz, ra, access_type);
+ probe_pages(env, base, env->vl * nf * esz, ra, access_type);
/* load bytes from guest memory */
for (i = 0; i < env->vl; i++) {
k = 0;
while (k < nf) {
- target_ulong addr = base + (i * nf + k) * msz;
+ target_ulong addr = base + (i * nf + k) * esz;
ldst_elem(env, addr, i + k * vlmax, vd, ra);
k++;
}
@@ -365,76 +306,47 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
* stride = NF * sizeof (MTYPE)
*/
-#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN) \
+#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \
+ uint32_t stride = vext_nf(desc) * sizeof(ETYPE); \
vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
- sizeof(ETYPE), sizeof(MTYPE), \
- GETPC(), MMU_DATA_LOAD); \
+ sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
} \
\
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
vext_ldst_us(vd, base, env, desc, LOAD_FN, \
- sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \
-}
-
-GEN_VEXT_LD_US(vlb_v_b, int8_t, int8_t, ldb_b)
-GEN_VEXT_LD_US(vlb_v_h, int8_t, int16_t, ldb_h)
-GEN_VEXT_LD_US(vlb_v_w, int8_t, int32_t, ldb_w)
-GEN_VEXT_LD_US(vlb_v_d, int8_t, int64_t, ldb_d)
-GEN_VEXT_LD_US(vlh_v_h, int16_t, int16_t, ldh_h)
-GEN_VEXT_LD_US(vlh_v_w, int16_t, int32_t, ldh_w)
-GEN_VEXT_LD_US(vlh_v_d, int16_t, int64_t, ldh_d)
-GEN_VEXT_LD_US(vlw_v_w, int32_t, int32_t, ldw_w)
-GEN_VEXT_LD_US(vlw_v_d, int32_t, int64_t, ldw_d)
-GEN_VEXT_LD_US(vle_v_b, int8_t, int8_t, lde_b)
-GEN_VEXT_LD_US(vle_v_h, int16_t, int16_t, lde_h)
-GEN_VEXT_LD_US(vle_v_w, int32_t, int32_t, lde_w)
-GEN_VEXT_LD_US(vle_v_d, int64_t, int64_t, lde_d)
-GEN_VEXT_LD_US(vlbu_v_b, uint8_t, uint8_t, ldbu_b)
-GEN_VEXT_LD_US(vlbu_v_h, uint8_t, uint16_t, ldbu_h)
-GEN_VEXT_LD_US(vlbu_v_w, uint8_t, uint32_t, ldbu_w)
-GEN_VEXT_LD_US(vlbu_v_d, uint8_t, uint64_t, ldbu_d)
-GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h)
-GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w)
-GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d)
-GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w)
-GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d)
-
-#define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN) \
+ sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+}
+
+GEN_VEXT_LD_US(vle8_v, int8_t, lde_b)
+GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
+GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
+GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
+
+#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \
+ uint32_t stride = vext_nf(desc) * sizeof(ETYPE); \
vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
- sizeof(ETYPE), sizeof(MTYPE), \
- GETPC(), MMU_DATA_STORE); \
+ sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
} \
\
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
vext_ldst_us(vd, base, env, desc, STORE_FN, \
- sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\
-}
-
-GEN_VEXT_ST_US(vsb_v_b, int8_t, int8_t , stb_b)
-GEN_VEXT_ST_US(vsb_v_h, int8_t, int16_t, stb_h)
-GEN_VEXT_ST_US(vsb_v_w, int8_t, int32_t, stb_w)
-GEN_VEXT_ST_US(vsb_v_d, int8_t, int64_t, stb_d)
-GEN_VEXT_ST_US(vsh_v_h, int16_t, int16_t, sth_h)
-GEN_VEXT_ST_US(vsh_v_w, int16_t, int32_t, sth_w)
-GEN_VEXT_ST_US(vsh_v_d, int16_t, int64_t, sth_d)
-GEN_VEXT_ST_US(vsw_v_w, int32_t, int32_t, stw_w)
-GEN_VEXT_ST_US(vsw_v_d, int32_t, int64_t, stw_d)
-GEN_VEXT_ST_US(vse_v_b, int8_t, int8_t , ste_b)
-GEN_VEXT_ST_US(vse_v_h, int16_t, int16_t, ste_h)
-GEN_VEXT_ST_US(vse_v_w, int32_t, int32_t, ste_w)
-GEN_VEXT_ST_US(vse_v_d, int64_t, int64_t, ste_d)
+ sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+}
+
+GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)
+GEN_VEXT_ST_US(vse16_v, int16_t, ste_h)
+GEN_VEXT_ST_US(vse32_v, int32_t, ste_w)
+GEN_VEXT_ST_US(vse64_v, int64_t, ste_d)
/*
*** index: access vector element from indexed memory
--
2.33.1
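
Reader's note, not part of the patch: in the insn32.decode entries above, the
EEW comes from the standard width field (instruction bits 14:12): 000, 101, 110
and 111 select EEW 8, 16, 32 and 64 respectively, which the translation macros
forward as MO_8/MO_16/MO_32/MO_64. A hypothetical mapping, for illustration
only (not a QEMU function):

/* Illustrative only: width field (bits 14:12) -> log2 of the EEW in bytes. */
static inline int vldst_width_to_eew(int width)
{
    switch (width) {
    case 0: return 0;   /* MO_8:  EEW = 8  */
    case 5: return 1;   /* MO_16: EEW = 16 */
    case 6: return 2;   /* MO_32: EEW = 32 */
    case 7: return 3;   /* MO_64: EEW = 64 */
    default: return -1; /* scalar FP widths or reserved encodings */
    }
}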