0030-target-riscv-rvv-1.0-amo-operations.patch

From 6bc2f0d5ff1eaa91ea2c82e9bca60c96ad4b3a29 Mon Sep 17 00:00:00 2001
From: Frank Chang <frank.chang@sifive.com>
Date: Fri, 14 Aug 2020 18:07:40 +0800
Subject: [PATCH 030/107] target/riscv: rvv-1.0: amo operations

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/helper.h                   | 100 +++++++---
 target/riscv/insn32-64.decode           |  18 +-
 target/riscv/insn32.decode              |  36 +++-
 target/riscv/insn_trans/trans_rvv.c.inc | 229 +++++++++++++++--------
 target/riscv/vector_helper.c            | 232 ++++++++++++++++--------
 5 files changed, 414 insertions(+), 201 deletions(-)
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index bbea5403fb..f26af64d5b 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -174,36 +174,80 @@ DEF_HELPER_5(vle16ff_v, void, ptr, ptr, tl, env, i32)
 DEF_HELPER_5(vle32ff_v, void, ptr, ptr, tl, env, i32)
 DEF_HELPER_5(vle64ff_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_6(vamoswapei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
 #ifdef TARGET_RISCV64
-DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoaddw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoaddd_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoxorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoxord_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoandw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoandd_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoord_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamominw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomind_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomaxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomaxd_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamominuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamominud_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomaxuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomaxud_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
 #endif
-DEF_HELPER_6(vamoswapw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoaddw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoxorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoandw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamominw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomaxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamominuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomaxuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-
 DEF_HELPER_6(vadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
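
Aside (not part of the patch): each DEF_HELPER_6 entry above declares a six-argument TCG helper, with the type letters mapping to C types (ptr -> void *, tl -> target_ulong, env -> CPURISCVState *, i32 -> uint32_t). As a rough sketch, the prototype generated for the first entry corresponds to:

/* Sketch of the generated prototype for DEF_HELPER_6(vamoswapei8_32_v, ...);
 * parameter names follow the helper implementations later in this patch. */
void helper_vamoswapei8_32_v(void *vs3, void *v0, target_ulong base,
                             void *vs2, CPURISCVState *env, uint32_t desc);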
diff --git a/target/riscv/insn32-64.decode b/target/riscv/insn32-64.decode
index 1f5d0b7a5c..bf39c5064f 100644
--- a/target/riscv/insn32-64.decode
+++ b/target/riscv/insn32-64.decode
@@ -58,15 +58,15 @@ amominu_d  11000 . . ..... ..... 011 ..... 0101111 @atom_st
 amomaxu_d  11100 . . ..... ..... 011 ..... 0101111 @atom_st
 # *** Vector AMO operations (in addition to Zvamo) ***
-vamoswapd_v     00001 . . ..... ..... 111 ..... 0101111 @r_wdvm
-vamoaddd_v      00000 . . ..... ..... 111 ..... 0101111 @r_wdvm
-vamoxord_v      00100 . . ..... ..... 111 ..... 0101111 @r_wdvm
-vamoandd_v      01100 . . ..... ..... 111 ..... 0101111 @r_wdvm
-vamoord_v       01000 . . ..... ..... 111 ..... 0101111 @r_wdvm
-vamomind_v      10000 . . ..... ..... 111 ..... 0101111 @r_wdvm
-vamomaxd_v      10100 . . ..... ..... 111 ..... 0101111 @r_wdvm
-vamominud_v     11000 . . ..... ..... 111 ..... 0101111 @r_wdvm
-vamomaxud_v     11100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoswapei64_v  00001 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoaddei64_v   00000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoxorei64_v   00100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoandei64_v   01100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoorei64_v    01000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamominei64_v   10000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamomaxei64_v   10100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamominuei64_v  11000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamomaxuei64_v  11100 . . ..... ..... 111 ..... 0101111 @r_wdvm
 # *** RV64F Standard Extension (in addition to RV32F) ***
 fcvt_l_s   1100000 00010 ..... ... ..... 1010011 @r2_rm
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 97b9b8f5f5..ae406dff3b 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -279,15 +279,33 @@ vle32ff_v  ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
 vle64ff_v  ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
 # *** Vector AMO operations are encoded under the standard AMO major opcode ***
-vamoswapw_v     00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
-vamoaddw_v      00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
-vamoxorw_v      00100 . . ..... ..... 110 ..... 0101111 @r_wdvm
-vamoandw_v      01100 . . ..... ..... 110 ..... 0101111 @r_wdvm
-vamoorw_v       01000 . . ..... ..... 110 ..... 0101111 @r_wdvm
-vamominw_v      10000 . . ..... ..... 110 ..... 0101111 @r_wdvm
-vamomaxw_v      10100 . . ..... ..... 110 ..... 0101111 @r_wdvm
-vamominuw_v     11000 . . ..... ..... 110 ..... 0101111 @r_wdvm
-vamomaxuw_v     11100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoswapei8_v   00001 . . ..... ..... 000 ..... 0101111 @r_wdvm
+vamoswapei16_v  00001 . . ..... ..... 101 ..... 0101111 @r_wdvm
+vamoswapei32_v  00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoaddei8_v    00000 . . ..... ..... 000 ..... 0101111 @r_wdvm
+vamoaddei16_v   00000 . . ..... ..... 101 ..... 0101111 @r_wdvm
+vamoaddei32_v   00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoxorei8_v    00100 . . ..... ..... 000 ..... 0101111 @r_wdvm
+vamoxorei16_v   00100 . . ..... ..... 101 ..... 0101111 @r_wdvm
+vamoxorei32_v   00100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoandei8_v    01100 . . ..... ..... 000 ..... 0101111 @r_wdvm
+vamoandei16_v   01100 . . ..... ..... 101 ..... 0101111 @r_wdvm
+vamoandei32_v   01100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoorei8_v     01000 . . ..... ..... 000 ..... 0101111 @r_wdvm
+vamoorei16_v    01000 . . ..... ..... 101 ..... 0101111 @r_wdvm
+vamoorei32_v    01000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamominei8_v    10000 . . ..... ..... 000 ..... 0101111 @r_wdvm
+vamominei16_v   10000 . . ..... ..... 101 ..... 0101111 @r_wdvm
+vamominei32_v   10000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamomaxei8_v    10100 . . ..... ..... 000 ..... 0101111 @r_wdvm
+vamomaxei16_v   10100 . . ..... ..... 101 ..... 0101111 @r_wdvm
+vamomaxei32_v   10100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamominuei8_v   11000 . . ..... ..... 000 ..... 0101111 @r_wdvm
+vamominuei16_v  11000 . . ..... ..... 101 ..... 0101111 @r_wdvm
+vamominuei32_v  11000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamomaxuei8_v   11100 . . ..... ..... 000 ..... 0101111 @r_wdvm
+vamomaxuei16_v  11100 . . ..... ..... 101 ..... 0101111 @r_wdvm
+vamomaxuei32_v  11100 . . ..... ..... 110 ..... 0101111 @r_wdvm
 # *** new major opcode OP-V ***
 vadd_vv    000000 . ..... ..... 000 ..... 1010111 @r_vm
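
Aside (not part of the patch): in the patterns above, the width field insn[14:12] now selects the index EEW rather than a fixed data width. A minimal standalone sketch of that mapping (the helper name is hypothetical):

#include <stdint.h>

/* Map the vector AMO width field (insn[14:12]) to the index EEW in bits:
 * 000 -> 8, 101 -> 16, 110 -> 32, 111 -> 64 (RV64 only, in insn32-64.decode). */
static int vamo_index_eew(uint32_t insn)
{
    switch ((insn >> 12) & 0x7) {
    case 0: return 8;
    case 5: return 16;
    case 6: return 32;
    case 7: return 64;
    default: return -1; /* not a vector AMO encoding */
    }
}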
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 14974ce288..5057dff5eb 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -290,6 +290,52 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
     return ret;
 }
+/*
+ * Vector AMO check function.
+ *
+ * Rules to be checked here:
+ * 1. RVA must be supported.
+ * 2. AMO can operate on either 64-bit (RV64 only) or 32-bit words
+ *    in memory:
+ *    For RV32: 32 <= SEW <= 32, EEW <= 32.
+ *    For RV64: 32 <= SEW <= 64, EEW <= 64.
+ * 3. Destination vector register number is a multiple of LMUL.
+ *    (Section 3.3.2, 8)
+ * 4. Address vector register number is a multiple of EMUL.
+ *    (Section 3.3.2, 8)
+ * 5. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
+ * 6. If wd = 1:
+ *    6.1. Destination vector register group for a masked vector
+ *         instruction cannot overlap the source mask register (v0).
+ *         (Section 5.3)
+ *    6.2. Destination vector register cannot overlap a source vector
+ *         register (vs2) group.
+ *         (Section 5.2)
+ */
+static bool vext_check_amo(DisasContext *s, int vd, int vs2,
+                           int wd, int vm, uint8_t eew)
+{
+    int8_t emul = eew - s->sew + s->lmul;
+    bool ret = has_ext(s, RVA) &&
+               (1 << s->sew >= 4) &&
+               (1 << s->sew <= sizeof(target_ulong)) &&
+               (eew <= (sizeof(target_ulong) << 3)) &&
+               require_align(vd, s->lmul) &&
+               require_align(vs2, emul) &&
+               (emul >= -3 && emul <= 3);
+    if (wd) {
+        ret &= require_vm(vm, vd);
+        if (eew > s->sew) {
+            if (vd != vs2) {
+                ret &= require_noover(vd, s->lmul, vs2, emul);
+            }
+        } else if (eew < s->sew) {
+            ret &= require_noover(vd, s->lmul, vs2, emul);
+        }
+    }
+    return ret;
+}
+
 static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
 {
     return require_vm(vm, vd) &&
@@ -1007,104 +1053,129 @@ static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
     return true;
 }
-static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
+static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t eew, uint8_t seq)
 {
     uint32_t data = 0;
     gen_helper_amo *fn;
-    static gen_helper_amo *const fnsw[9] = {
-        /* no atomic operation */
-        gen_helper_vamoswapw_v_w,
-        gen_helper_vamoaddw_v_w,
-        gen_helper_vamoxorw_v_w,
-        gen_helper_vamoandw_v_w,
-        gen_helper_vamoorw_v_w,
-        gen_helper_vamominw_v_w,
-        gen_helper_vamomaxw_v_w,
-        gen_helper_vamominuw_v_w,
-        gen_helper_vamomaxuw_v_w
-    };
+    static gen_helper_amo *const fns[36][2] = {
+        { gen_helper_vamoswapei8_32_v, gen_helper_vamoswapei8_64_v },
+        { gen_helper_vamoswapei16_32_v, gen_helper_vamoswapei16_64_v },
+        { gen_helper_vamoswapei32_32_v, gen_helper_vamoswapei32_64_v },
+        { gen_helper_vamoaddei8_32_v, gen_helper_vamoaddei8_64_v },
+        { gen_helper_vamoaddei16_32_v, gen_helper_vamoaddei16_64_v },
+        { gen_helper_vamoaddei32_32_v, gen_helper_vamoaddei32_64_v },
+        { gen_helper_vamoxorei8_32_v, gen_helper_vamoxorei8_64_v },
+        { gen_helper_vamoxorei16_32_v, gen_helper_vamoxorei16_64_v },
+        { gen_helper_vamoxorei32_32_v, gen_helper_vamoxorei32_64_v },
+        { gen_helper_vamoandei8_32_v, gen_helper_vamoandei8_64_v },
+        { gen_helper_vamoandei16_32_v, gen_helper_vamoandei16_64_v },
+        { gen_helper_vamoandei32_32_v, gen_helper_vamoandei32_64_v },
+        { gen_helper_vamoorei8_32_v, gen_helper_vamoorei8_64_v },
+        { gen_helper_vamoorei16_32_v, gen_helper_vamoorei16_64_v },
+        { gen_helper_vamoorei32_32_v, gen_helper_vamoorei32_64_v },
+        { gen_helper_vamominei8_32_v, gen_helper_vamominei8_64_v },
+        { gen_helper_vamominei16_32_v, gen_helper_vamominei16_64_v },
+        { gen_helper_vamominei32_32_v, gen_helper_vamominei32_64_v },
+        { gen_helper_vamomaxei8_32_v, gen_helper_vamomaxei8_64_v },
+        { gen_helper_vamomaxei16_32_v, gen_helper_vamomaxei16_64_v },
+        { gen_helper_vamomaxei32_32_v, gen_helper_vamomaxei32_64_v },
+        { gen_helper_vamominuei8_32_v, gen_helper_vamominuei8_64_v },
+        { gen_helper_vamominuei16_32_v, gen_helper_vamominuei16_64_v },
+        { gen_helper_vamominuei32_32_v, gen_helper_vamominuei32_64_v },
+        { gen_helper_vamomaxuei8_32_v, gen_helper_vamomaxuei8_64_v },
+        { gen_helper_vamomaxuei16_32_v, gen_helper_vamomaxuei16_64_v },
+        { gen_helper_vamomaxuei32_32_v, gen_helper_vamomaxuei32_64_v },
 #ifdef TARGET_RISCV64
-    static gen_helper_amo *const fnsd[18] = {
-        gen_helper_vamoswapw_v_d,
-        gen_helper_vamoaddw_v_d,
-        gen_helper_vamoxorw_v_d,
-        gen_helper_vamoandw_v_d,
-        gen_helper_vamoorw_v_d,
-        gen_helper_vamominw_v_d,
-        gen_helper_vamomaxw_v_d,
-        gen_helper_vamominuw_v_d,
-        gen_helper_vamomaxuw_v_d,
-        gen_helper_vamoswapd_v_d,
-        gen_helper_vamoaddd_v_d,
-        gen_helper_vamoxord_v_d,
-        gen_helper_vamoandd_v_d,
-        gen_helper_vamoord_v_d,
-        gen_helper_vamomind_v_d,
-        gen_helper_vamomaxd_v_d,
-        gen_helper_vamominud_v_d,
-        gen_helper_vamomaxud_v_d
-    };
+        { gen_helper_vamoswapei64_32_v, gen_helper_vamoswapei64_64_v },
+        { gen_helper_vamoaddei64_32_v, gen_helper_vamoaddei64_64_v },
+        { gen_helper_vamoxorei64_32_v, gen_helper_vamoxorei64_64_v },
+        { gen_helper_vamoandei64_32_v, gen_helper_vamoandei64_64_v },
+        { gen_helper_vamoorei64_32_v, gen_helper_vamoorei64_64_v },
+        { gen_helper_vamominei64_32_v, gen_helper_vamominei64_64_v },
+        { gen_helper_vamomaxei64_32_v, gen_helper_vamomaxei64_64_v },
+        { gen_helper_vamominuei64_32_v, gen_helper_vamominuei64_64_v },
+        { gen_helper_vamomaxuei64_32_v, gen_helper_vamomaxuei64_64_v }
+#else
+        { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL },
+        { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL },
+        { NULL, NULL }
 #endif
+    };
     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
         gen_helper_exit_atomic(cpu_env);
         s->base.is_jmp = DISAS_NORETURN;
         return true;
-    } else {
-        if (s->sew == 3) {
-#ifdef TARGET_RISCV64
-            fn = fnsd[seq];
-#else
-            /* Check done in amo_check(). */
-            g_assert_not_reached();
-#endif
-        } else {
-            assert(seq < ARRAY_SIZE(fnsw));
-            fn = fnsw[seq];
-        }
+    }
+
+    fn = fns[seq][s->sew - 2];
+    if (fn == NULL) {
+        return false;
     }
     data = FIELD_DP32(data, VDATA, VM, a->vm);
-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, WD, a->wd);
    return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
 }
+
+static bool amo_check(DisasContext *s, arg_rwdvm* a, uint8_t eew)
+{
+    return require_rvv(s) &&
+           vext_check_isa_ill(s) &&
+           vext_check_amo(s, a->rd, a->rs2, a->wd, a->vm, eew);
+}
+
+#define GEN_VEXT_AMO_TRANS(NAME, EEW, SEQ, ARGTYPE, OP, CHECK)   \
+static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a)     \
+{                                                                \
+    if (CHECK(s, a, EEW)) {                                      \
+        return OP(s, a, EEW, SEQ);                               \
+    }                                                            \
+    return false;                                                \
+}
+
+GEN_VEXT_AMO_TRANS(vamoswapei8_v, MO_8, 0, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoswapei16_v, MO_16, 1, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoswapei32_v, MO_32, 2, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoaddei8_v, MO_8, 3, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoaddei16_v, MO_16, 4, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoaddei32_v, MO_32, 5, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoxorei8_v, MO_8, 6, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoxorei16_v, MO_16, 7, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoxorei32_v, MO_32, 8, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoandei8_v, MO_8, 9, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoandei16_v, MO_16, 10, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoandei32_v, MO_32, 11, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoorei8_v, MO_8, 12, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoorei16_v, MO_16, 13, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoorei32_v, MO_32, 14, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamominei8_v, MO_8, 15, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamominei16_v, MO_16, 16, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamominei32_v, MO_32, 17, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamomaxei8_v, MO_8, 18, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamomaxei16_v, MO_16, 19, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamomaxei32_v, MO_32, 20, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamominuei8_v, MO_8, 21, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamominuei16_v, MO_16, 22, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamominuei32_v, MO_32, 23, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamomaxuei8_v, MO_8, 24, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamomaxuei16_v, MO_16, 25, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamomaxuei32_v, MO_32, 26, rwdvm, amo_op, amo_check)
+
 /*
- * There are two rules check here.
- *
- * 1. SEW must be at least as wide as the AMO memory element size.
- *
- * 2. If SEW is greater than XLEN, an illegal instruction exception is raised.
+ * Index EEW cannot be greater than XLEN,
+ * else an illegal instruction exception is raised (Section 8).
  */
-static bool amo_check(DisasContext *s, arg_rwdvm* a)
-{
-    return (!s->vill && has_ext(s, RVA) &&
-            (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) &&
-            vext_check_reg(s, a->rd, false) &&
-            vext_check_reg(s, a->rs2, false) &&
-            ((1 << s->sew) <= sizeof(target_ulong)) &&
-            ((1 << s->sew) >= 4));
-}
-
-GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check)
 #ifdef TARGET_RISCV64
-GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check)
-GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoswapei64_v, MO_64, 27, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoaddei64_v, MO_64, 28, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoxorei64_v, MO_64, 29, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoandei64_v, MO_64, 30, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamoorei64_v, MO_64, 31, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamominei64_v, MO_64, 32, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamomaxei64_v, MO_64, 33, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamominuei64_v, MO_64, 34, rwdvm, amo_op, amo_check)
+GEN_VEXT_AMO_TRANS(vamomaxuei64_v, MO_64, 35, rwdvm, amo_op, amo_check)
 #endif
 /*
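
Aside (not part of the patch): in vext_check_amo above, eew, s->sew and s->lmul are all log2-encoded, so EMUL = (EEW / SEW) * LMUL reduces to an addition. A minimal standalone sketch of the range check, assuming fractional LMUL 1/8..8 is encoded as -3..3:

#include <stdbool.h>
#include <stdint.h>

/* log2(EMUL) = log2(EEW) - log2(SEW) + log2(LMUL) */
static bool emul_in_range(int8_t eew, int8_t sew, int8_t lmul)
{
    int8_t emul = eew - sew + lmul;
    return emul >= -3 && emul <= 3; /* i.e. 1/8 <= EMUL <= 8 */
}

For example, EEW=8 (log2 encoding 0), SEW=32 (2) and LMUL=1 (0) give log2(EMUL) = -2, i.e. EMUL = 1/4, which passes the check.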
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index affe024600..8bc3bf77a3 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -541,23 +541,22 @@ typedef void vext_amo_noatomic_fn(void *vs3, target_ulong addr,
                                   uint32_t wd, uint32_t idx, CPURISCVState *env,
                                   uintptr_t retaddr);
-/* no atomic opreation for vector atomic insructions */
+/* no atomic operation for vector atomic instructions */
 #define DO_SWAP(N, M) (M)
 #define DO_AND(N, M)  (N & M)
 #define DO_XOR(N, M)  (N ^ M)
 #define DO_OR(N, M)   (N | M)
 #define DO_ADD(N, M)  (N + M)
+#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
+#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))
-#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ESZ, MSZ, H, DO_OP, SUF) \
+#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, MTYPE, H, DO_OP, SUF)    \
 static void                                                     \
 vext_##NAME##_noatomic_op(void *vs3, target_ulong addr,         \
                           uint32_t wd, uint32_t idx,            \
                           CPURISCVState *env, uintptr_t retaddr)\
 {                                                               \
-    typedef int##ESZ##_t ETYPE;                                 \
-    typedef int##MSZ##_t MTYPE;                                 \
-    typedef uint##MSZ##_t UMTYPE __attribute__((unused));       \
-    ETYPE *pe3 = (ETYPE *)vs3 + H(idx);                         \
+    MTYPE *pe3 = (MTYPE *)vs3 + H(idx);                         \
     MTYPE a = cpu_ld##SUF##_data(env, addr), b = *pe3;          \
                                                                 \
     cpu_st##SUF##_data(env, addr, DO_OP(a, b));                 \
@@ -566,42 +565,79 @@ vext_##NAME##_noatomic_op(void *vs3, target_ulong addr, \
     }                                                           \
 }
-/* Signed min/max */
-#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
-#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))
-
-/* Unsigned min/max */
-#define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M)
-#define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M)
-
-GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_w, 32, 32, H4, DO_SWAP, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w, 32, 32, H4, DO_ADD, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_w, 32, 32, H4, DO_XOR, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_w, 32, 32, H4, DO_AND, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_w, 32, 32, H4, DO_OR, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w, 32, 32, H4, DO_MIN, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w, 32, 32, H4, DO_MAX, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei8_32_v, uint32_t, H4, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei8_64_v, uint64_t, H8, DO_SWAP, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei16_32_v, uint32_t, H4, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei16_64_v, uint64_t, H8, DO_SWAP, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei32_32_v, uint32_t, H4, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei32_64_v, uint64_t, H8, DO_SWAP, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei8_32_v, uint32_t, H4, DO_ADD, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei8_64_v, uint64_t, H8, DO_ADD, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei16_32_v, uint32_t, H4, DO_ADD, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei16_64_v, uint64_t, H8, DO_ADD, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei32_32_v, uint32_t, H4, DO_ADD, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei32_64_v, uint64_t, H8, DO_ADD, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei8_32_v, uint32_t, H4, DO_XOR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei8_64_v, uint64_t, H8, DO_XOR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei16_32_v, uint32_t, H4, DO_XOR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei16_64_v, uint64_t, H8, DO_XOR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei32_32_v, uint32_t, H4, DO_XOR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei32_64_v, uint64_t, H8, DO_XOR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandei8_32_v, uint32_t, H4, DO_AND, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandei8_64_v, uint64_t, H8, DO_AND, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandei16_32_v, uint32_t, H4, DO_AND, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandei16_64_v, uint64_t, H8, DO_AND, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandei32_32_v, uint32_t, H4, DO_AND, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandei32_64_v, uint64_t, H8, DO_AND, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorei8_32_v, uint32_t, H4, DO_OR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorei8_64_v, uint64_t, H8, DO_OR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorei16_32_v, uint32_t, H4, DO_OR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorei16_64_v, uint64_t, H8, DO_OR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorei32_32_v, uint32_t, H4, DO_OR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorei32_64_v, uint64_t, H8, DO_OR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominei8_32_v, int32_t, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominei8_64_v, int64_t, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominei16_32_v, int32_t, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominei16_64_v, int64_t, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominei32_32_v, int32_t, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominei32_64_v, int64_t, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei8_32_v, int32_t, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei8_64_v, int64_t, H8, DO_MAX, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei16_32_v, int32_t, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei16_64_v, int64_t, H8, DO_MAX, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei32_32_v, int32_t, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei32_64_v, int64_t, H8, DO_MAX, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuei8_32_v, uint32_t, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuei8_64_v, uint64_t, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuei16_32_v, uint32_t, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuei16_64_v, uint64_t, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuei32_32_v, uint32_t, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuei32_64_v, uint64_t, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei8_32_v, uint32_t, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei8_64_v, uint64_t, H8, DO_MAX, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei16_32_v, uint32_t, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei16_64_v, uint64_t, H8, DO_MAX, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei32_32_v, uint32_t, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei32_64_v, uint64_t, H8, DO_MAX, q)
 #ifdef TARGET_RISCV64
-GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d, 64, 32, H8, DO_ADD, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoaddd_v_d, 64, 64, H8, DO_ADD, q)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_d, 64, 32, H8, DO_XOR, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoxord_v_d, 64, 64, H8, DO_XOR, q)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_d, 64, 32, H8, DO_AND, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoandd_v_d, 64, 64, H8, DO_AND, q)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_d, 64, 32, H8, DO_OR, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamoord_v_d, 64, 64, H8, DO_OR, q)
-GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_d, 64, 32, H8, DO_MIN, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamomind_v_d, 64, 64, H8, DO_MIN, q)
-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_d, 64, 32, H8, DO_MAX, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxd_v_d, 64, 64, H8, DO_MAX, q)
-GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q)
-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l)
-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei64_32_v, uint32_t, H4, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei64_64_v, uint64_t, H8, DO_SWAP, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei64_32_v, uint32_t, H4, DO_ADD, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei64_64_v, uint64_t, H8, DO_ADD, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei64_32_v, uint32_t, H4, DO_XOR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei64_64_v, uint64_t, H8, DO_XOR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandei64_32_v, uint32_t, H4, DO_AND, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandei64_64_v, uint64_t, H8, DO_AND, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorei64_32_v, uint32_t, H4, DO_OR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorei64_64_v, uint64_t, H8, DO_OR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominei64_32_v, int32_t, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominei64_64_v, int64_t, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei64_32_v, int32_t, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei64_64_v, int64_t, H8, DO_MAX, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuei64_32_v, uint32_t, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuei64_64_v, uint64_t, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei64_32_v, uint32_t, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei64_64_v, uint64_t, H8, DO_MAX, q)
 #endif
 static inline void
@@ -609,7 +645,7 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
                   void *vs2, CPURISCVState *env, uint32_t desc,
                   vext_get_index_addr get_index_addr,
                   vext_amo_noatomic_fn *noatomic_op,
-                  uint32_t esz, uint32_t msz, uintptr_t ra)
+                  uint32_t esz, uintptr_t ra)
 {
     uint32_t i;
     target_long addr;
@@ -620,8 +656,8 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
        if (!vm && !vext_elem_mask(v0, i)) {
            continue;
        }
-        probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD);
-        probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE);
+        probe_pages(env, get_index_addr(base, i, vs2), esz, ra, MMU_DATA_LOAD);
+        probe_pages(env, get_index_addr(base, i, vs2), esz, ra, MMU_DATA_STORE);
    }
    for (i = 0; i < env->vl; i++) {
        if (!vm && !vext_elem_mask(v0, i)) {
@@ -632,45 +668,89 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
    }
 }
-#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN)                \
+#define GEN_VEXT_AMO(NAME, ETYPE, INDEX_FN)                       \
 void HELPER(NAME)(void *vs3, void *v0, target_ulong base,         \
                   void *vs2, CPURISCVState *env, uint32_t desc)   \
 {                                                                 \
     vext_amo_noatomic(vs3, v0, base, vs2, env, desc,              \
                       INDEX_FN, vext_##NAME##_noatomic_op,        \
-                      sizeof(ETYPE), sizeof(MTYPE),               \
-                      GETPC());                                   \
-}
-
+                      sizeof(ETYPE), GETPC());                    \
+}
+
+GEN_VEXT_AMO(vamoswapei8_32_v, int32_t, idx_b)
+GEN_VEXT_AMO(vamoswapei8_64_v, int64_t, idx_b)
+GEN_VEXT_AMO(vamoswapei16_32_v, int32_t, idx_h)
+GEN_VEXT_AMO(vamoswapei16_64_v, int64_t, idx_h)
+GEN_VEXT_AMO(vamoswapei32_32_v, int32_t, idx_w)
+GEN_VEXT_AMO(vamoswapei32_64_v, int64_t, idx_w)
+GEN_VEXT_AMO(vamoaddei8_32_v, int32_t, idx_b)
+GEN_VEXT_AMO(vamoaddei8_64_v, int64_t, idx_b)
+GEN_VEXT_AMO(vamoaddei16_32_v, int32_t, idx_h)
+GEN_VEXT_AMO(vamoaddei16_64_v, int64_t, idx_h)
+GEN_VEXT_AMO(vamoaddei32_32_v, int32_t, idx_w)
+GEN_VEXT_AMO(vamoaddei32_64_v, int64_t, idx_w)
+GEN_VEXT_AMO(vamoxorei8_32_v, int32_t, idx_b)
+GEN_VEXT_AMO(vamoxorei8_64_v, int64_t, idx_b)
+GEN_VEXT_AMO(vamoxorei16_32_v, int32_t, idx_h)
+GEN_VEXT_AMO(vamoxorei16_64_v, int64_t, idx_h)
+GEN_VEXT_AMO(vamoxorei32_32_v, int32_t, idx_w)
+GEN_VEXT_AMO(vamoxorei32_64_v, int64_t, idx_w)
+GEN_VEXT_AMO(vamoandei8_32_v, int32_t, idx_b)
+GEN_VEXT_AMO(vamoandei8_64_v, int64_t, idx_b)
+GEN_VEXT_AMO(vamoandei16_32_v, int32_t, idx_h)
+GEN_VEXT_AMO(vamoandei16_64_v, int64_t, idx_h)
+GEN_VEXT_AMO(vamoandei32_32_v, int32_t, idx_w)
+GEN_VEXT_AMO(vamoandei32_64_v, int64_t, idx_w)
+GEN_VEXT_AMO(vamoorei8_32_v, int32_t, idx_b)
+GEN_VEXT_AMO(vamoorei8_64_v, int64_t, idx_b)
+GEN_VEXT_AMO(vamoorei16_32_v, int32_t, idx_h)
+GEN_VEXT_AMO(vamoorei16_64_v, int64_t, idx_h)
+GEN_VEXT_AMO(vamoorei32_32_v, int32_t, idx_w)
+GEN_VEXT_AMO(vamoorei32_64_v, int64_t, idx_w)
+GEN_VEXT_AMO(vamominei8_32_v, int32_t, idx_b)
+GEN_VEXT_AMO(vamominei8_64_v, int64_t, idx_b)
+GEN_VEXT_AMO(vamominei16_32_v, int32_t, idx_h)
+GEN_VEXT_AMO(vamominei16_64_v, int64_t, idx_h)
+GEN_VEXT_AMO(vamominei32_32_v, int32_t, idx_w)
+GEN_VEXT_AMO(vamominei32_64_v, int64_t, idx_w)
+GEN_VEXT_AMO(vamomaxei8_32_v, int32_t, idx_b)
+GEN_VEXT_AMO(vamomaxei8_64_v, int64_t, idx_b)
+GEN_VEXT_AMO(vamomaxei16_32_v, int32_t, idx_h)
+GEN_VEXT_AMO(vamomaxei16_64_v, int64_t, idx_h)
+GEN_VEXT_AMO(vamomaxei32_32_v, int32_t, idx_w)
+GEN_VEXT_AMO(vamomaxei32_64_v, int64_t, idx_w)
+GEN_VEXT_AMO(vamominuei8_32_v, int32_t, idx_b)
+GEN_VEXT_AMO(vamominuei8_64_v, int64_t, idx_b)
+GEN_VEXT_AMO(vamominuei16_32_v, int32_t, idx_h)
+GEN_VEXT_AMO(vamominuei16_64_v, int64_t, idx_h)
+GEN_VEXT_AMO(vamominuei32_32_v, int32_t, idx_w)
+GEN_VEXT_AMO(vamominuei32_64_v, int64_t, idx_w)
+GEN_VEXT_AMO(vamomaxuei8_32_v, int32_t, idx_b)
+GEN_VEXT_AMO(vamomaxuei8_64_v, int64_t, idx_b)
+GEN_VEXT_AMO(vamomaxuei16_32_v, int32_t, idx_h)
+GEN_VEXT_AMO(vamomaxuei16_64_v, int64_t, idx_h)
+GEN_VEXT_AMO(vamomaxuei32_32_v, int32_t, idx_w)
+GEN_VEXT_AMO(vamomaxuei32_64_v, int64_t, idx_w)
 #ifdef TARGET_RISCV64
-GEN_VEXT_AMO(vamoswapw_v_d, int32_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamoswapd_v_d, int64_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamoaddw_v_d, int32_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamoaddd_v_d, int64_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamoxorw_v_d, int32_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamoxord_v_d, int64_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamoandw_v_d, int32_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamoandd_v_d, int64_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamoorw_v_d, int32_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamoord_v_d, int64_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamominw_v_d, int32_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamomind_v_d, int64_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamomaxw_v_d, int32_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamomaxd_v_d, int64_t, int64_t, idx_d)
-GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d)
-GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d)
-GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d)
-GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d)
+GEN_VEXT_AMO(vamoswapei64_32_v, int32_t, idx_d)
+GEN_VEXT_AMO(vamoswapei64_64_v, int64_t, idx_d)
+GEN_VEXT_AMO(vamoaddei64_32_v, int32_t, idx_d)
+GEN_VEXT_AMO(vamoaddei64_64_v, int64_t, idx_d)
+GEN_VEXT_AMO(vamoxorei64_32_v, int32_t, idx_d)
+GEN_VEXT_AMO(vamoxorei64_64_v, int64_t, idx_d)
+GEN_VEXT_AMO(vamoandei64_32_v, int32_t, idx_d)
+GEN_VEXT_AMO(vamoandei64_64_v, int64_t, idx_d)
+GEN_VEXT_AMO(vamoorei64_32_v, int32_t, idx_d)
+GEN_VEXT_AMO(vamoorei64_64_v, int64_t, idx_d)
+GEN_VEXT_AMO(vamominei64_32_v, int32_t, idx_d)
+GEN_VEXT_AMO(vamominei64_64_v, int64_t, idx_d)
+GEN_VEXT_AMO(vamomaxei64_32_v, int32_t, idx_d)
+GEN_VEXT_AMO(vamomaxei64_64_v, int64_t, idx_d)
+GEN_VEXT_AMO(vamominuei64_32_v, int32_t, idx_d)
+GEN_VEXT_AMO(vamominuei64_64_v, int64_t, idx_d)
+GEN_VEXT_AMO(vamomaxuei64_32_v, int32_t, idx_d)
+GEN_VEXT_AMO(vamomaxuei64_64_v, int64_t, idx_d)
 #endif
-GEN_VEXT_AMO(vamoswapw_v_w, int32_t, int32_t, idx_w)
-GEN_VEXT_AMO(vamoaddw_v_w, int32_t, int32_t, idx_w)
-GEN_VEXT_AMO(vamoxorw_v_w, int32_t, int32_t, idx_w)
-GEN_VEXT_AMO(vamoandw_v_w, int32_t, int32_t, idx_w)
-GEN_VEXT_AMO(vamoorw_v_w, int32_t, int32_t, idx_w)
-GEN_VEXT_AMO(vamominw_v_w, int32_t, int32_t, idx_w)
-GEN_VEXT_AMO(vamomaxw_v_w, int32_t, int32_t, idx_w)
-GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w)
-GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w)
 /*
  *** Vector Integer Arithmetic Instructions
--
2.33.1
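
Aside (not part of the patch): for reference, a hand expansion of one GEN_VEXT_AMO_NOATOMIC_OP instance from the vector_helper.c hunk above, GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei8_32_v, uint32_t, H4, DO_SWAP, l). The write-back tail falls between the two hunks shown, so its exact shape here is an assumption based on the visible closing braces:

static void
vext_vamoswapei8_32_v_noatomic_op(void *vs3, target_ulong addr,
                                  uint32_t wd, uint32_t idx,
                                  CPURISCVState *env, uintptr_t retaddr)
{
    uint32_t *pe3 = (uint32_t *)vs3 + H4(idx);
    uint32_t a = cpu_ldl_data(env, addr), b = *pe3;

    cpu_stl_data(env, addr, DO_SWAP(a, b)); /* memory <- op(old memory, vs3 element) */
    if (wd) {
        *pe3 = a; /* vd <- original memory value (assumed tail) */
    }
}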