0032-target-riscv-rvv-1.0-update-vext_max_elems-for-load-.patch

From 1c1e08410bf1c6f1f8fe14aaa369b9cb57192293 Mon Sep 17 00:00:00 2001
From: Frank Chang <frank.chang@sifive.com>
Date: Tue, 29 Sep 2020 23:25:37 +0800
Subject: [PATCH 032/107] target/riscv: rvv-1.0: update vext_max_elems() for
 load/store insns

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/insn_trans/trans_rvv.c.inc | 32 +++++++--
 target/riscv/vector_helper.c            | 90 ++++++++++++++-----------
 2 files changed, 74 insertions(+), 48 deletions(-)
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 146d330894..a992bd170d 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -586,6 +586,12 @@ static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
     return false; \
 }
 
+static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
+{
+    int8_t emul = eew - s->sew + s->lmul;
+    return emul < 0 ? 0 : emul;
+}
+
 /*
  *** unit stride load and store
  */
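The new helper works entirely in log2 space: s->sew and eew hold log2 of the element width in bytes, and s->lmul holds log2 of LMUL, so the spec formula EMUL = (EEW / SEW) * LMUL reduces to an addition. A minimal standalone sketch of the same computation (the harness and sample values are illustrative, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Same arithmetic as vext_get_emul() above, decoupled from DisasContext:
     * eew, sew and lmul are all log2-encoded, so EMUL = (EEW / SEW) * LMUL
     * becomes eew - sew + lmul, clamped at 0 (fractional EMUL still occupies
     * one vector register). */
    static uint8_t emul_log2(uint8_t eew, uint8_t sew, int8_t lmul)
    {
        int8_t emul = eew - sew + lmul;
        return emul < 0 ? 0 : emul;
    }

    int main(void)
    {
        /* vle16.v with SEW=32, LMUL=2: EMUL = (16/32) * 2 = 1 -> log2 = 0 */
        assert(emul_log2(1, 2, 1) == 0);
        /* vle64.v with SEW=32, LMUL=2: EMUL = (64/32) * 2 = 4 -> log2 = 2 */
        assert(emul_log2(3, 2, 1) == 2);
        return 0;
    }

The hunks below store this EMUL, rather than LMUL, in the VDATA descriptor field, as the comment in the first hunk explains.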
@@ -651,8 +657,14 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
         return false;
     }
 
+    /*
+     * Vector load/store instructions have the EEW encoded
+     * directly in the instructions. The maximum vector size is
+     * calculated with EMUL rather than LMUL.
+     */
+    uint8_t emul = vext_get_emul(s, eew);
     data = FIELD_DP32(data, VDATA, VM, a->vm);
-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+    data = FIELD_DP32(data, VDATA, LMUL, emul);
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
 }
@@ -687,8 +699,9 @@ static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
         return false;
     }
 
+    uint8_t emul = vext_get_emul(s, eew);
     data = FIELD_DP32(data, VDATA, VM, a->vm);
-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+    data = FIELD_DP32(data, VDATA, LMUL, emul);
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
 }
@@ -761,8 +774,9 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
         return false;
     }
 
+    uint8_t emul = vext_get_emul(s, eew);
     data = FIELD_DP32(data, VDATA, VM, a->vm);
-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+    data = FIELD_DP32(data, VDATA, LMUL, emul);
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
 }
@@ -789,8 +803,9 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
         gen_helper_vsse32_v, gen_helper_vsse64_v
     };
 
+    uint8_t emul = vext_get_emul(s, eew);
     data = FIELD_DP32(data, VDATA, VM, a->vm);
-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+    data = FIELD_DP32(data, VDATA, LMUL, emul);
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     fn = fns[eew];
     if (fn == NULL) {
@@ -887,8 +902,9 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
 
     fn = fns[eew][s->sew];
 
+    uint8_t emul = vext_get_emul(s, s->sew);
     data = FIELD_DP32(data, VDATA, VM, a->vm);
-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+    data = FIELD_DP32(data, VDATA, LMUL, emul);
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
 }
@@ -938,8 +954,9 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
 
     fn = fns[eew][s->sew];
 
+    uint8_t emul = vext_get_emul(s, s->sew);
     data = FIELD_DP32(data, VDATA, VM, a->vm);
-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+    data = FIELD_DP32(data, VDATA, LMUL, emul);
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
 }
@@ -1003,8 +1020,9 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
         return false;
     }
 
+    uint8_t emul = vext_get_emul(s, eew);
     data = FIELD_DP32(data, VDATA, VM, a->vm);
-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+    data = FIELD_DP32(data, VDATA, LMUL, emul);
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     return ldff_trans(a->rd, a->rs1, data, fn, s);
 }
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 05ec6e040c..0a9fb898a1 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -17,6 +17,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/host-utils.h"
 #include "cpu.h"
 #include "exec/memop.h"
 #include "exec/exec-all.h"
@@ -121,14 +122,21 @@ static uint32_t vext_wd(uint32_t desc)
 }
 
 /*
- * Get vector group length in bytes. Its range is [64, 2048].
+ * Get the maximum number of elements that can be operated on.
  *
- * As simd_desc support at most 256, the max vlen is 512 bits.
- * So vlen in bytes is encoded as maxsz.
+ * esz: log2 of element size in bytes.
  */
-static inline uint32_t vext_maxsz(uint32_t desc)
+static inline uint32_t vext_max_elems(uint32_t desc, uint32_t esz)
 {
-    return simd_maxsz(desc) << vext_lmul(desc);
+    /*
+     * As simd_desc supports at most 256 bytes, the max vlen is 256 bits,
+     * so vlen in bytes (vlenb) is encoded as maxsz.
+     */
+    uint32_t vlenb = simd_maxsz(desc);
+
+    /* Return VLMAX */
+    int scale = vext_lmul(desc) - esz;
+    return scale < 0 ? vlenb >> -scale : vlenb << scale;
 }
 
 /*
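To see that the scale arithmetic above really returns VLMAX = LMUL * VLEN / SEW, here is the same computation lifted into a standalone harness (the desc decoding is replaced by explicit parameters; the sample values are ours, not the patch's):

    #include <assert.h>
    #include <stdint.h>

    /* Same body as vext_max_elems() above: vlenb = VLEN / 8,
     * lmul and esz are log2 values. */
    static uint32_t max_elems(uint32_t vlenb, int32_t lmul, uint32_t esz)
    {
        int32_t scale = lmul - esz;
        return scale < 0 ? vlenb >> -scale : vlenb << scale;
    }

    int main(void)
    {
        /* VLEN=128 (vlenb=16), LMUL=2, SEW=32 (esz=2): VLMAX = 2*128/32 = 8 */
        assert(max_elems(16, 1, 2) == 8);
        /* VLEN=128, LMUL=1/2 (lmul=-1), SEW=8 (esz=0): VLMAX = (128/2)/8 = 8 */
        assert(max_elems(16, -1, 0) == 8);
        /* VLEN=128, LMUL=8 (lmul=3), SEW=64 (esz=3): VLMAX = 8*128/64 = 16 */
        assert(max_elems(16, 3, 3) == 16);
        return 0;
    }
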
@@ -221,14 +229,14 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
 {
     uint32_t i, k;
     uint32_t nf = vext_nf(desc);
-    uint32_t vlmax = vext_maxsz(desc) / esz;
+    uint32_t max_elems = vext_max_elems(desc, esz);
 
     /* probe every access*/
     for (i = 0; i < env->vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             continue;
         }
-        probe_pages(env, base + stride * i, nf * esz, ra, access_type);
+        probe_pages(env, base + stride * i, nf << esz, ra, access_type);
     }
     /* do real access */
     for (i = 0; i < env->vl; i++) {
@@ -237,8 +245,8 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
             continue;
         }
         while (k < nf) {
-            target_ulong addr = base + stride * i + k * esz;
-            ldst_elem(env, addr, i + k * vlmax, vd, ra);
+            target_ulong addr = base + stride * i + (k << esz);
+            ldst_elem(env, addr, i + k * max_elems, vd, ra);
             k++;
         }
     }
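These hunks also carry the companion change running through the rest of the file: esz used to be the element size in bytes (sizeof(ETYPE)) and is now its log2 (the callers below switch to ctzl(sizeof(ETYPE))), so byte counts such as nf * esz become shifts, nf << esz. A tiny illustrative check, using __builtin_ctzl as a stand-in for QEMU's ctzl():

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* ctzl (count trailing zeros) is log2 for powers of two. */
        uint32_t esz = __builtin_ctzl(sizeof(uint64_t)); /* 3 */
        uint32_t nf = 4;                 /* 4 fields per segment */

        /* nf << esz == nf * sizeof(ETYPE): the segment size in bytes. */
        assert((nf << esz) == nf * sizeof(uint64_t));
        return 0;
    }
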
@@ -251,7 +259,7 @@ void HELPER(NAME)(void *vd, void * v0, target_ulong base, \
 { \
     uint32_t vm = vext_vm(desc); \
     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN, \
-                     sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
 }
 
 GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b)
@@ -266,7 +274,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
 { \
     uint32_t vm = vext_vm(desc); \
     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN, \
-                     sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \
 }
 
 GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b)
@@ -286,16 +294,16 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
 {
     uint32_t i, k;
     uint32_t nf = vext_nf(desc);
-    uint32_t vlmax = vext_maxsz(desc) / esz;
+    uint32_t max_elems = vext_max_elems(desc, esz);
 
     /* probe every access */
-    probe_pages(env, base, env->vl * nf * esz, ra, access_type);
+    probe_pages(env, base, env->vl * (nf << esz), ra, access_type);
     /* load bytes from guest memory */
     for (i = 0; i < env->vl; i++) {
         k = 0;
         while (k < nf) {
-            target_ulong addr = base + (i * nf + k) * esz;
-            ldst_elem(env, addr, i + k * vlmax, vd, ra);
+            target_ulong addr = base + ((i * nf + k) << esz);
+            ldst_elem(env, addr, i + k * max_elems, vd, ra);
             k++;
         }
     }
@@ -310,16 +318,16 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
 void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
                          CPURISCVState *env, uint32_t desc) \
 { \
-    uint32_t stride = vext_nf(desc) * sizeof(ETYPE); \
+    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
     vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
-                     sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
 } \
 \
 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   CPURISCVState *env, uint32_t desc) \
 { \
     vext_ldst_us(vd, base, env, desc, LOAD_FN, \
-                 sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
 }
 
 GEN_VEXT_LD_US(vle8_v, int8_t, lde_b)
@@ -331,16 +339,16 @@ GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
 void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
                          CPURISCVState *env, uint32_t desc) \
 { \
-    uint32_t stride = vext_nf(desc) * sizeof(ETYPE); \
+    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
     vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
-                     sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \
 } \
 \
 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   CPURISCVState *env, uint32_t desc) \
 { \
     vext_ldst_us(vd, base, env, desc, STORE_FN, \
-                 sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \
 }
 
 GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)
@@ -376,14 +384,14 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
     uint32_t i, k;
     uint32_t nf = vext_nf(desc);
     uint32_t vm = vext_vm(desc);
-    uint32_t vlmax = vext_maxsz(desc) / esz;
+    uint32_t max_elems = vext_max_elems(desc, esz);
 
     /* probe every access*/
     for (i = 0; i < env->vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             continue;
         }
-        probe_pages(env, get_index_addr(base, i, vs2), nf * esz, ra,
+        probe_pages(env, get_index_addr(base, i, vs2), nf << esz, ra,
                     access_type);
     }
     /* load bytes from guest memory */
@@ -393,8 +401,8 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
             continue;
         }
         while (k < nf) {
-            abi_ptr addr = get_index_addr(base, i, vs2) + k * esz;
-            ldst_elem(env, addr, i + k * vlmax, vd, ra);
+            abi_ptr addr = get_index_addr(base, i, vs2) + (k << esz);
+            ldst_elem(env, addr, i + k * max_elems, vd, ra);
             k++;
         }
     }
@@ -405,7 +413,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   void *vs2, CPURISCVState *env, uint32_t desc) \
 { \
     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
-                    LOAD_FN, sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+                    LOAD_FN, ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
 }
 
 GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b)
@@ -430,7 +438,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   void *vs2, CPURISCVState *env, uint32_t desc) \
 { \
     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
-                    STORE_FN, sizeof(ETYPE), \
+                    STORE_FN, ctzl(sizeof(ETYPE)), \
                     GETPC(), MMU_DATA_STORE); \
 }
 
@@ -464,7 +472,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
     uint32_t i, k, vl = 0;
     uint32_t nf = vext_nf(desc);
     uint32_t vm = vext_vm(desc);
-    uint32_t vlmax = vext_maxsz(desc) / esz;
+    uint32_t max_elems = vext_max_elems(desc, esz);
     target_ulong addr, offset, remain;
 
     /* probe every access*/
@@ -472,24 +480,24 @@ vext_ldff(void *vd, void *v0, target_ulong base,
         if (!vm && !vext_elem_mask(v0, i)) {
             continue;
         }
-        addr = base + nf * i * esz;
+        addr = base + i * (nf << esz);
         if (i == 0) {
-            probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
+            probe_pages(env, addr, nf << esz, ra, MMU_DATA_LOAD);
         } else {
             /* if it triggers an exception, no need to check watchpoint */
-            remain = nf * esz;
+            remain = nf << esz;
             while (remain > 0) {
                 offset = -(addr | TARGET_PAGE_MASK);
                 host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
                                          cpu_mmu_index(env, false));
                 if (host) {
 #ifdef CONFIG_USER_ONLY
-                    if (page_check_range(addr, nf * esz, PAGE_READ) < 0) {
+                    if (page_check_range(addr, nf << esz, PAGE_READ) < 0) {
                         vl = i;
                         goto ProbeSuccess;
                     }
 #else
-                    probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
+                    probe_pages(env, addr, nf << esz, ra, MMU_DATA_LOAD);
 #endif
                 } else {
                     vl = i;
@@ -514,8 +522,8 @@ ProbeSuccess:
             continue;
         }
         while (k < nf) {
-            target_ulong addr = base + (i * nf + k) * esz;
-            ldst_elem(env, addr, i + k * vlmax, vd, ra);
+            target_ulong addr = base + ((i * nf + k) << esz);
+            ldst_elem(env, addr, i + k * max_elems, vd, ra);
             k++;
         }
     }
@@ -526,7 +534,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   CPURISCVState *env, uint32_t desc) \
 { \
     vext_ldff(vd, v0, base, env, desc, LOAD_FN, \
-              sizeof(ETYPE), GETPC()); \
+              ctzl(sizeof(ETYPE)), GETPC()); \
 }
 
 GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b)
@@ -739,7 +747,7 @@ void HELPER(NAME)(void *vs3, void *v0, target_ulong base, \
 { \
     vext_amo_noatomic(vs3, v0, base, vs2, env, desc, \
                       INDEX_FN, vext_##NAME##_noatomic_op, \
-                      sizeof(ETYPE), GETPC()); \
+                      ctzl(sizeof(ETYPE)), GETPC()); \
 }
 
 GEN_VEXT_AMO(vamoswapei8_32_v, int32_t, idx_b)
@@ -1225,7 +1233,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
                   void *vs2, CPURISCVState *env, uint32_t desc) \
 { \
     uint32_t vl = env->vl; \
-    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
     uint32_t i; \
 \
     for (i = 0; i < vl; i++) { \
@@ -3880,7 +3888,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
 { \
     uint32_t vm = vext_vm(desc); \
     uint32_t vl = env->vl; \
-    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
     uint32_t i; \
 \
     for (i = 0; i < vl; i++) { \
@@ -4666,7 +4674,7 @@ GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
                   CPURISCVState *env, uint32_t desc) \
 { \
-    uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
+    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
     uint32_t vm = vext_vm(desc); \
     uint32_t vl = env->vl; \
     uint64_t index; \
@@ -4695,7 +4703,7 @@ GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8)
 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
                   CPURISCVState *env, uint32_t desc) \
 { \
-    uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
+    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
     uint32_t vm = vext_vm(desc); \
     uint32_t vl = env->vl; \
     uint64_t index = s1; \
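The two vrgather hunks look like more than a pure refactor: cfg.vlen holds VLEN in bits, so the old bound generally exceeded the real VLMAX, and indices that per the rvv-1.0 spec should gather 0 could instead select other elements of the register group. A standalone sketch of the intended semantics (illustrative helper, not the patch's code):

    #include <assert.h>
    #include <stdint.h>

    /* rvv-1.0 vrgather rule, in isolation: an index at or above VLMAX
     * selects the value 0 rather than reading past the register group. */
    static uint64_t vrgather_one(const uint64_t *vs2, uint64_t index,
                                 uint32_t vlmax)
    {
        return index >= vlmax ? 0 : vs2[index];
    }

    int main(void)
    {
        uint64_t vs2[4] = { 10, 11, 12, 13 }; /* VLMAX = 4 in this example */
        assert(vrgather_one(vs2, 2, 4) == 12);
        assert(vrgather_one(vs2, 9, 4) == 0); /* out of range -> 0 */
        return 0;
    }
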
-- 
2.33.1