0025-target-riscv-rvv-1.0-configure-instructions.patch

From 371b8815c98cf4e3f8e2048c3b2ee1c2830e537f Mon Sep 17 00:00:00 2001
From: Frank Chang <frank.chang@sifive.com>
Date: Thu, 30 Jul 2020 18:40:37 +0800
Subject: [PATCH 025/107] target/riscv: rvv-1.0: configure instructions

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/insn_trans/trans_rvv.c.inc | 53 +++++++++----------------
 target/riscv/vector_helper.c            | 14 ++++++-
 2 files changed, 31 insertions(+), 36 deletions(-)

diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index dcffc14909..fcb01d1b5f 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -132,28 +132,29 @@ static bool require_noover_seg(const int8_t dst, const int8_t nf,
     return !is_overlapped(dst, nf, src, 1);
 }
 
-static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
+static bool do_vsetvl(DisasContext *ctx, int rd, int rs1, TCGv s2)
 {
-    TCGv s1, s2, dst;
+    TCGv s1, dst;
 
     if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
         return false;
     }
 
-    s2 = tcg_temp_new();
     dst = tcg_temp_new();
 
-    /* Using x0 as the rs1 register specifier, encodes an infinite AVL */
-    if (a->rs1 == 0) {
+    if (rd == 0 && rs1 == 0) {
+        s1 = tcg_temp_new();
+        tcg_gen_mov_tl(s1, cpu_vl);
+    } else if (rs1 == 0) {
         /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
         s1 = tcg_const_tl(RV_VLEN_MAX);
     } else {
         s1 = tcg_temp_new();
-        gen_get_gpr(s1, a->rs1);
+        gen_get_gpr(s1, rs1);
     }
-    gen_get_gpr(s2, a->rs2);
+
     gen_helper_vsetvl(dst, cpu_env, s1, s2);
-    gen_set_gpr(a->rd, dst);
+    gen_set_gpr(rd, dst);
     mark_vs_dirty(ctx);
     tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
     lookup_and_goto_ptr(ctx);
@@ -165,35 +166,17 @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
     return true;
 }
 
-static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
+static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
 {
-    TCGv s1, s2, dst;
-
-    if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
-        return false;
-    }
-
-    s2 = tcg_const_tl(a->zimm);
-    dst = tcg_temp_new();
-
-    /* Using x0 as the rs1 register specifier, encodes an infinite AVL */
-    if (a->rs1 == 0) {
-        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
-        s1 = tcg_const_tl(RV_VLEN_MAX);
-    } else {
-        s1 = tcg_temp_new();
-        gen_get_gpr(s1, a->rs1);
-    }
-    gen_helper_vsetvl(dst, cpu_env, s1, s2);
-    gen_set_gpr(a->rd, dst);
-    mark_vs_dirty(ctx);
-    gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
-    ctx->base.is_jmp = DISAS_NORETURN;
+    TCGv s2 = tcg_temp_new();
+    gen_get_gpr(s2, a->rs2);
+    return do_vsetvl(ctx, a->rd, a->rs1, s2);
+}
 
-    tcg_temp_free(s1);
-    tcg_temp_free(s2);
-    tcg_temp_free(dst);
-    return true;
+static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
+{
+    TCGv s2 = tcg_const_tl(a->zimm);
+    return do_vsetvl(ctx, a->rd, a->rs1, s2);
 }
 
 /* vector register offset from env */
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 5a142a1f4b..e8912ee8fe 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -31,12 +31,24 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
 {
     int vlmax, vl;
     RISCVCPU *cpu = env_archcpu(env);
+    uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL);
     uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
     uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
     bool vill = FIELD_EX64(s2, VTYPE, VILL);
     target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED);
 
-    if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) {
+    if (lmul & 4) {
+        /* Fractional LMUL. */
+        if (lmul == 4 ||
+            cpu->cfg.elen >> (8 - lmul) < sew) {
+            vill = true;
+        }
+    }
+
+    if ((sew > cpu->cfg.elen)
+        || vill
+        || (ediv != 0)
+        || (reserved != 0)) {
         /* only set vill bit. */
         env->vtype = FIELD_DP64(0, VTYPE, VILL, 1);
         env->vl = 0;
-- 
2.33.1
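
Illustrative note, not part of the patch: the check added to HELPER(vsetvl) rejects fractional LMUL settings the configured ELEN cannot support. In the vtype VLMUL field, bit 2 set means fractional LMUL (encodings 5, 6, 7 stand for 1/8, 1/4, 1/2; 4 is reserved), so "cpu->cfg.elen >> (8 - lmul) < sew" tests whether ELEN * LMUL < SEW. A minimal standalone sketch of the same rule follows; the helper name and parameters are invented here for illustration only.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper mirroring the vill condition added in the patch. */
    static bool fractional_lmul_unsupported(uint8_t vlmul, uint16_t sew, uint16_t elen)
    {
        if (vlmul & 4) {                 /* bit 2 set: fractional LMUL */
            if (vlmul == 4) {            /* encoding 100 is reserved   */
                return true;
            }
            /* 5 -> 1/8, 6 -> 1/4, 7 -> 1/2: require ELEN * LMUL >= SEW */
            if ((elen >> (8 - vlmul)) < sew) {
                return true;
            }
        }
        return false;
    }

    /* e.g. with ELEN = 64: SEW = 8 at LMUL = 1/8 passes, SEW = 16 at LMUL = 1/8 sets vill. */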