0084-target-riscv-rvv-1.0-patch-floating-point-reduction-.patch

From deceb2c6b3ba82c259c57bda1226e0ac6cbb2757 Mon Sep 17 00:00:00 2001
From: Frank Chang <frank.chang@sifive.com>
Date: Mon, 17 Aug 2020 16:11:32 +0800
Subject: [PATCH 084/107] target/riscv: rvv-1.0: patch floating-point reduction
 instructions

In SiFive's RTL implementation, when no elements are active and the
scalar input is NaN:

* vfredosum: pass the original NaN through and set no exception flags.
* vfredsum: canonicalize the NaN and, if the NaN is signaling, set the
  invalid exception flag.

The same principles also apply to the vfwredosum and vfwredsum
instructions.

Signed-off-by: Frank Chang <frank.chang@sifive.com>
---
 target/riscv/helper.h                   |   5 +
 target/riscv/insn32.decode              |   6 +-
 target/riscv/insn_trans/trans_rvv.c.inc |   2 +
 target/riscv/vector_helper.c            | 190 ++++++++++++++++++++----
 4 files changed, 171 insertions(+), 32 deletions(-)
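
A quick standalone illustration of the behaviour described above, before the
diff itself. This sketch is not QEMU code: fclass32(), reduce_inactive_result()
and the FCLASS_* constants are invented for the example, but the NaN
classification bits (bit 8 = signaling NaN, bit 9 = quiet NaN) and the binary32
canonical quiet NaN encoding are the ones the patch relies on. It models only
the case where vl > 0 and every element is masked off.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FCLASS_SNAN        0x100u        /* fclass bit 8: signaling NaN */
#define FCLASS_QNAN        0x200u        /* fclass bit 9: quiet NaN     */
#define F32_CANONICAL_NAN  0x7fc00000u   /* RISC-V canonical quiet NaN  */

/* Classify a raw binary32 value: is it a NaN, and if so, which kind? */
static uint32_t fclass32(uint32_t f)
{
    uint32_t exp  = (f >> 23) & 0xffu;
    uint32_t frac = f & 0x7fffffu;

    if (exp == 0xffu && frac != 0) {
        return (frac & 0x400000u) ? FCLASS_QNAN : FCLASS_SNAN;
    }
    return 0; /* other fclass categories do not matter here */
}

/* Value written to vd[0] when vl > 0 but no element is active (SEW = 32). */
static uint32_t reduce_inactive_result(uint32_t s1, bool ordered,
                                       bool *invalid)
{
    uint32_t cls = fclass32(s1);

    *invalid = false;
    if (ordered || cls == 0) {
        return s1;                  /* vfredosum: pass through, no flags */
    }
    if (cls & FCLASS_SNAN) {
        *invalid = true;            /* vfredsum: sNaN raises invalid */
    }
    return F32_CANONICAL_NAN;       /* vfredsum: canonical quiet NaN */
}

int main(void)
{
    bool invalid;
    uint32_t snan = 0x7fa00001u;    /* signaling NaN with a payload */

    printf("vfredosum: 0x%08x\n", reduce_inactive_result(snan, true, &invalid));
    printf("vfredsum:  0x%08x invalid=%d\n",
           reduce_inactive_result(snan, false, &invalid), invalid);
    return 0;
}

Compiled and run, it prints 0x7fa00001 for the ordered case and 0x7fc00000
with invalid=1 for the unordered case, matching the two bullets in the
commit message.
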
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index f085f2fbb5..38721ea5ee 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1079,6 +1079,9 @@ DEF_HELPER_6(vwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredosum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
@@ -1088,6 +1091,8 @@ DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 12fbad1b6b..83cd63f831 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -603,11 +603,13 @@ vredmax_vs      000111 . ..... ..... 010 ..... 1010111 @r_vm
 vwredsumu_vs    110000 . ..... ..... 000 ..... 1010111 @r_vm
 vwredsum_vs     110001 . ..... ..... 000 ..... 1010111 @r_vm
 # Vector ordered and unordered reduction sum
-vfredsum_vs     0000-1 . ..... ..... 001 ..... 1010111 @r_vm
+vfredsum_vs     000001 . ..... ..... 001 ..... 1010111 @r_vm
+vfredosum_vs    000011 . ..... ..... 001 ..... 1010111 @r_vm
 vfredmin_vs     000101 . ..... ..... 001 ..... 1010111 @r_vm
 vfredmax_vs     000111 . ..... ..... 001 ..... 1010111 @r_vm
 # Vector widening ordered and unordered float reduction sum
-vfwredsum_vs    1100-1 . ..... ..... 001 ..... 1010111 @r_vm
+vfwredsum_vs    110001 . ..... ..... 001 ..... 1010111 @r_vm
+vfwredosum_vs   110011 . ..... ..... 001 ..... 1010111 @r_vm
 vmand_mm        011001 - ..... ..... 010 ..... 1010111 @r
 vmnand_mm       011101 - ..... ..... 010 ..... 1010111 @r
 vmandnot_mm     011000 - ..... ..... 010 ..... 1010111 @r
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index c7ef4fa0ac..84e81ffa4d 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -3051,6 +3051,7 @@ static bool freduction_check(DisasContext *s, arg_rmrr *a)
 }
 GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
+GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
 GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
 GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
@@ -3063,6 +3064,7 @@ static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
 }
 GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, freduction_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)
 /*
  *** Vector Mask Operations
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index aca8e63023..a77e315045 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -4597,43 +4597,155 @@ GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD)
 GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD)
 /* Vector Single-Width Floating-Point Reduction Instructions */
-#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP) \
-void HELPER(NAME)(void *vd, void *v0, void *vs1, \
-                  void *vs2, CPURISCVState *env, \
-                  uint32_t desc) \
-{ \
-    uint32_t vm = vext_vm(desc); \
-    uint32_t vl = env->vl; \
-    uint32_t i; \
-    TD s1 = *((TD *)vs1 + HD(0)); \
- \
-    for (i = env->vstart; i < vl; i++) { \
-        TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
-        if (!vm && !vext_elem_mask(v0, i)) { \
-            continue; \
-        } \
-        s1 = OP(s1, (TD)s2, &env->fp_status); \
-    } \
-    *((TD *)vd + HD(0)) = s1; \
-    env->vstart = 0; \
+/*
+ * If f is NaN, return SEW-bit canonical NaN.
+ * Set the invalid exception flag if f is a sNaN.
+ */
+static uint64_t propagate_nan(uint64_t f, uint32_t sew, float_status *s)
+{
+    target_ulong ret;
+
+    switch (sew) {
+    case 16:
+        ret = fclass_h(f);
+        /* check if f is NaN */
+        if (ret & 0x300) {
+            /* check if f is a sNaN */
+            if (ret & 0x100) {
+                s->float_exception_flags |= float_flag_invalid;
+            }
+            /* return canonical NaN */
+            return float16_default_nan(s);
+        } else {
+            return f;
+        }
+        break;
+    case 32:
+        ret = fclass_s(f);
+        /* check if f is NaN */
+        if (ret & 0x300) {
+            /* check if f is a sNaN */
+            if (ret & 0x100) {
+                s->float_exception_flags |= float_flag_invalid;
+            }
+            /* return canonical NaN */
+            return float32_default_nan(s);
+        } else {
+            return f;
+        }
+        break;
+    case 64:
+        ret = fclass_d(f);
+        /* check if f is NaN */
+        if (ret & 0x300) {
+            /* check if f is a sNaN */
+            if (ret & 0x100) {
+                s->float_exception_flags |= float_flag_invalid;
+            }
+            /* return canonical NaN */
+            return float64_default_nan(s);
+        } else {
+            return f;
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, PROPAGATE_NAN, OP) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
+                  void *vs2, CPURISCVState *env, \
+                  uint32_t desc) \
+{ \
+    uint32_t vm = vext_vm(desc); \
+    uint32_t vl = env->vl; \
+    uint32_t i; \
+    bool active = false; \
+    TD s1 = *((TD *)vs1 + HD(0)); \
+ \
+    for (i = env->vstart; i < vl; i++) { \
+        TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
+        if (!vm && !vext_elem_mask(v0, i)) { \
+            continue; \
+        } \
+        active = true; \
+        s1 = OP(s1, (TD)s2, &env->fp_status); \
+    } \
+ \
+    if (vl > 0) { \
+        if (PROPAGATE_NAN && !active) { \
+            *((TD *)vd + HD(0)) = propagate_nan(s1, sizeof(TD) * 8, \
+                                                &env->fp_status); \
+        } else { \
+            *((TD *)vd + HD(0)) = s1; \
+        } \
+    } \
+    env->vstart = 0; \
 }
+/* Ordered sum */
+GEN_VEXT_FRED(vfredosum_vs_h, uint16_t, uint16_t, H2, H2, false, float16_add)
+GEN_VEXT_FRED(vfredosum_vs_w, uint32_t, uint32_t, H4, H4, false, float32_add)
+GEN_VEXT_FRED(vfredosum_vs_d, uint64_t, uint64_t, H8, H8, false, float64_add)
+
 /* Unordered sum */
-GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
-GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
-GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
+GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, true, float16_add)
+GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, true, float32_add)
+GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, true, float64_add)
 /* Maximum value */
-GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum_noprop)
-GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum_noprop)
-GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum_noprop)
+GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, false, float16_maxnum_noprop)
+GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, false, float32_maxnum_noprop)
+GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, false, float64_maxnum_noprop)
 /* Minimum value */
-GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum_noprop)
-GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum_noprop)
-GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum_noprop)
+GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, false, float16_minnum_noprop)
+GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, false, float32_minnum_noprop)
+GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, false, float64_minnum_noprop)
 /* Vector Widening Floating-Point Reduction Instructions */
+/* Ordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
+void HELPER(vfwredosum_vs_h)(void *vd, void *v0, void *vs1,
+                             void *vs2, CPURISCVState *env, uint32_t desc)
+{
+    uint32_t vm = vext_vm(desc);
+    uint32_t vl = env->vl;
+    uint32_t i;
+    uint32_t s1 = *((uint32_t *)vs1 + H4(0));
+
+    for (i = env->vstart; i < vl; i++) {
+        uint16_t s2 = *((uint16_t *)vs2 + H2(i));
+        if (!vm && !vext_elem_mask(v0, i)) {
+            continue;
+        }
+        s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status),
+                         &env->fp_status);
+    }
+    *((uint32_t *)vd + H4(0)) = s1;
+    env->vstart = 0;
+}
+
+void HELPER(vfwredosum_vs_w)(void *vd, void *v0, void *vs1,
+                             void *vs2, CPURISCVState *env, uint32_t desc)
+{
+    uint32_t vm = vext_vm(desc);
+    uint32_t vl = env->vl;
+    uint32_t i;
+    uint64_t s1 = *((uint64_t *)vs1);
+
+    for (i = env->vstart; i < vl; i++) {
+        uint32_t s2 = *((uint32_t *)vs2 + H4(i));
+        if (!vm && !vext_elem_mask(v0, i)) {
+            continue;
+        }
+        s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
+                         &env->fp_status);
+    }
+    *((uint64_t *)vd) = s1;
+    env->vstart = 0;
+}
+
 /* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
 void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
                             void *vs2, CPURISCVState *env, uint32_t desc)
@@ -4642,16 +4754,25 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
     uint32_t vl = env->vl;
     uint32_t i;
     uint32_t s1 = *((uint32_t *)vs1 + H4(0));
+    bool active = false;
     for (i = env->vstart; i < vl; i++) {
         uint16_t s2 = *((uint16_t *)vs2 + H2(i));
         if (!vm && !vext_elem_mask(v0, i)) {
             continue;
         }
+        active = true;
         s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status),
                          &env->fp_status);
     }
-    *((uint32_t *)vd + H4(0)) = s1;
+
+    if (vl > 0) {
+        if (!active) {
+            *((uint32_t *)vd + H4(0)) = propagate_nan(s1, 32, &env->fp_status);
+        } else {
+            *((uint32_t *)vd + H4(0)) = s1;
+        }
+    }
     env->vstart = 0;
 }
@@ -4662,16 +4783,25 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
     uint32_t vl = env->vl;
     uint32_t i;
     uint64_t s1 = *((uint64_t *)vs1);
+    bool active = false;
     for (i = env->vstart; i < vl; i++) {
         uint32_t s2 = *((uint32_t *)vs2 + H4(i));
         if (!vm && !vext_elem_mask(v0, i)) {
             continue;
         }
+        active = true;
         s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
                          &env->fp_status);
     }
-    *((uint64_t *)vd) = s1;
+
+    if (vl > 0) {
+        if (!active) {
+            *((uint64_t *)vd) = propagate_nan(s1, 64, &env->fp_status);
+        } else {
+            *((uint64_t *)vd) = s1;
+        }
+    }
     env->vstart = 0;
 }
--
2.33.1
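
One further illustration, outside the patch itself: the comment
"Ordered reduce 2*SEW = 2*SEW + sum(promote(SEW))" above the new vfwredosum
helpers describes a promote-then-accumulate loop. A simplified host-side
model of that shape (invented names, host doubles instead of softfloat, a
plain bool array instead of the packed mask register, and no fp_status or
NaN handling) might look like this:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Model of vfwredosum.vs for SEW = 32: vd[0] = vs1[0] plus every active
 * vs2[i] promoted to 2*SEW, accumulated strictly in element order.
 */
static double model_vfwredosum_w(double s1, const float *vs2,
                                 const bool *mask, bool vm,
                                 size_t vstart, size_t vl)
{
    for (size_t i = vstart; i < vl; i++) {
        if (!vm && !mask[i]) {
            continue;                 /* skip masked-off elements */
        }
        s1 += (double)vs2[i];         /* promote SEW -> 2*SEW, then add */
    }
    return s1;
}

int main(void)
{
    float  vs2[4]  = { 1.0f, 2.0f, 3.0f, 4.0f };
    bool   mask[4] = { true, false, true, true };
    double acc     = model_vfwredosum_w(0.25, vs2, mask, false, 0, 4);

    printf("accumulated: %g\n", acc);  /* 0.25 + 1 + 3 + 4 = 8.25 */
    return 0;
}

The ordered form must accumulate strictly in element-index order, which is
why the helpers keep a plain sequential loop; the unordered vfwredsum
variant is free to reassociate the additions.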