0002-neon2rvv-Model-vxrm-in-LLVM-intrinsics-and-add-optim.patch

From 29318f779da9c962af60d574b19159ecc10cfdd7 Mon Sep 17 00:00:00 2001
From: Eric Tang <eric.tang@starfivetech.com>
Date: Thu, 15 Jun 2023 09:30:16 +0800
Subject: [PATCH 2/9] [neon2rvv] Model vxrm in LLVM intrinsics and add
 optimization for removing redundant csrwi
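
The user-visible effect: the fixed-point builtins now take the rounding mode
as an explicit argument instead of reading whatever value happens to be in
vxrm. A rough sketch of the intended C usage (a hand-written illustration,
not taken from this patch; the intrinsic spelling follows rvv-intrinsic-doc
naming and may differ from what this tree generates):

    #include <riscv_vector.h>

    vint32m1_t avg_rnu(vint32m1_t a, vint32m1_t b, size_t vl) {
      // __RISCV_VXRM_RNU comes from the enum this patch emits into
      // riscv_vector.h; the compiler now materializes the csrwi itself.
      return __riscv_vaadd_vv_i32m1(a, b, __RISCV_VXRM_RNU, vl);
    }

At the IR level this lowers to the unmasked intrinsic form
(passthru, op0, op1, vxrm, vl) modeled below.
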
Signed-off-by: Eric Tang <eric.tang@starfivetech.com>
---
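Note: the redundant-csrwi elimination is intentionally naive. The pass walks
each basic block, inserts a csrwi vxrm before a fixed-point instruction, and
skips the write when the previously written immediate already matches. A
hand-written sketch of the intended effect (illustrative assembly, not output
from a test in this series):

    csrwi    vxrm, 0              # emitted for the first vaadd
    vaadd.vv v8, v8, v9
    vaadd.vv v10, v10, v11        # second csrwi vxrm, 0 elided: unchanged
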
 clang/include/clang/Basic/riscv_vector.td     | 134 +++++-
 llvm/include/llvm/IR/IntrinsicsRISCV.td       |  94 +++-
 llvm/lib/Target/RISCV/CMakeLists.txt          |   1 +
 .../Target/RISCV/MCTargetDesc/RISCVBaseInfo.h |   8 +
 llvm/lib/Target/RISCV/RISCV.h                 |   3 +
 .../Target/RISCV/RISCVInsertReadWriteCSR.cpp  | 115 +++++
 llvm/lib/Target/RISCV/RISCVInstrFormats.td    |   3 +
 llvm/lib/Target/RISCV/RISCVInstrInfo.td       |   2 +
 .../Target/RISCV/RISCVInstrInfoVPseudos.td    | 428 +++++++++++++++++-
 llvm/lib/Target/RISCV/RISCVMCInstLower.cpp    |   2 +
 llvm/lib/Target/RISCV/RISCVSystemOperands.td  |   2 +-
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |   2 +
 12 files changed, 747 insertions(+), 47 deletions(-)
 create mode 100644 llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index b23e26ecaa57..9d443be0403c 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -316,11 +316,21 @@ multiclass RVVSignedBinBuiltinSet
                           [["vv", "v", "vvv"],
                            ["vx", "v", "vve"]]>;
+multiclass RVVSignedBinBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet<NAME, "csil",
+                          [["vv", "v", "vvvu"],
+                           ["vx", "v", "vveu"]]>;
+
 multiclass RVVUnsignedBinBuiltinSet
     : RVVOutOp1BuiltinSet<NAME, "csil",
                           [["vv", "Uv", "UvUvUv"],
                            ["vx", "Uv", "UvUvUe"]]>;
+multiclass RVVUnsignedBinBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet<NAME, "csil",
+                          [["vv", "Uv", "UvUvUvu"],
+                           ["vx", "Uv", "UvUvUeu"]]>;
+
 multiclass RVVIntBinBuiltinSet
     : RVVSignedBinBuiltinSet,
       RVVUnsignedBinBuiltinSet;
@@ -335,11 +345,21 @@ multiclass RVVSignedShiftBuiltinSet
                           [["vv", "v", "vvUv"],
                            ["vx", "v", "vvz"]]>;
+multiclass RVVSignedShiftBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet<NAME, "csil",
+                          [["vv", "v", "vvUvu"],
+                           ["vx", "v", "vvzu"]]>;
+
 multiclass RVVUnsignedShiftBuiltinSet
     : RVVOutOp1BuiltinSet<NAME, "csil",
                           [["vv", "Uv", "UvUvUv"],
                            ["vx", "Uv", "UvUvz"]]>;
+multiclass RVVUnsignedShiftBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet<NAME, "csil",
+                          [["vv", "Uv", "UvUvUvu"],
+                           ["vx", "Uv", "UvUvzu"]]>;
+
 multiclass RVVShiftBuiltinSet
     : RVVSignedShiftBuiltinSet,
       RVVUnsignedShiftBuiltinSet;
@@ -349,10 +369,18 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
       : RVVOutOp0Op1BuiltinSet<NAME, "csil",
                                [["wv", "v", "vwUv"],
                                 ["wx", "v", "vwz"]]>;
+  multiclass RVVSignedNShiftBuiltinSetRoundingMode
+      : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+                               [["wv", "v", "vwUvu"],
+                                ["wx", "v", "vwzu"]]>;
   multiclass RVVUnsignedNShiftBuiltinSet
       : RVVOutOp0Op1BuiltinSet<NAME, "csil",
                                [["wv", "Uv", "UvUwUv"],
                                 ["wx", "Uv", "UvUwz"]]>;
+  multiclass RVVUnsignedNShiftBuiltinSetRoundingMode
+      : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+                               [["wv", "Uv", "UvUwUvu"],
+                                ["wx", "Uv", "UvUwzu"]]>;
 }
 multiclass RVVCarryinBuiltinSet
@@ -1928,6 +1956,17 @@ let HasMasked = false,
 }
 // 13. Vector Fixed-Point Arithmetic Instructions
+let HeaderCode =
+[{
+enum __RISCV_VXRM {
+  __RISCV_VXRM_RNU = 0,
+  __RISCV_VXRM_RNE = 1,
+  __RISCV_VXRM_RDN = 2,
+  __RISCV_VXRM_ROD = 3,
+};
+}] in
+def vxrm_enum : RVVHeader;
+
 // 13.1. Vector Single-Width Saturating Add and Subtract
 let UnMaskedPolicyScheme = HasPassthruOperand in {
 defm vsaddu : RVVUnsignedBinBuiltinSet;
@@ -1936,23 +1975,94 @@ defm vssubu : RVVUnsignedBinBuiltinSet;
 defm vssub : RVVSignedBinBuiltinSet;
 // 13.2. Vector Single-Width Averaging Add and Subtract
-defm vaaddu : RVVUnsignedBinBuiltinSet;
-defm vaadd : RVVSignedBinBuiltinSet;
-defm vasubu : RVVUnsignedBinBuiltinSet;
-defm vasub : RVVSignedBinBuiltinSet;
+let ManualCodegen = [{
+  {
+    // LLVM intrinsic
+    // Unmasked: (passthru, op0, op1, round_mode, vl)
+    // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+
+    SmallVector<llvm::Value*, 7> Operands;
+    bool HasMaskedOff = !(
+        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+        (!IsMasked && PolicyAttrs & RVV_VTA));
+    unsigned Offset = IsMasked ?
+        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+    if (!HasMaskedOff)
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+    else
+      Operands.push_back(Ops[Offset - 1]);
+
+    Operands.push_back(Ops[Offset]);     // op0
+    Operands.push_back(Ops[Offset + 1]); // op1
+
+    if (IsMasked)
+      Operands.push_back(Ops[0]); // mask
+
+    Operands.push_back(Ops[Offset + 2]); // vxrm
+    Operands.push_back(Ops[Offset + 3]); // vl
+
+    if (IsMasked)
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+    IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops.back()->getType()};
+    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+    return Builder.CreateCall(F, Operands, "");
+  }
+}] in {
+  defm vaaddu : RVVUnsignedBinBuiltinSetRoundingMode;
+  defm vaadd : RVVSignedBinBuiltinSetRoundingMode;
+  defm vasubu : RVVUnsignedBinBuiltinSetRoundingMode;
+  defm vasub : RVVSignedBinBuiltinSetRoundingMode;
 // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
-let RequiredFeatures = ["FullMultiply"] in {
-defm vsmul : RVVSignedBinBuiltinSet;
-}
+  let RequiredFeatures = ["FullMultiply"] in {
+    defm vsmul : RVVSignedBinBuiltinSetRoundingMode;
+  }
 // 13.4. Vector Single-Width Scaling Shift Instructions
-defm vssrl : RVVUnsignedShiftBuiltinSet;
-defm vssra : RVVSignedShiftBuiltinSet;
-
+  defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;
+  defm vssra : RVVSignedShiftBuiltinSetRoundingMode;
+}
+let ManualCodegen = [{
+  {
+    // LLVM intrinsic
+    // Unmasked: (passthru, op0, op1, round_mode, vl)
+    // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+
+    SmallVector<llvm::Value*, 7> Operands;
+    bool HasMaskedOff = !(
+        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+        (!IsMasked && PolicyAttrs & RVV_VTA));
+    unsigned Offset = IsMasked ?
+        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+    if (!HasMaskedOff)
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+    else
+      Operands.push_back(Ops[Offset - 1]);
+
+    Operands.push_back(Ops[Offset]);     // op0
+    Operands.push_back(Ops[Offset + 1]); // op1
+
+    if (IsMasked)
+      Operands.push_back(Ops[0]); // mask
+
+    Operands.push_back(Ops[Offset + 2]); // vxrm
+    Operands.push_back(Ops[Offset + 3]); // vl
+
+    if (IsMasked)
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+    IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(), Ops.back()->getType()};
+    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+    return Builder.CreateCall(F, Operands, "");
+  }
+}] in {
 // 13.5. Vector Narrowing Fixed-Point Clip Instructions
-defm vnclipu : RVVUnsignedNShiftBuiltinSet;
-defm vnclip : RVVSignedNShiftBuiltinSet;
+  defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode;
+  defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
+}
 // 14. Vector Floating-Point Instructions
 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index e9c88f468076..7103bb5f5e39 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -586,6 +586,17 @@ let TargetPrefix = "riscv" in {
     let ScalarOperand = 2;
     let VLOperand = 3;
   }
+  // For Saturating binary operations with rounding-mode operand
+  // The destination vector type is the same as first source vector.
+  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
+  class RISCVSaturatingBinaryAAXUnMaskedRoundingMode
+      : Intrinsic<[llvm_anyvector_ty],
+                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                   llvm_anyint_ty, LLVMMatchType<2>],
+                  [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 4;
+  }
   // For Saturating binary operations with mask.
   // The destination vector type is the same as first source vector.
   // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
@@ -598,6 +609,18 @@ let TargetPrefix = "riscv" in {
     let ScalarOperand = 2;
     let VLOperand = 4;
   }
+  // For Saturating binary operations with mask and rounding-mode operand
+  // The destination vector type is the same as first source vector.
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+  class RISCVSaturatingBinaryAAXMaskedRoundingMode
+      : Intrinsic<[llvm_anyvector_ty],
+                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                   LLVMMatchType<2>, LLVMMatchType<2>],
+                  [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 5;
+  }
   // For Saturating binary operations.
   // The destination vector type is the same as first source vector.
   // The second source operand matches the destination type or is an XLen scalar.
@@ -609,6 +632,15 @@ let TargetPrefix = "riscv" in {
                 [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let VLOperand = 3;
   }
+
+  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
+  class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode
+      : Intrinsic<[llvm_anyvector_ty],
+                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                   llvm_anyint_ty, LLVMMatchType<2>],
+                  [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 4;
+  }
   // For Saturating binary operations with mask.
   // The destination vector type is the same as first source vector.
   // The second source operand matches the destination type or is an XLen scalar.
@@ -621,6 +653,17 @@ let TargetPrefix = "riscv" in {
                 [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let VLOperand = 4;
   }
+
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+  class RISCVSaturatingBinaryAAShiftMaskedRoundingMode
+      : Intrinsic<[llvm_anyvector_ty],
+                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                   LLVMMatchType<2>, LLVMMatchType<2>],
+                  [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 5;
+  }
+
   // For Saturating binary operations.
   // The destination vector type is NOT the same as first source vector.
   // The second source operand matches the destination type or is an XLen scalar.
@@ -632,6 +675,16 @@ let TargetPrefix = "riscv" in {
                 [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let VLOperand = 3;
  }
+
+  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
+  class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode
+      : Intrinsic<[llvm_anyvector_ty],
+                  [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
+                   llvm_anyint_ty, LLVMMatchType<3>],
+                  [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 4;
+  }
+
   // For Saturating binary operations with mask.
   // The destination vector type is NOT the same as first source vector (with mask).
   // The second source operand matches the destination type or is an XLen scalar.
@@ -644,6 +697,17 @@ let TargetPrefix = "riscv" in {
                 [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let VLOperand = 4;
   }
+
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+  class RISCVSaturatingBinaryABShiftMaskedRoundingMode
+      : Intrinsic<[llvm_anyvector_ty],
+                  [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
+                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                   LLVMMatchType<3>, LLVMMatchType<3>],
+                  [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 5;
+  }
+
   // Input: (vector_in, vector_in, scalar_in, vl, policy)
   class RVVSlideUnMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
@@ -1074,14 +1138,26 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
     def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
   }
+  multiclass RISCVSaturatingBinaryAAXRoundingMode {
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
+  }
   multiclass RISCVSaturatingBinaryAAShift {
     def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMasked;
     def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMasked;
   }
+  multiclass RISCVSaturatingBinaryAAShiftRoundingMode {
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode;
+  }
   multiclass RISCVSaturatingBinaryABShift {
     def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked;
     def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked;
   }
+  multiclass RISCVSaturatingBinaryABShiftRoundingMode {
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode;
+  }
   multiclass RVVSlide {
     def "int_riscv_" # NAME : RVVSlideUnMasked;
     def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
@@ -1350,18 +1426,18 @@ let TargetPrefix = "riscv" in {
   def "int_riscv_vcompress" : RISCVCompress;
-  defm vaaddu : RISCVSaturatingBinaryAAX;
-  defm vaadd : RISCVSaturatingBinaryAAX;
-  defm vasubu : RISCVSaturatingBinaryAAX;
-  defm vasub : RISCVSaturatingBinaryAAX;
+  defm vaaddu : RISCVSaturatingBinaryAAXRoundingMode;
+  defm vaadd : RISCVSaturatingBinaryAAXRoundingMode;
+  defm vasubu : RISCVSaturatingBinaryAAXRoundingMode;
+  defm vasub : RISCVSaturatingBinaryAAXRoundingMode;
-  defm vsmul : RISCVSaturatingBinaryAAX;
+  defm vsmul : RISCVSaturatingBinaryAAXRoundingMode;
-  defm vssrl : RISCVSaturatingBinaryAAShift;
-  defm vssra : RISCVSaturatingBinaryAAShift;
+  defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode;
+  defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode;
-  defm vnclipu : RISCVSaturatingBinaryABShift;
-  defm vnclip : RISCVSaturatingBinaryABShift;
+  defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
+  defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;
   defm vmfeq : RISCVCompare;
   defm vmfne : RISCVCompare;
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index f909e0017e2b..db84f0504f21 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -27,6 +27,7 @@ add_llvm_target(RISCVCodeGen
   RISCVFrameLowering.cpp
   RISCVGatherScatterLowering.cpp
   RISCVInsertVSETVLI.cpp
+  RISCVInsertReadWriteCSR.cpp
   RISCVInstrInfo.cpp
   RISCVISelDAGToDAG.cpp
   RISCVISelLowering.cpp
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 2cf2045c1719..eb43f4bf0a02 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -102,6 +102,9 @@ enum {
   // in bits 63:31. Used by the SExtWRemoval pass.
   IsSignExtendingOpWShift = UsesMaskPolicyShift + 1,
   IsSignExtendingOpWMask = 1ULL << IsSignExtendingOpWShift,
+
+  HasRoundModeOpShift = IsSignExtendingOpWShift + 1,
+  HasRoundModeOpMask = 1ULL << HasRoundModeOpShift,
 };
 // Match with the definitions in RISCVInstrFormats.td
@@ -176,6 +179,11 @@ static inline bool usesMaskPolicy(uint64_t TSFlags) {
   return TSFlags & UsesMaskPolicyMask;
 }
+/// \returns true if there is a rounding mode operand for this instruction
+static inline bool hasRoundModeOp(uint64_t TSFlags) {
+  return TSFlags & HasRoundModeOpMask;
+}
+
 static inline unsigned getMergeOpNum(const MCInstrDesc &Desc) {
   assert(hasMergeOp(Desc.TSFlags));
   assert(!Desc.isVariadic());
diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index c42fb070aade..f629bd2faea9 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -68,6 +68,9 @@ void initializeRISCVExpandAtomicPseudoPass(PassRegistry &);
 FunctionPass *createRISCVInsertVSETVLIPass();
 void initializeRISCVInsertVSETVLIPass(PassRegistry &);
+FunctionPass *createRISCVInsertReadWriteCSRPass();
+void initializeRISCVInsertReadWriteCSRPass(PassRegistry &);
+
 FunctionPass *createRISCVRedundantCopyEliminationPass();
 void initializeRISCVRedundantCopyEliminationPass(PassRegistry &);
diff --git a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
new file mode 100644
index 000000000000..9cc06adf658c
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
@@ -0,0 +1,115 @@
+//===-- RISCVInsertReadWriteCSR.cpp - Insert Read/Write of RISC-V CSR -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file implements a machine function pass that inserts reads/writes of
+// the CSRs used by RISC-V vector instructions.
+//
+// Currently the pass implements naive insertion of a write to vxrm before an
+// RVV fixed-point instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-insert-read-write-csr"
+#define RISCV_INSERT_READ_WRITE_CSR_NAME "RISC-V Insert Read/Write CSR Pass"
+
+namespace {
+
+class RISCVInsertReadWriteCSR : public MachineFunctionPass {
+  const TargetInstrInfo *TII;
+
+public:
+  static char ID;
+
+  RISCVInsertReadWriteCSR() : MachineFunctionPass(ID) {
+    initializeRISCVInsertReadWriteCSRPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override {
+    return RISCV_INSERT_READ_WRITE_CSR_NAME;
+  }
+
+private:
+  bool emitWriteVXRM(MachineBasicBlock &MBB);
+  std::optional<unsigned> getRoundModeIdx(const MachineInstr &MI);
+};
+
+} // end anonymous namespace
+
+char RISCVInsertReadWriteCSR::ID = 0;
+
+static unsigned PreVXRMValue = 4;
+
+INITIALIZE_PASS(RISCVInsertReadWriteCSR, DEBUG_TYPE,
+                RISCV_INSERT_READ_WRITE_CSR_NAME, false, false)
+
+// This function returns the index of the rounding-mode immediate operand if
+// there is one; otherwise it returns std::nullopt.
+std::optional<unsigned>
+RISCVInsertReadWriteCSR::getRoundModeIdx(const MachineInstr &MI) {
+  uint64_t TSFlags = MI.getDesc().TSFlags;
+  if (!RISCVII::hasRoundModeOp(TSFlags))
+    return std::nullopt;
+
+  // The operand order
+  // -------------------------------------
+  // | n-1 (if any) | n-2 | n-3 | n-4   |
+  // |    policy    | sew | vl  | rm    |
+  // -------------------------------------
+  return MI.getNumExplicitOperands() - RISCVII::hasVecPolicyOp(TSFlags) - 3;
+}
+
+// This function inserts a write to vxrm when encountering an RVV fixed-point
+// instruction.
+bool RISCVInsertReadWriteCSR::emitWriteVXRM(MachineBasicBlock &MBB) {
+  bool Changed = false;
+  for (MachineInstr &MI : MBB) {
+    if (auto RoundModeIdx = getRoundModeIdx(MI)) {
+      unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm();
+      if (VXRMImm == PreVXRMValue)
+        continue;
+      PreVXRMValue = VXRMImm;
+      Changed = true;
+      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
+          .addImm(VXRMImm);
+      MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
+                                              /*IsImp*/ true));
+    }
+  }
+  return Changed;
+}
+
+bool RISCVInsertReadWriteCSR::runOnMachineFunction(MachineFunction &MF) {
+  // Skip if the vector extension is not enabled.
+  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+  if (!ST.hasVInstructions())
+    return false;
+
+  TII = ST.getInstrInfo();
+
+  bool Changed = false;
+
+  for (MachineBasicBlock &MBB : MF)
+    Changed |= emitWriteVXRM(MBB);
+
+  return Changed;
+}
+
+FunctionPass *llvm::createRISCVInsertReadWriteCSRPass() {
+  return new RISCVInsertReadWriteCSR();
+}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index 3a494a5e3b58..f2cd4ed8857f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -211,6 +211,9 @@ class RVInst<dag outs, dag ins, string opcodestr, string argstr,
   // in bits 63:31. Used by the SExtWRemoval pass.
   bit IsSignExtendingOpW = 0;
   let TSFlags{19} = IsSignExtendingOpW;
+
+  bit HasRoundModeOp = 0;
+  let TSFlags{20} = HasRoundModeOp;
 }
 // Pseudo instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index c699a94943d8..75fee3f962d0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1693,6 +1693,8 @@ def WriteFRM : WriteSysReg<SysRegFRM, [FRM]>;
 def WriteFRMImm : WriteSysRegImm<SysRegFRM, [FRM]>;
 def SwapFRMImm : SwapSysRegImm<SysRegFRM, [FRM]>;
+def WriteVXRMImm : WriteSysRegImm<SysRegVXRM, [VXRM]>;
+
 let hasSideEffects = true in {
 def ReadFFLAGS : ReadSysReg<SysRegFFLAGS, [FFLAGS]>;
 def WriteFFLAGS : WriteSysReg<SysRegFFLAGS, [FFLAGS]>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 3f69b5e41cf1..ec7af31a23e2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1139,6 +1139,65 @@ class VPseudoBinaryNoMaskTU<VReg RetClass,
   let HasMergeOp = 1;
 }
+class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
+                                      VReg Op1Class,
+                                      DAGOperand Op2Class,
+                                      string Constraint,
+                                      int DummyMask = 1> :
+  Pseudo<(outs RetClass:$rd),
+         (ins Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, AVL:$vl, ixlenimm:$sew), []>,
+  RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Constraint;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = DummyMask;
+  let HasRoundModeOp = 1;
+}
+
+class VPseudoBinaryNoMaskTURoundingMode<VReg RetClass,
+                                        VReg Op1Class,
+                                        DAGOperand Op2Class,
+                                        string Constraint> :
+  Pseudo<(outs RetClass:$rd),
+         (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
+              AVL:$vl, ixlenimm:$sew), []>,
+  RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let HasMergeOp = 1;
+  let HasRoundModeOp = 1;
+}
+
+class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
+                                          RegisterClass Op1Class,
+                                          DAGOperand Op2Class,
+                                          string Constraint> :
+  Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+         (ins GetVRegNoV0<RetClass>.R:$merge,
+              Op1Class:$rs2, Op2Class:$rs1,
+              VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
+              ixlenimm:$sew, ixlenimm:$policy), []>,
+  RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasVecPolicyOp = 1;
+  let UsesMaskPolicy = 1;
+  let HasRoundModeOp = 1;
+}
+
 // Special version of VPseudoBinaryNoMask where we pretend the first source is
 // tied to the destination.
 // This allows maskedoff and rs2 to be the same register.
@@ -1923,6 +1982,24 @@ multiclass VPseudoBinary<VReg RetClass,
   }
 }
+multiclass VPseudoBinaryRoundingMode<VReg RetClass,
+                                     VReg Op1Class,
+                                     DAGOperand Op2Class,
+                                     LMULInfo MInfo,
+                                     string Constraint = ""> {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX :
+      VPseudoBinaryNoMaskRoundingMode<RetClass, Op1Class, Op2Class, Constraint>;
+    def "_" # MInfo.MX # "_TU" :
+      VPseudoBinaryNoMaskTURoundingMode<RetClass, Op1Class, Op2Class,
+                                        Constraint>;
+    def "_" # MInfo.MX # "_MASK" :
+      VPseudoBinaryMaskPolicyRoundingMode<RetClass, Op1Class, Op2Class,
+                                          Constraint>,
+      RISCVMaskedPseudo</*MaskOpIdx*/ 3>;
+  }
+}
+
 multiclass VPseudoBinaryM<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
@@ -1971,6 +2048,10 @@ multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = ""> {
   defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
 }
+multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = ""> {
+  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
+}
+
 // Similar to VPseudoBinaryV_VV, but uses MxListF.
 multiclass VPseudoBinaryFV_VV<LMULInfo m, string Constraint = ""> {
   defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
@@ -2000,6 +2081,10 @@ multiclass VPseudoBinaryV_VX<LMULInfo m, string Constraint = ""> {
   defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
 }
+multiclass VPseudoBinaryV_VX_RM<LMULInfo m, string Constraint = ""> {
+  defm "_VX" : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, GPR, m, Constraint>;
+}
+
 multiclass VPseudoVSLD1_VX<string Constraint = ""> {
   foreach m = MxList in {
     defvar mx = m.MX;
@@ -2036,6 +2121,10 @@ multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, LMULInfo m, string Constra
   defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
 }
+multiclass VPseudoBinaryV_VI_RM<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
+  defm _VI : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, ImmType, m, Constraint>;
+}
+
 multiclass VPseudoVALU_MM {
   foreach m = MxList in {
     defvar mx = m.MX;
@@ -2098,16 +2187,31 @@ multiclass VPseudoBinaryV_WV<LMULInfo m> {
                   !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
 }
+multiclass VPseudoBinaryV_WV_RM<LMULInfo m> {
+  defm _WV : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, m.vrclass, m,
+                                       !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
+}
+
 multiclass VPseudoBinaryV_WX<LMULInfo m> {
   defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
                   !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
 }
+multiclass VPseudoBinaryV_WX_RM<LMULInfo m> {
+  defm _WX : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, GPR, m,
+                                       !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
+}
+
 multiclass VPseudoBinaryV_WI<LMULInfo m> {
   defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
                   !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
 }
+multiclass VPseudoBinaryV_WI_RM<LMULInfo m> {
+  defm _WI : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, uimm5, m,
+                                       !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
+}
+
 // For vadc and vsbc, the instruction encoding is reserved if the destination
 // vector register is v0.
 // For vadc and vsbc, CarryIn == 1 and CarryOut == 0
@@ -2419,6 +2523,23 @@ multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""
   }
 }
+multiclass VPseudoVSALU_VV_VX_VI_RM<Operand ImmType = simm5, string Constraint = ""> {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    defvar WriteVSALUV_MX = !cast<SchedWrite>("WriteVSALUV_" # mx);
+    defvar WriteVSALUX_MX = !cast<SchedWrite>("WriteVSALUX_" # mx);
+    defvar WriteVSALUI_MX = !cast<SchedWrite>("WriteVSALUI_" # mx);
+    defvar ReadVSALUV_MX = !cast<SchedRead>("ReadVSALUV_" # mx);
+    defvar ReadVSALUX_MX = !cast<SchedRead>("ReadVSALUX_" # mx);
+
+    defm "" : VPseudoBinaryV_VV_RM<m, Constraint>,
+              Sched<[WriteVSALUV_MX, ReadVSALUV_MX, ReadVSALUV_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_VX_RM<m, Constraint>,
+              Sched<[WriteVSALUX_MX, ReadVSALUV_MX, ReadVSALUX_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_VI_RM<ImmType, m, Constraint>,
+              Sched<[WriteVSALUI_MX, ReadVSALUV_MX, ReadVMask]>;
+  }
+}
 multiclass VPseudoVSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
   foreach m = MxList in {
@@ -2456,6 +2577,24 @@ multiclass VPseudoVSSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""
   }
 }
+multiclass VPseudoVSSHT_VV_VX_VI_RM<Operand ImmType = simm5, string Constraint = ""> {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    defvar WriteVSShiftV_MX = !cast<SchedWrite>("WriteVSShiftV_" # mx);
+    defvar WriteVSShiftX_MX = !cast<SchedWrite>("WriteVSShiftX_" # mx);
+    defvar WriteVSShiftI_MX = !cast<SchedWrite>("WriteVSShiftI_" # mx);
+    defvar ReadVSShiftV_MX = !cast<SchedRead>("ReadVSShiftV_" # mx);
+    defvar ReadVSShiftX_MX = !cast<SchedRead>("ReadVSShiftX_" # mx);
+
+    defm "" : VPseudoBinaryV_VV_RM<m, Constraint>,
+              Sched<[WriteVSShiftV_MX, ReadVSShiftV_MX, ReadVSShiftV_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_VX_RM<m, Constraint>,
+              Sched<[WriteVSShiftX_MX, ReadVSShiftV_MX, ReadVSShiftX_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_VI_RM<ImmType, m, Constraint>,
+              Sched<[WriteVSShiftI_MX, ReadVSShiftV_MX, ReadVMask]>;
+  }
+}
+
 multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
   foreach m = MxList in {
     defvar mx = m.MX;
@@ -2489,6 +2628,21 @@ multiclass VPseudoVSALU_VV_VX {
   }
 }
+multiclass VPseudoVSALU_VV_VX_RM {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    defvar WriteVSALUV_MX = !cast<SchedWrite>("WriteVSALUV_" # mx);
+    defvar WriteVSALUX_MX = !cast<SchedWrite>("WriteVSALUX_" # mx);
+    defvar ReadVSALUV_MX = !cast<SchedRead>("ReadVSALUV_" # mx);
+    defvar ReadVSALUX_MX = !cast<SchedRead>("ReadVSALUX_" # mx);
+
+    defm "" : VPseudoBinaryV_VV_RM<m>,
+              Sched<[WriteVSALUV_MX, ReadVSALUV_MX, ReadVSALUV_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_VX_RM<m>,
+              Sched<[WriteVSALUX_MX, ReadVSALUV_MX, ReadVSALUX_MX, ReadVMask]>;
+  }
+}
+
 multiclass VPseudoVSMUL_VV_VX {
   foreach m = MxList in {
     defvar mx = m.MX;
@@ -2504,6 +2658,21 @@ multiclass VPseudoVSMUL_VV_VX {
   }
 }
+multiclass VPseudoVSMUL_VV_VX_RM {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    defvar WriteVSMulV_MX = !cast<SchedWrite>("WriteVSMulV_" # mx);
+    defvar WriteVSMulX_MX = !cast<SchedWrite>("WriteVSMulX_" # mx);
+    defvar ReadVSMulV_MX = !cast<SchedRead>("ReadVSMulV_" # mx);
+    defvar ReadVSMulX_MX = !cast<SchedRead>("ReadVSMulX_" # mx);
+
+    defm "" : VPseudoBinaryV_VV_RM<m>,
+              Sched<[WriteVSMulV_MX, ReadVSMulV_MX, ReadVSMulV_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_VX_RM<m>,
+              Sched<[WriteVSMulX_MX, ReadVSMulV_MX, ReadVSMulX_MX, ReadVMask]>;
+  }
+}
+
 multiclass VPseudoVAALU_VV_VX {
   foreach m = MxList in {
     defvar mx = m.MX;
@@ -2519,6 +2688,21 @@ multiclass VPseudoVAALU_VV_VX {
   }
 }
+multiclass VPseudoVAALU_VV_VX_RM {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    defvar WriteVAALUV_MX = !cast<SchedWrite>("WriteVAALUV_" # mx);
+    defvar WriteVAALUX_MX = !cast<SchedWrite>("WriteVAALUX_" # mx);
+    defvar ReadVAALUV_MX = !cast<SchedRead>("ReadVAALUV_" # mx);
+    defvar ReadVAALUX_MX = !cast<SchedRead>("ReadVAALUX_" # mx);
+
+    defm "" : VPseudoBinaryV_VV_RM<m>,
+              Sched<[WriteVAALUV_MX, ReadVAALUV_MX, ReadVAALUV_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_VX_RM<m>,
+              Sched<[WriteVAALUX_MX, ReadVAALUV_MX, ReadVAALUX_MX, ReadVMask]>;
+  }
+}
+
 multiclass VPseudoVMINMAX_VV_VX {
   foreach m = MxList in {
     defvar mx = m.MX;
@@ -3002,6 +3186,24 @@ multiclass VPseudoVNCLP_WV_WX_WI {
   }
 }
+multiclass VPseudoVNCLP_WV_WX_WI_RM {
+  foreach m = MxListW in {
+    defvar mx = m.MX;
+    defvar WriteVNClipV_MX = !cast<SchedWrite>("WriteVNClipV_" # mx);
+    defvar WriteVNClipX_MX = !cast<SchedWrite>("WriteVNClipX_" # mx);
+    defvar WriteVNClipI_MX = !cast<SchedWrite>("WriteVNClipI_" # mx);
+    defvar ReadVNClipV_MX = !cast<SchedRead>("ReadVNClipV_" # mx);
+    defvar ReadVNClipX_MX = !cast<SchedRead>("ReadVNClipX_" # mx);
+
+    defm "" : VPseudoBinaryV_WV_RM<m>,
+              Sched<[WriteVNClipV_MX, ReadVNClipV_MX, ReadVNClipV_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_WX_RM<m>,
+              Sched<[WriteVNClipX_MX, ReadVNClipV_MX, ReadVNClipX_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_WI_RM<m>,
+              Sched<[WriteVNClipI_MX, ReadVNClipV_MX, ReadVMask]>;
+  }
+}
+
 multiclass VPseudoVNSHT_WV_WX_WI {
   foreach m = MxListW in {
     defvar mx = m.MX;
@@ -3881,6 +4083,48 @@ class VPatBinaryNoMaskTU<string intrinsic_name,
                    (op2_type op2_kind:$rs2),
                    GPR:$vl, sew)>;
+class VPatBinaryNoMaskTARoundingMode<string intrinsic_name,
+                                     string inst,
+                                     ValueType result_type,
+                                     ValueType op1_type,
+                                     ValueType op2_type,
+                                     int sew,
+                                     VReg op1_reg_class,
+                                     DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                   (result_type (undef)),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (XLenVT timm:$round),
+                   VLOpFrag)),
+      (!cast<Instruction>(inst)
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (XLenVT timm:$round),
+                   GPR:$vl, sew)>;
+
+class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
+                                     string inst,
+                                     ValueType result_type,
+                                     ValueType op1_type,
+                                     ValueType op2_type,
+                                     int sew,
+                                     VReg result_reg_class,
+                                     VReg op1_reg_class,
+                                     DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (XLenVT timm:$round),
+                   VLOpFrag)),
+      (!cast<Instruction>(inst#"_TU")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (XLenVT timm:$round),
+                   GPR:$vl, sew)>;
+
 // Same as above but source operands are swapped.
 class VPatBinaryNoMaskSwapped<string intrinsic_name,
                               string inst,
@@ -3943,6 +4187,31 @@ class VPatBinaryMaskTA<string intrinsic_name,
                    (op2_type op2_kind:$rs2),
                    (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
+class VPatBinaryMaskTARoundingMode<string intrinsic_name,
+                                   string inst,
+                                   ValueType result_type,
+                                   ValueType op1_type,
+                                   ValueType op2_type,
+                                   ValueType mask_type,
+                                   int sew,
+                                   VReg result_reg_class,
+                                   VReg op1_reg_class,
+                                   DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0),
+                   (XLenVT timm:$round),
+                   VLOpFrag, (XLenVT timm:$policy))),
+      (!cast<Instruction>(inst#"_MASK")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0),
+                   (XLenVT timm:$round),
+                   GPR:$vl, sew, (XLenVT timm:$policy))>;
+
 // Same as above but source operands are swapped.
 class VPatBinaryMaskSwapped<string intrinsic_name,
                             string inst,
@@ -4261,6 +4530,26 @@ multiclass VPatBinaryTA<string intrinsic,
                          op2_kind>;
 }
+multiclass VPatBinaryTARoundingMode<string intrinsic,
+                                    string inst,
+                                    ValueType result_type,
+                                    ValueType op1_type,
+                                    ValueType op2_type,
+                                    ValueType mask_type,
+                                    int sew,
+                                    VReg result_reg_class,
+                                    VReg op1_reg_class,
+                                    DAGOperand op2_kind>
+{
+  def : VPatBinaryNoMaskTARoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
+                                       sew, op1_reg_class, op2_kind>;
+  def : VPatBinaryNoMaskTURoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
+                                       sew, result_reg_class, op1_reg_class, op2_kind>;
+  def : VPatBinaryMaskTARoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
+                                     mask_type, sew, result_reg_class, op1_reg_class,
+                                     op2_kind>;
+}
+
 multiclass VPatBinarySwapped<string intrinsic,
                              string inst,
                              ValueType result_type,
@@ -4387,6 +4676,15 @@ multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                       vti.RegClass, vti.RegClass>;
 }
+multiclass VPatBinaryV_VVRoundingMode<string intrinsic, string instruction,
+                                      list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    defm : VPatBinaryTARoundingMode<intrinsic, instruction # "_VV_" # vti.LMul.MX,
+                                    vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                                    vti.Log2SEW, vti.RegClass,
+                                    vti.RegClass, vti.RegClass>;
+}
+
 multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                               list<VTypeInfo> vtilist> {
   foreach vti = vtilist in {
@@ -4428,6 +4726,17 @@ multiclass VPatBinaryV_VX<string intrinsic, string instruction,
   }
 }
+multiclass VPatBinaryV_VXRoundingMode<string intrinsic, string instruction,
+                                      list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in {
+    defvar kind = "V"#vti.ScalarSuffix;
+    defm : VPatBinaryTARoundingMode<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
+                                    vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
+                                    vti.Log2SEW, vti.RegClass,
+                                    vti.RegClass, vti.ScalarRegClass>;
+  }
+}
+
 multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                               list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -4446,6 +4755,15 @@ multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                      vti.RegClass, imm_type>;
 }
+multiclass VPatBinaryV_VIRoundingMode<string intrinsic, string instruction,
+                                      list<VTypeInfo> vtilist, Operand imm_type> {
+  foreach vti = vtilist in
+    defm : VPatBinaryTARoundingMode<intrinsic, instruction # "_VI_" # vti.LMul.MX,
+                                    vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                                    vti.Log2SEW, vti.RegClass,
+                                    vti.RegClass, imm_type>;
+}
+
 multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
   foreach mti = AllMasks in
     def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.LMul.MX,
@@ -4529,6 +4847,18 @@ multiclass VPatBinaryV_WV<string intrinsic, string instruction,
   }
 }
+multiclass VPatBinaryV_WVRoundingMode<string intrinsic, string instruction,
+                                      list<VTypeInfoToWide> vtilist> {
+  foreach VtiToWti = vtilist in {
+    defvar Vti = VtiToWti.Vti;
+    defvar Wti = VtiToWti.Wti;
+    defm : VPatBinaryTARoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
+                                    Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
+                                    Vti.Log2SEW, Vti.RegClass,
+                                    Wti.RegClass, Vti.RegClass>;
+  }
+}
+
 multiclass VPatBinaryV_WX<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
   foreach VtiToWti = vtilist in {
@@ -4542,6 +4872,19 @@ multiclass VPatBinaryV_WX<string intrinsic, string instruction,
   }
 }
+multiclass VPatBinaryV_WXRoundingMode<string intrinsic, string instruction,
+                                      list<VTypeInfoToWide> vtilist> {
+  foreach VtiToWti = vtilist in {
+    defvar Vti = VtiToWti.Vti;
+    defvar Wti = VtiToWti.Wti;
+    defvar kind = "W"#Vti.ScalarSuffix;
+    defm : VPatBinaryTARoundingMode<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
+                                    Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
+                                    Vti.Log2SEW, Vti.RegClass,
+                                    Wti.RegClass, Vti.ScalarRegClass>;
+  }
+}
+
 multiclass VPatBinaryV_WI<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
   foreach VtiToWti = vtilist in {
@@ -4554,6 +4897,18 @@ multiclass VPatBinaryV_WI<string intrinsic, string instruction,
   }
 }
+multiclass VPatBinaryV_WIRoundingMode<string intrinsic, string instruction,
+                                      list<VTypeInfoToWide> vtilist> {
+  foreach VtiToWti = vtilist in {
+    defvar Vti = VtiToWti.Vti;
+    defvar Wti = VtiToWti.Wti;
+    defm : VPatBinaryTARoundingMode<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
+                                    Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
+                                    Vti.Log2SEW, Vti.RegClass,
+                                    Wti.RegClass, uimm5>;
+  }
+}
+
 multiclass VPatBinaryV_VM<string intrinsic, string instruction,
                           bit CarryOut = 0,
                           list<VTypeInfo> vtilist = AllIntegerVectors> {
@@ -4688,11 +5043,22 @@ multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
       VPatBinaryV_VX<intrinsic, instruction, vtilist>,
       VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
+multiclass VPatBinaryV_VV_VX_VIRoundingMode<string intrinsic, string instruction,
+                                            list<VTypeInfo> vtilist, Operand ImmType = simm5>
+    : VPatBinaryV_VVRoundingMode<intrinsic, instruction, vtilist>,
+      VPatBinaryV_VXRoundingMode<intrinsic, instruction, vtilist>,
+      VPatBinaryV_VIRoundingMode<intrinsic, instruction, vtilist, ImmType>;
+
 multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist>
     : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>;
+multiclass VPatBinaryV_VV_VXRoundingMode<string intrinsic, string instruction,
+                                         list<VTypeInfo> vtilist>
+    : VPatBinaryV_VVRoundingMode<intrinsic, instruction, vtilist>,
+      VPatBinaryV_VXRoundingMode<intrinsic, instruction, vtilist>;
+
 multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist>
     : VPatBinaryV_VX<intrinsic, instruction, vtilist>,
@@ -4714,6 +5080,12 @@ multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
       VPatBinaryV_WX<intrinsic, instruction, vtilist>,
      VPatBinaryV_WI<intrinsic, instruction, vtilist>;
+multiclass VPatBinaryV_WV_WX_WIRoundingMode<string intrinsic, string instruction,
+                                            list<VTypeInfoToWide> vtilist>
+    : VPatBinaryV_WVRoundingMode<intrinsic, instruction, vtilist>,
+      VPatBinaryV_WXRoundingMode<intrinsic, instruction, vtilist>,
+      VPatBinaryV_WIRoundingMode<intrinsic, instruction, vtilist>;
+
 multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
     : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
      VPatBinaryV_XM_TAIL<intrinsic, instruction>,
@@ -5382,34 +5754,34 @@ let Defs = [VXSAT], hasSideEffects = 1 in {
 //===----------------------------------------------------------------------===//
 // 12.2. Vector Single-Width Averaging Add and Subtract
 //===----------------------------------------------------------------------===//
-let Uses = [VXRM], hasSideEffects = 1 in {
-  defm PseudoVAADDU : VPseudoVAALU_VV_VX;
-  defm PseudoVAADD : VPseudoVAALU_VV_VX;
-  defm PseudoVASUBU : VPseudoVAALU_VV_VX;
-  defm PseudoVASUB : VPseudoVAALU_VV_VX;
+let hasSideEffects = 1 in {
+  defm PseudoVAADDU : VPseudoVAALU_VV_VX_RM;
+  defm PseudoVAADD : VPseudoVAALU_VV_VX_RM;
+  defm PseudoVASUBU : VPseudoVAALU_VV_VX_RM;
+  defm PseudoVASUB : VPseudoVAALU_VV_VX_RM;
 }
 //===----------------------------------------------------------------------===//
 // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
 //===----------------------------------------------------------------------===//
-let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
-  defm PseudoVSMUL : VPseudoVSMUL_VV_VX;
+let Defs = [VXSAT], hasSideEffects = 1 in {
+  defm PseudoVSMUL : VPseudoVSMUL_VV_VX_RM;
 }
 //===----------------------------------------------------------------------===//
 // 12.4. Vector Single-Width Scaling Shift Instructions
 //===----------------------------------------------------------------------===//
-let Uses = [VXRM], hasSideEffects = 1 in {
-  defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI<uimm5>;
-  defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI<uimm5>;
+let hasSideEffects = 1 in {
+  defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;
+  defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;
 }
 //===----------------------------------------------------------------------===//
 // 12.5. Vector Narrowing Fixed-Point Clip Instructions
 //===----------------------------------------------------------------------===//
-let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
-  defm PseudoVNCLIP : VPseudoVNCLP_WV_WX_WI;
-  defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI;
+let Defs = [VXSAT], hasSideEffects = 1 in {
+  defm PseudoVNCLIP : VPseudoVNCLP_WV_WX_WI_RM;
+  defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI_RM;
 }
 } // Predicates = [HasVInstructions]
@@ -6040,30 +6412,36 @@ defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
 //===----------------------------------------------------------------------===//
 // 12.2. Vector Single-Width Averaging Add and Subtract
 //===----------------------------------------------------------------------===//
-defm : VPatBinaryV_VV_VX<"int_riscv_vaaddu", "PseudoVAADDU", AllIntegerVectors>;
-defm : VPatBinaryV_VV_VX<"int_riscv_vaadd", "PseudoVAADD", AllIntegerVectors>;
-defm : VPatBinaryV_VV_VX<"int_riscv_vasubu", "PseudoVASUBU", AllIntegerVectors>;
-defm : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>;
+defm : VPatBinaryV_VV_VXRoundingMode<"int_riscv_vaaddu", "PseudoVAADDU",
+                                     AllIntegerVectors>;
+defm : VPatBinaryV_VV_VXRoundingMode<"int_riscv_vasubu", "PseudoVASUBU",
+                                     AllIntegerVectors>;
+defm : VPatBinaryV_VV_VXRoundingMode<"int_riscv_vasub", "PseudoVASUB",
+                                     AllIntegerVectors>;
+defm : VPatBinaryV_VV_VXRoundingMode<"int_riscv_vaadd", "PseudoVAADD",
+                                     AllIntegerVectors>;
 //===----------------------------------------------------------------------===//
 // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
 //===----------------------------------------------------------------------===//
-defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", AllIntegerVectors>;
+defm : VPatBinaryV_VV_VXRoundingMode<"int_riscv_vsmul", "PseudoVSMUL",
+                                     AllIntegerVectors>;
 //===----------------------------------------------------------------------===//
 // 12.4. Vector Single-Width Scaling Shift Instructions
 //===----------------------------------------------------------------------===//
-defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors,
-                            uimm5>;
-defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
-                            uimm5>;
+defm : VPatBinaryV_VV_VX_VIRoundingMode<"int_riscv_vssrl", "PseudoVSSRL",
+                                        AllIntegerVectors, uimm5>;
+defm : VPatBinaryV_VV_VX_VIRoundingMode<"int_riscv_vssra", "PseudoVSSRA",
                                        AllIntegerVectors, uimm5>;
 //===----------------------------------------------------------------------===//
 // 12.5. Vector Narrowing Fixed-Point Clip Instructions
 //===----------------------------------------------------------------------===//
-defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
-defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;
-
+defm : VPatBinaryV_WV_WX_WIRoundingMode<"int_riscv_vnclipu", "PseudoVNCLIPU",
                                        AllWidenableIntVectors>;
+defm : VPatBinaryV_WV_WX_WIRoundingMode<"int_riscv_vnclip", "PseudoVNCLIP",
                                        AllWidenableIntVectors>;
 } // Predicates = [HasVInstructions]
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
index 281918259cdb..7455df64c38a 100644
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -159,6 +159,8 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
     --NumOps;
   if (RISCVII::hasVLOp(TSFlags))
     --NumOps;
+  if (RISCVII::hasRoundModeOp(TSFlags))
+    --NumOps;
   if (RISCVII::hasSEWOp(TSFlags))
     --NumOps;
diff --git a/llvm/lib/Target/RISCV/RISCVSystemOperands.td b/llvm/lib/Target/RISCV/RISCVSystemOperands.td
index b9aa25b321b0..ced58fbb2a6d 100644
--- a/llvm/lib/Target/RISCV/RISCVSystemOperands.td
+++ b/llvm/lib/Target/RISCV/RISCVSystemOperands.td
@@ -350,7 +350,7 @@ def : SysReg<"dscratch1", 0x7B3>;
 //===----------------------------------------------------------------------===//
 def : SysReg<"vstart", 0x008>;
 def : SysReg<"vxsat", 0x009>;
-def : SysReg<"vxrm", 0x00A>;
+def SysRegVXRM : SysReg<"vxrm", 0x00A>;
 def : SysReg<"vcsr", 0x00F>;
 def : SysReg<"vl", 0xC20>;
 def : SysReg<"vtype", 0xC21>;
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index cc881406666c..28908e88f39f 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -81,6 +81,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   initializeRISCVPreRAExpandPseudoPass(*PR);
   initializeRISCVExpandPseudoPass(*PR);
   initializeRISCVInsertVSETVLIPass(*PR);
+  initializeRISCVInsertReadWriteCSRPass(*PR);
   initializeRISCVDAGToDAGISelPass(*PR);
 }
@@ -354,6 +355,7 @@ void RISCVPassConfig::addPreRegAlloc() {
   if (TM->getOptLevel() != CodeGenOpt::None)
     addPass(createRISCVMergeBaseOffsetOptPass());
   addPass(createRISCVInsertVSETVLIPass());
+  addPass(createRISCVInsertReadWriteCSRPass());
 }
 void RISCVPassConfig::addPostRegAlloc() {
--
2.25.1