  1. /*
  2. * Basic macros to emit RISC-V RV64IM instructions and some utils
  3. * Copyright (C) 2019 kub
  4. *
  5. * This work is licensed under the terms of MAME license.
  6. * See COPYING file in the top-level directory.
  7. */
  8. #define HOST_REGS 32
  9. // RISC-V ABI: params: x10-x17, return: x10-x11, temp: x1(ra),x5-x7,x28-x31
  10. // saved: x8(fp),x9,x18-x27, reserved: x0(zero), x4(tp), x3(gp), x2(sp)
  11. // x28-x31(t3-t6) are used internally by the code emitter
  12. #define RET_REG 10 // a0
  13. #define PARAM_REGS { 10, 11, 12, 13, 14, 15, 16, 17 } // a0-a7
  14. #define PRESERVED_REGS { 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 } // s1-s11
  15. #define TEMPORARY_REGS { 5, 6, 7 } // t0-t2
  16. #define CONTEXT_REG 9 // s1
  17. #define STATIC_SH2_REGS { SHR_SR,27 , SHR_R0,26 , SHR_R0+1,25 }
  18. // registers usable for user code: r1-r25, others reserved or special
  19. #define Z0 0 // zero register
  20. #define GP 3 // global pointer
  21. #define SP 2 // stack pointer
  22. #define FP 8 // frame pointer
  23. #define LR 1 // link register
  24. // internally used by code emitter:
  25. #define AT 31 // used to hold intermediate results
  26. #define FNZ 30 // emulated processor flags: N (bit 31), Z (all bits)
  27. #define FC 29 // emulated processor flags: C (bit 0), others 0
  28. #define FV 28 // emulated processor flags: Nt^Ns (bit 31), others x
  29. // All operations but ptr ops are using the lower 32 bits of the registers.
  30. // The upper 32 bits always contain the sign extension from the lower 32 bits.
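// A minimal worked example of this invariant (illustrative, not in the
// original): adding 1 to 0x7fffffff with ADDW leaves 0xffffffff80000000 in the
// 64 bit host register; the guest value is the low word 0x80000000 and the
// upper half is just its sign extension, so later compares/branches still
// behave like 32 bit operations.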
  31. // unified conditions; virtual, not corresponding to anything real on RISC-V
  32. #define DCOND_EQ 0x0
  33. #define DCOND_NE 0x1
  34. #define DCOND_HS 0x2
  35. #define DCOND_LO 0x3
  36. #define DCOND_MI 0x4
  37. #define DCOND_PL 0x5
  38. #define DCOND_VS 0x6
  39. #define DCOND_VC 0x7
  40. #define DCOND_HI 0x8
  41. #define DCOND_LS 0x9
  42. #define DCOND_GE 0xa
  43. #define DCOND_LT 0xb
  44. #define DCOND_GT 0xc
  45. #define DCOND_LE 0xd
  46. #define DCOND_CS DCOND_LO
  47. #define DCOND_CC DCOND_HS
  48. // unified insn
  49. #define R5_INSN(b25, b20, b15, b12, b7, op) \
  50. (((b25)<<25)|((b20)<<20)|((b15)<<15)|((b12)<<12)|((b7)<<7)|((op)<<0))
  51. #define _ 0 //marker for "field unused"
  52. #define _CB(v,l,s,d) ((((v)>>(s))&((1<<(l))-1))<<(d)) // copy l bits
  53. #define R5_R_INSN(op, f1, f2, rd, rs, rt) \
  54. R5_INSN(f2, rt, rs, f1, rd, op)
  55. #define R5_I_INSN(op, f1, rd, rs, imm) \
  56. R5_INSN(_, _CB(imm,12,0,0), rs, f1, rd, op)
  57. #define R5_S_INSN(op, f1, rt, rs, imm) \
  58. R5_INSN(_CB(imm,7,5,0), rt, rs, f1, _CB(imm,5,0,0), op)
  59. #define R5_U_INSN(op, rd, imm) \
  60. R5_INSN(_,_,_, _CB(imm,20,12,0), rd, op)
  61. // oy vey... R5 immediate encoding in branches is really unwieldy :-/
  62. #define R5_B_INSN(op, f1, rt, rs, imm) \
  63. R5_INSN(_CB(imm,1,12,6)|_CB(imm,6,5,0), rt, rs, f1, \
  64. _CB(imm,4,1,1)|_CB(imm,1,11,0), op)
  65. #define R5_J_INSN(op, rd, imm) \
  66. R5_INSN(_CB(imm,1,20,6)|_CB(imm,6,5,0), _CB(imm,4,1,1)|_CB(imm,1,11,0),\
  67. _CB(imm,8,12,0), rd, op)
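// Worked encoding examples (added for illustration; values checked by hand
// against the RISC-V base ISA field layout, not part of the original):
//   R5_ADD_REG(10,11,12) -> R5_INSN(0,12,11,0,10,0x33) = 0x00c58533, "add a0,a1,a2"
//   R5_B(8)              -> B-type: imm[12|10:5] go into b25, imm[4:1|11] into
//                           b7, yielding 0x00000463, "beq x0,x0,+8"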
  68. // opcode
  69. enum { OP_LUI=0x37, OP_AUIPC=0x17, OP_JAL=0x6f, // 20-bit immediate
  70. OP_JALR=0x67, OP_BCOND=0x63, OP_LD=0x03, OP_ST=0x23, // 12-bit immediate
  71. OP_IMM=0x13, OP_REG=0x33, OP_IMM32=0x1b, OP_REG32=0x3b };
  72. // func3
  73. enum { F1_ADD, F1_SL, F1_SLT, F1_SLTU, F1_XOR, F1_SR, F1_OR, F1_AND };// IMM/REG
  74. enum { F1_MUL, F1_MULH, F1_MULHSU, F1_MULHU, F1_DIV, F1_DIVU, F1_REM, F1_REMU };
  75. enum { F1_BEQ, F1_BNE, F1_BLT=4, F1_BGE, F1_BLTU, F1_BGEU }; // BCOND
  76. enum { F1_B, F1_H, F1_W, F1_D, F1_BU, F1_HU, F1_WU }; // LD/ST
  77. // func7
  78. enum { F2_ALT=0x20, F2_MULDIV=0x01 };
  79. #define __(n) o##n // enum marker for "undefined"
  80. #define R5_NOP R5_I_INSN(OP_IMM, F1_ADD, Z0, Z0, 0) // nop: ADDI r0, r0, #0
  81. // arithmetic/logical
  82. // rd = rs OP rt
  83. #define R5_ADD_REG(rd, rs, rt) \
  84. R5_R_INSN(OP_REG, F1_ADD, _, rd, rs, rt)
  85. #define R5_SUB_REG(rd, rs, rt) \
  86. R5_R_INSN(OP_REG, F1_ADD, F2_ALT, rd, rs, rt)
  87. #define R5_NEG_REG(rd, rt) \
  88. R5_SUB_REG(rd, Z0, rt)
  89. #define R5_XOR_REG(rd, rs, rt) \
  90. R5_R_INSN(OP_REG, F1_XOR, _, rd, rs, rt)
  91. #define R5_OR_REG(rd, rs, rt) \
  92. R5_R_INSN(OP_REG, F1_OR , _, rd, rs, rt)
  93. #define R5_AND_REG(rd, rs, rt) \
  94. R5_R_INSN(OP_REG, F1_AND, _, rd, rs, rt)
  95. // rd = rs SHIFT rt
  96. #define R5_LSL_REG(rd, rs, rt) \
  97. R5_R_INSN(OP_REG, F1_SL , _, rd, rs, rt)
  98. #define R5_LSR_REG(rd, rs, rt) \
  99. R5_R_INSN(OP_REG, F1_SR , _, rd, rs, rt)
  100. #define R5_ASR_REG(rd, rs, rt) \
  101. R5_R_INSN(OP_REG, F1_SR , F2_ALT, rd, rs, rt)
  102. // rd = (rs < rt)
  103. #define R5_SLT_REG(rd, rs, rt) \
  104. R5_R_INSN(OP_REG, F1_SLT, _, rd, rs, rt)
  105. #define R5_SLTU_REG(rd, rs, rt) \
  106. R5_R_INSN(OP_REG, F1_SLTU,_, rd, rs, rt)
  107. // rd = rs OP imm12
  108. #define R5_ADD_IMM(rd, rs, imm12) \
  109. R5_I_INSN(OP_IMM, F1_ADD , rd, rs, imm12)
  110. #define R5_XOR_IMM(rd, rs, imm12) \
  111. R5_I_INSN(OP_IMM, F1_XOR , rd, rs, imm12)
  112. #define R5_OR_IMM(rd, rs, imm12) \
  113. R5_I_INSN(OP_IMM, F1_OR , rd, rs, imm12)
  114. #define R5_AND_IMM(rd, rs, imm12) \
  115. R5_I_INSN(OP_IMM, F1_AND , rd, rs, imm12)
  116. #define R5_MOV_REG(rd, rs) \
  117. R5_ADD_IMM(rd, rs, 0)
  118. #define R5_MVN_REG(rd, rs) \
  119. R5_XOR_IMM(rd, rs, -1)
  120. // rd = (imm12 << (0|12))
  121. #define R5_MOV_IMM(rd, imm12) \
  122. R5_OR_IMM(rd, Z0, imm12)
  123. #define R5_MOVT_IMM(rd, imm20) \
  124. R5_U_INSN(OP_LUI, rd, imm20)
  125. #define R5_MOVA_IMM(rd, imm20) \
  126. R5_U_INSN(OP_AUIPC, rd, imm20)
  127. // rd = rs SHIFT imm5/imm6
  128. #define R5_LSL_IMM(rd, rs, bits) \
  129. R5_R_INSN(OP_IMM, F1_SL , _, rd, rs, bits)
  130. #define R5_LSR_IMM(rd, rs, bits) \
  131. R5_R_INSN(OP_IMM, F1_SR , _, rd, rs, bits)
  132. #define R5_ASR_IMM(rd, rs, bits) \
  133. R5_R_INSN(OP_IMM, F1_SR , F2_ALT, rd, rs, bits)
  134. // rd = (rs < imm12)
  135. #define R5_SLT_IMM(rd, rs, imm12) \
  136. R5_I_INSN(OP_IMM, F1_SLT , rd, rs, imm12)
  137. #define R5_SLTU_IMM(rd, rs, imm12) \
  138. R5_I_INSN(OP_IMM, F1_SLTU, rd, rs, imm12)
  139. // multiplication
  140. #define R5_MULHU(rd, rs, rt) \
  141. R5_R_INSN(OP_REG, F1_MULHU, F2_MULDIV, rd, rs, rt)
  142. #define R5_MULHS(rd, rs, rt) \
  143. R5_R_INSN(OP_REG, F1_MULH, F2_MULDIV, rd, rs, rt)
  144. #define R5_MUL(rd, rs, rt) \
  145. R5_R_INSN(OP_REG, F1_MUL, F2_MULDIV, rd, rs, rt)
  146. // branching
  147. #define R5_J(imm20) \
  148. R5_J_INSN(OP_JAL, Z0, imm20)
  149. #define R5_JAL(rd, imm20) \
  150. R5_J_INSN(OP_JAL, rd, imm20)
  151. #define R5_JR(rs, offs12) \
  152. R5_I_INSN(OP_JALR, _, Z0, rs, offs12)
  153. #define R5_JALR(rd, rs, offs12) \
  154. R5_I_INSN(OP_JALR, _, rd, rs, offs12)
  155. // conditional branches; no condition code, these compare rs against rt
  156. #define R5_BCOND(cond, rs, rt, offs13) \
  157. R5_B_INSN(OP_BCOND, cond, rt, rs, offs13)
  158. #define R5_BCONDZ(cond, rs, offs13) \
  159. R5_B_INSN(OP_BCOND, cond, Z0, rs, offs13)
  160. #define R5_B(offs13) \
  161. R5_BCOND(F1_BEQ, Z0, Z0, offs13)
  162. // load/store indexed base
  163. #define R5_LW(rd, rs, offs12) \
  164. R5_I_INSN(OP_LD, F1_W, rd, rs, offs12)
  165. #define R5_LH(rd, rs, offs12) \
  166. R5_I_INSN(OP_LD, F1_H, rd, rs, offs12)
  167. #define R5_LB(rd, rs, offs12) \
  168. R5_I_INSN(OP_LD, F1_B, rd, rs, offs12)
  169. #define R5_LHU(rd, rs, offs12) \
  170. R5_I_INSN(OP_LD, F1_HU, rd, rs, offs12)
  171. #define R5_LBU(rd, rs, offs12) \
  172. R5_I_INSN(OP_LD, F1_BU, rd, rs, offs12)
  173. #define R5_SW(rt, rs, offs12) \
  174. R5_S_INSN(OP_ST, F1_W, rt, rs, offs12)
  175. #define R5_SH(rt, rs, offs12) \
  176. R5_S_INSN(OP_ST, F1_H, rt, rs, offs12)
  177. #define R5_SB(rt, rs, offs12) \
  178. R5_S_INSN(OP_ST, F1_B, rt, rs, offs12)
  179. // pointer operations
  180. #if __riscv_xlen == 64
  181. #define R5_OP32 (OP_REG32 ^ OP_REG)
  182. #define F1_P F1_D
  183. #define PTR_SCALE 3
  184. // NB: must split 64 bit result into 2 32 bit registers
  185. // NB: expects 32 bit values in s1+s2, correctly sign extended to 64 bits
  186. #define EMIT_R5_MULLU_REG(dlo, dhi, s1, s2) do { \
  187. EMIT(R5_MUL(dlo, s1, s2)); \
  188. EMIT(R5_ASR_IMM(dhi, dlo, 32)); \
  189. EMIT(R5_ADDW_IMM(dlo, dlo, 0)); \
  190. } while (0)
  191. #define EMIT_R5_MULLS_REG(dlo, dhi, s1, s2) \
  192. EMIT_R5_MULLU_REG(dlo, dhi, s1, s2)
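// Illustrative trace (not in the original), assuming s1=-2 and s2=3, both held
// sign extended: MUL gives the full 64 bit product -6; ASR #32 leaves the high
// word 0xffffffff in dhi; ADDW #0 re-sign-extends the low word 0xfffffffa so
// dlo again follows the "upper half = sign extension" convention.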
  193. #else
  194. #define R5_OP32 0
  195. #define F1_P F1_W
  196. #define PTR_SCALE 2
  197. #define EMIT_R5_MULLU_REG(dlo, dhi, s1, s2) do { \
  198. int at = (dhi == s1 || dhi == s2 ? AT : dhi); \
  199. EMIT(R5_MULHU(at, s1, s2)); \
  200. EMIT(R5_MUL(dlo, s1, s2)); \
  201. if (at != dhi) emith_move_r_r(dhi, at); \
  202. } while (0)
  203. #define EMIT_R5_MULLS_REG(dlo, dhi, s1, s2) do { \
  204. int at = (dhi == s1 || dhi == s2 ? AT : dhi); \
  205. EMIT(R5_MULHS(at, s1, s2)); \
  206. EMIT(R5_MUL(dlo, s1, s2)); \
  207. if (at != dhi) emith_move_r_r(dhi, at); \
  208. } while (0)
  209. #endif
  210. #define R5_ADDW_REG(rd, rs, rt) (R5_ADD_REG(rd, rs, rt)^R5_OP32)
  211. #define R5_SUBW_REG(rd, rs, rt) (R5_SUB_REG(rd, rs, rt)^R5_OP32)
  212. #define R5_LSLW_REG(rd, rs, rt) (R5_LSL_REG(rd, rs, rt)^R5_OP32)
  213. #define R5_LSRW_REG(rd, rs, rt) (R5_LSR_REG(rd, rs, rt)^R5_OP32)
  214. #define R5_ASRW_REG(rd, rs, rt) (R5_ASR_REG(rd, rs, rt)^R5_OP32)
  215. #define R5_NEGW_REG(rd, rt) (R5_NEG_REG(rd, rt) ^R5_OP32)
  216. #define R5_MULW(rd, rs, rt) (R5_MUL(rd, rs, rt) ^R5_OP32)
  217. #define R5_ADDW_IMM(rd, rs, imm) (R5_ADD_IMM(rd, rs, imm) ^R5_OP32)
  218. #define R5_LSLW_IMM(rd, rs, bits) (R5_LSL_IMM(rd, rs, bits)^R5_OP32)
  219. #define R5_LSRW_IMM(rd, rs, bits) (R5_LSR_IMM(rd, rs, bits)^R5_OP32)
  220. #define R5_ASRW_IMM(rd, rs, bits) (R5_ASR_IMM(rd, rs, bits)^R5_OP32)
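// The ^R5_OP32 trick above relies on the opcode spacing: OP_REG32^OP_REG and
// OP_IMM32^OP_IMM are both 0x08, so XORing an encoding with R5_OP32 flips it
// to the word sized (sign extending) *W form, e.g. (worked out for illustration)
//   R5_ADD_REG(10,11,12) ^ R5_OP32 == 0x00c5853b == "addw a0,a1,a2"
// On RV32, R5_OP32 is 0 and the XOR is a no-op.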
  221. // XXX: tcache_ptr type for SVP and SH2 compilers differs..
  222. #define EMIT_PTR(ptr, x) \
  223. do { \
  224. *(u32 *)(ptr) = x; \
  225. ptr = (void *)((u8 *)(ptr) + sizeof(u32)); \
  226. } while (0)
  227. #define EMIT(op) \
  228. do { \
  229. EMIT_PTR(tcache_ptr, op); \
  230. COUNT_OP; \
  231. } while (0)
  232. // if-then-else conditional execution helpers
  233. #define JMP_POS(ptr) { \
  234. ptr = tcache_ptr; \
  235. EMIT(R5_B(0)); \
  236. }
  237. #define JMP_EMIT(cond, ptr) { \
  238. u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr); \
  239. EMIT_PTR(ptr, R5_BCOND(cond_m, cond_r, cond_s, val_ & 0x00001fff)); \
  240. }
  241. #define JMP_EMIT_NC(ptr) { \
  242. u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr); \
  243. EMIT_PTR(ptr, R5_B(val_ & 0x00001fff)); \
  244. }
  245. #define EMITH_JMP_START(cond) { \
  246. int cond_r, cond_s, cond_m = emith_cond_check(cond, &cond_r, &cond_s); \
  247. u8 *cond_ptr; \
  248. JMP_POS(cond_ptr)
  249. #define EMITH_JMP_END(cond) \
  250. JMP_EMIT(cond, cond_ptr); \
  251. }
  252. #define EMITH_JMP3_START(cond) { \
  253. int cond_r, cond_s, cond_m = emith_cond_check(cond, &cond_r, &cond_s); \
  254. u8 *cond_ptr, *else_ptr; \
  255. JMP_POS(cond_ptr)
  256. #define EMITH_JMP3_MID(cond) \
  257. JMP_POS(else_ptr); \
  258. JMP_EMIT(cond, cond_ptr);
  259. #define EMITH_JMP3_END() \
  260. JMP_EMIT_NC(else_ptr); \
  261. }
  262. // "simple" jump (no more than a few insns)
  263. // ARM32 will use conditional instructions here
  264. #define EMITH_SJMP_START EMITH_JMP_START
  265. #define EMITH_SJMP_END EMITH_JMP_END
  266. #define EMITH_SJMP3_START EMITH_JMP3_START
  267. #define EMITH_SJMP3_MID EMITH_JMP3_MID
  268. #define EMITH_SJMP3_END EMITH_JMP3_END
  269. #define EMITH_SJMP2_START(cond) \
  270. EMITH_SJMP3_START(cond)
  271. #define EMITH_SJMP2_MID(cond) \
  272. EMITH_SJMP3_MID(cond)
  273. #define EMITH_SJMP2_END(cond) \
  274. EMITH_SJMP3_END()
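// Usage sketch (illustrative only, mirroring how the SH2 code further down
// uses it): the block between START and END is *skipped* when cond holds,
// i.e. it only executes when cond is false:
//   emith_cmp_r_imm(sr, 0);
//   EMITH_JMP_START(DCOND_LE);   // placeholder branch, patched by ..._END
//   /* ...emitted only for sr > 0... */
//   EMITH_JMP_END(DCOND_LE);     // patches the branch to hop over the block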
  275. // flag register emulation. this is modelled after arm/x86.
  276. // the FNZ register stores the result of the last flag setting operation for
  277. // N and Z flag, used for EQ,NE,MI,PL branches.
  278. // the FC register stores the C flag (used for HI,HS,LO,LS,CC,CS).
  279. // the FV register stores information for V flag calculation (used for
  280. // GT,GE,LT,LE,VC,VS). V flag is costly and only fully calculated when needed.
  281. // the core registers may be temp registers, since the condition after calls
  282. // is undefined anyway.
  283. // flag emulation creates 2 (ie cmp #0/beq) up to 9 (ie adcf/ble) extra insns.
  284. // flag handling shortcuts may reduce this by 1-4 insns, see emith_cond_check()
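// Illustrative expansion (added for clarity, not in the original): with both C
// and V hinted, emith_addf_r_r(d, s) emits roughly
//   addw FNZ, d, s     ; N = bit 31, Z = all bits of the result
//   sltu FC, FNZ, d    ; carry out of the 32 bit add
//   xor  FV, d, s      ; Ns^Nt; full V is only formed in emith_cond_check()
//   mv   d, FNZ
// (the hints may drop the sltu/xor when C or V can't be needed.)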
  285. static int emith_cmp_rs, emith_cmp_rt; // registers used in cmp_r_r/cmp_r_imm
  286. static s32 emith_cmp_imm; // immediate value used in cmp_r_imm
  287. enum { _FHC=1, _FHV=2 } emith_flg_hint; // C/V flag usage hinted by compiler
  288. static int emith_flg_noV; // V flag known not to be set
  289. #define EMITH_HINT_COND(cond) do { \
  290. /* only need to check cond>>1 since the lowest bit inverts the cond */ \
  291. unsigned _mv = BITMASK3(DCOND_VS>>1,DCOND_GE>>1,DCOND_GT>>1); \
  292. unsigned _mc = _mv | BITMASK2(DCOND_HS>>1,DCOND_HI>>1); \
  293. emith_flg_hint = (_mv & BITMASK1(cond >> 1) ? _FHV : 0); \
  294. emith_flg_hint |= (_mc & BITMASK1(cond >> 1) ? _FHC : 0); \
  295. } while (0)
  296. // store minimal cc information: rd, rt^rs, carry
  297. // NB: the result *must* first go to FNZ, in case rd == rs or rd == rt.
  298. // NB: for adcf and sbcf, carry-in must be dealt with separately (see there)
  299. static void emith_set_arith_flags(int rd, int rs, int rt, s32 imm, int sub)
  300. {
  301. if (emith_flg_hint & _FHC) {
  302. if (sub) // C = sub:rt<rd, add:rd<rt
  303. EMIT(R5_SLTU_REG(FC, rs, FNZ));
  304. else EMIT(R5_SLTU_REG(FC, FNZ, rs));// C in FC, bit 0
  305. }
  306. if (emith_flg_hint & _FHV) {
  307. emith_flg_noV = 0;
  308. if (rt > Z0) // Nt^Ns in FV, bit 31
  309. EMIT(R5_XOR_REG(FV, rs, rt));
  310. else if (rt == Z0 || imm == 0)
  311. emith_flg_noV = 1; // imm #0 can't overflow
  312. else if ((imm < 0) == !sub)
  313. EMIT(R5_XOR_IMM(FV, rs, -1));
  314. else if ((imm > 0) == !sub)
  315. EMIT(R5_XOR_REG(FV, rs, Z0));
  316. }
  317. // full V = Nd^Nt^Ns^C calculation is deferred until really needed
  318. if (rd && rd != FNZ)
  319. EMIT(R5_MOV_REG(rd, FNZ)); // N,Z via result value in FNZ
  320. emith_cmp_rs = emith_cmp_rt = -1;
  321. }
  322. // since R5 has less-than and compare-branch insns, handle cmp separately by
  323. // storing the involved regs for later use in one of those R5 insns.
  324. // This works for all conditions but VC/VS, but this is fortunately never used.
  325. static void emith_set_compare_flags(int rs, int rt, s32 imm)
  326. {
  327. emith_cmp_rt = rt;
  328. emith_cmp_rs = rs;
  329. emith_cmp_imm = imm;
  330. }
  331. // data processing, register
  332. #define emith_move_r_r_ptr(d, s) \
  333. EMIT(R5_MOV_REG(d, s))
  334. #define emith_move_r_r_ptr_c(cond, d, s) \
  335. emith_move_r_r_ptr(d, s)
  336. #define emith_move_r_r(d, s) \
  337. emith_move_r_r_ptr(d, s)
  338. #define emith_move_r_r_c(cond, d, s) \
  339. emith_move_r_r(d, s)
  340. #define emith_mvn_r_r(d, s) \
  341. EMIT(R5_MVN_REG(d, s))
  342. #define emith_add_r_r_r_lsl_ptr(d, s1, s2, simm) do { \
  343. if (simm) { \
  344. EMIT(R5_LSL_IMM(AT, s2, simm)); \
  345. EMIT(R5_ADD_REG(d, s1, AT)); \
  346. } else EMIT(R5_ADD_REG(d, s1, s2)); \
  347. } while (0)
  348. #define emith_add_r_r_r_lsl(d, s1, s2, simm) do { \
  349. if (simm) { \
  350. EMIT(R5_LSLW_IMM(AT, s2, simm)); \
  351. EMIT(R5_ADDW_REG(d, s1, AT)); \
  352. } else EMIT(R5_ADDW_REG(d, s1, s2)); \
  353. } while (0)
  354. #define emith_add_r_r_r_lsr(d, s1, s2, simm) do { \
  355. if (simm) { \
  356. EMIT(R5_LSRW_IMM(AT, s2, simm)); \
  357. EMIT(R5_ADDW_REG(d, s1, AT)); \
  358. } else EMIT(R5_ADDW_REG(d, s1, s2)); \
  359. } while (0)
  360. #define emith_addf_r_r_r_lsl(d, s1, s2, simm) do { \
  361. if (simm) { \
  362. EMIT(R5_LSLW_IMM(AT, s2, simm)); \
  363. EMIT(R5_ADDW_REG(FNZ, s1, AT)); \
  364. emith_set_arith_flags(d, s1, AT, 0, 0); \
  365. } else { \
  366. EMIT(R5_ADDW_REG(FNZ, s1, s2)); \
  367. emith_set_arith_flags(d, s1, s2, 0, 0); \
  368. } \
  369. } while (0)
  370. #define emith_addf_r_r_r_lsr(d, s1, s2, simm) do { \
  371. if (simm) { \
  372. EMIT(R5_LSRW_IMM(AT, s2, simm)); \
  373. EMIT(R5_ADDW_REG(FNZ, s1, AT)); \
  374. emith_set_arith_flags(d, s1, AT, 0, 0); \
  375. } else { \
  376. EMIT(R5_ADDW_REG(FNZ, s1, s2)); \
  377. emith_set_arith_flags(d, s1, s2, 0, 0); \
  378. } \
  379. } while (0)
  380. #define emith_sub_r_r_r_lsl(d, s1, s2, simm) do { \
  381. if (simm) { \
  382. EMIT(R5_LSLW_IMM(AT, s2, simm)); \
  383. EMIT(R5_SUBW_REG(d, s1, AT)); \
  384. } else EMIT(R5_SUBW_REG(d, s1, s2)); \
  385. } while (0)
  386. #define emith_subf_r_r_r_lsl(d, s1, s2, simm) do { \
  387. if (simm) { \
  388. EMIT(R5_LSLW_IMM(AT, s2, simm)); \
  389. EMIT(R5_SUBW_REG(FNZ, s1, AT)); \
  390. emith_set_arith_flags(d, s1, AT, 0, 1); \
  391. } else { \
  392. EMIT(R5_SUBW_REG(FNZ, s1, s2)); \
  393. emith_set_arith_flags(d, s1, s2, 0, 1); \
  394. } \
  395. } while (0)
  396. #define emith_or_r_r_r_lsl(d, s1, s2, simm) do { \
  397. if (simm) { \
  398. EMIT(R5_LSLW_IMM(AT, s2, simm)); \
  399. EMIT(R5_OR_REG(d, s1, AT)); \
  400. } else EMIT(R5_OR_REG(d, s1, s2)); \
  401. } while (0)
  402. #define emith_or_r_r_r_lsr(d, s1, s2, simm) do { \
  403. if (simm) { \
  404. EMIT(R5_LSRW_IMM(AT, s2, simm)); \
  405. EMIT(R5_OR_REG(d, s1, AT)); \
  406. } else EMIT(R5_OR_REG(d, s1, s2)); \
  407. } while (0)
  408. #define emith_eor_r_r_r_lsl(d, s1, s2, simm) do { \
  409. if (simm) { \
  410. EMIT(R5_LSLW_IMM(AT, s2, simm)); \
  411. EMIT(R5_XOR_REG(d, s1, AT)); \
  412. } else EMIT(R5_XOR_REG(d, s1, s2)); \
  413. } while (0)
  414. #define emith_eor_r_r_r_lsr(d, s1, s2, simm) do { \
  415. if (simm) { \
  416. EMIT(R5_LSRW_IMM(AT, s2, simm)); \
  417. EMIT(R5_XOR_REG(d, s1, AT)); \
  418. } else EMIT(R5_XOR_REG(d, s1, s2)); \
  419. } while (0)
  420. #define emith_and_r_r_r_lsl(d, s1, s2, simm) do { \
  421. if (simm) { \
  422. EMIT(R5_LSLW_IMM(AT, s2, simm)); \
  423. EMIT(R5_AND_REG(d, s1, AT)); \
  424. } else EMIT(R5_AND_REG(d, s1, s2)); \
  425. } while (0)
  426. #define emith_or_r_r_lsl(d, s, lslimm) \
  427. emith_or_r_r_r_lsl(d, d, s, lslimm)
  428. #define emith_or_r_r_lsr(d, s, lsrimm) \
  429. emith_or_r_r_r_lsr(d, d, s, lsrimm)
  430. #define emith_eor_r_r_lsl(d, s, lslimm) \
  431. emith_eor_r_r_r_lsl(d, d, s, lslimm)
  432. #define emith_eor_r_r_lsr(d, s, lsrimm) \
  433. emith_eor_r_r_r_lsr(d, d, s, lsrimm)
  434. #define emith_add_r_r_r(d, s1, s2) \
  435. emith_add_r_r_r_lsl(d, s1, s2, 0)
  436. #define emith_addf_r_r_r_ptr(d, s1, s2) \
  437. emith_addf_r_r_r_lsl(d, s1, s2, 0)
  438. #define emith_addf_r_r_r(d, s1, s2) \
  439. emith_addf_r_r_r_ptr(d, s1, s2)
  440. #define emith_sub_r_r_r(d, s1, s2) \
  441. emith_sub_r_r_r_lsl(d, s1, s2, 0)
  442. #define emith_subf_r_r_r(d, s1, s2) \
  443. emith_subf_r_r_r_lsl(d, s1, s2, 0)
  444. #define emith_or_r_r_r(d, s1, s2) \
  445. emith_or_r_r_r_lsl(d, s1, s2, 0)
  446. #define emith_eor_r_r_r(d, s1, s2) \
  447. emith_eor_r_r_r_lsl(d, s1, s2, 0)
  448. #define emith_and_r_r_r(d, s1, s2) \
  449. emith_and_r_r_r_lsl(d, s1, s2, 0)
  450. #define emith_add_r_r_ptr(d, s) \
  451. emith_add_r_r_r_lsl_ptr(d, d, s, 0)
  452. #define emith_add_r_r(d, s) \
  453. emith_add_r_r_r(d, d, s)
  454. #define emith_sub_r_r(d, s) \
  455. emith_sub_r_r_r(d, d, s)
  456. #define emith_neg_r_r(d, s) \
  457. EMIT(R5_NEGW_REG(d, s))
  458. #define emith_adc_r_r_r(d, s1, s2) do { \
  459. emith_add_r_r_r(AT, s2, FC); \
  460. emith_add_r_r_r(d, s1, AT); \
  461. } while (0)
  462. #define emith_sbc_r_r_r(d, s1, s2) do { \
  463. emith_add_r_r_r(AT, s2, FC); \
  464. emith_sub_r_r_r(d, s1, AT); \
  465. } while (0)
  466. #define emith_adc_r_r(d, s) \
  467. emith_adc_r_r_r(d, d, s)
  468. #define emith_negc_r_r(d, s) \
  469. emith_sbc_r_r_r(d, Z0, s)
  470. // NB: the incoming carry Cin can cause Cout if s2+Cin=0 (or s1+Cin=0 FWIW)
  471. // moreover, if s2+Cin=0 caused Cout, s1+s2+Cin=s1+0 can't cause another Cout
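// (illustration: with s2=0xffffffff and Cin=1, s2+Cin wraps to 0 and produces
//  Cout; adding s1 to 0 can then never carry again, which is why a single OR
//  of the two partial carries suffices below.)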
  472. #define emith_adcf_r_r_r(d, s1, s2) do { \
  473. emith_add_r_r_r(FNZ, s2, FC); \
  474. EMIT(R5_SLTU_REG(AT, FNZ, FC)); \
  475. emith_add_r_r_r(FNZ, s1, FNZ); \
  476. emith_set_arith_flags(d, s1, s2, 0, 0); \
  477. emith_or_r_r(FC, AT); \
  478. } while (0)
  479. #define emith_sbcf_r_r_r(d, s1, s2) do { \
  480. emith_add_r_r_r(FNZ, s2, FC); \
  481. EMIT(R5_SLTU_REG(AT, FNZ, FC)); \
  482. emith_sub_r_r_r(FNZ, s1, FNZ); \
  483. emith_set_arith_flags(d, s1, s2, 0, 1); \
  484. emith_or_r_r(FC, AT); \
  485. } while (0)
  486. #define emith_and_r_r(d, s) \
  487. emith_and_r_r_r(d, d, s)
  488. #define emith_and_r_r_c(cond, d, s) \
  489. emith_and_r_r(d, s)
  490. #define emith_or_r_r(d, s) \
  491. emith_or_r_r_r(d, d, s)
  492. #define emith_eor_r_r(d, s) \
  493. emith_eor_r_r_r(d, d, s)
  494. #define emith_tst_r_r_ptr(d, s) do { \
  495. if (d != s) { \
  496. emith_and_r_r_r(FNZ, d, s); \
  497. emith_cmp_rs = emith_cmp_rt = -1; \
  498. } else emith_cmp_rs = s, emith_cmp_rt = Z0; \
  499. } while (0)
  500. #define emith_tst_r_r(d, s) \
  501. emith_tst_r_r_ptr(d, s)
  502. #define emith_teq_r_r(d, s) do { \
  503. emith_eor_r_r_r(FNZ, d, s); \
  504. emith_cmp_rs = emith_cmp_rt = -1; \
  505. } while (0)
  506. #define emith_cmp_r_r(d, s) \
  507. emith_set_compare_flags(d, s, 0)
  508. // emith_subf_r_r_r(FNZ, d, s)
  509. #define emith_addf_r_r(d, s) \
  510. emith_addf_r_r_r(d, d, s)
  511. #define emith_subf_r_r(d, s) \
  512. emith_subf_r_r_r(d, d, s)
  513. #define emith_adcf_r_r(d, s) \
  514. emith_adcf_r_r_r(d, d, s)
  515. #define emith_sbcf_r_r(d, s) \
  516. emith_sbcf_r_r_r(d, d, s)
  517. #define emith_negcf_r_r(d, s) \
  518. emith_sbcf_r_r_r(d, Z0, s)
  519. // move immediate
  520. #define MAX_HOST_LITERALS 32 // pool must be smaller than 4 KB
  521. static uintptr_t literal_pool[MAX_HOST_LITERALS];
  522. static u32 *literal_insn[MAX_HOST_LITERALS];
  523. static int literal_pindex, literal_iindex;
  524. static inline int emith_pool_literal(uintptr_t imm)
  525. {
  526. int idx = literal_pindex - 8; // max look behind in pool
  527. // see if one of the last literals was the same
  528. for (idx = (idx < 0 ? 0 : idx); idx < literal_pindex; idx++)
  529. if (imm == literal_pool[idx])
  530. break;
  531. if (idx == literal_pindex) // store new literal
  532. literal_pool[literal_pindex++] = imm;
  533. return idx;
  534. }
  535. static void emith_pool_commit(int jumpover)
  536. {
  537. int i, sz = literal_pindex * sizeof(uintptr_t);
  538. u8 *pool = (u8 *)tcache_ptr;
  539. // nothing to commit if pool is empty
  540. if (sz == 0)
  541. return;
  542. // align pool to pointer size
  543. if (jumpover)
  544. pool += sizeof(u32);
  545. i = (uintptr_t)pool & (sizeof(void *)-1);
  546. pool += (i ? sizeof(void *)-i : 0);
  547. // need branch over pool if not at block end
  548. if (jumpover)
  549. EMIT(R5_B(sz + (pool-(u8 *)tcache_ptr)));
  550. // safety check - pool must be after insns and reachable
  551. if ((u32)(pool - (u8 *)literal_insn[0] + 8) > 0x7ff) {
  552. elprintf(EL_STATUS|EL_SVP|EL_ANOMALY,
  553. "pool offset out of range");
  554. exit(1);
  555. }
  556. // copy pool and adjust addresses in insns accessing the pool
  557. memcpy(pool, literal_pool, sz);
  558. for (i = 0; i < literal_iindex; i++) {
  559. *literal_insn[i] += ((u8 *)pool - (u8 *)literal_insn[i]) << 20;
  560. }
  561. // count pool constants as insns for statistics
  562. for (i = 0; i < literal_pindex * sizeof(uintptr_t)/sizeof(u32); i++)
  563. COUNT_OP;
  564. tcache_ptr = (void *)((u8 *)pool + sz);
  565. literal_pindex = literal_iindex = 0;
  566. }
  567. static void emith_pool_check(void)
  568. {
  569. // check if pool must be committed
  570. if (literal_iindex > MAX_HOST_LITERALS-4 || (literal_pindex &&
  571. (u8 *)tcache_ptr - (u8 *)literal_insn[0] > 0x700))
  572. // pool full, or displacement is approaching the limit
  573. emith_pool_commit(1);
  574. }
  575. static void emith_move_imm(int r, uintptr_t imm)
  576. {
  577. u32 lui = imm + _CB(imm,1,11,12);
  578. if (lui >> 12) {
  579. // take out the effect of the sign extension of ADDI
  580. EMIT(R5_MOVT_IMM(r, lui));
  581. if (imm & 0xfff)
  582. EMIT(R5_ADD_IMM(r, r, imm));
  583. } else
  584. EMIT(R5_ADD_IMM(r, Z0, imm));
  585. }
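// Worked example (illustrative): imm = 0x12345fff. ADDI sign extends its low
// 12 bits (0xfff -> -1), so the LUI part is pre-biased by bit 11:
//   lui = imm + 0x1000 = 0x12346fff  ->  LUI r, 0x12346 ; ADDI r, r, -1
// which reassembles 0x12346000 - 1 = 0x12345fff.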
  586. static void emith_move_ptr_imm(int r, uintptr_t imm)
  587. {
  588. #if __riscv_xlen == 64
  589. if ((s32)imm != imm) {
  590. int idx;
  591. if (literal_iindex >= MAX_HOST_LITERALS)
  592. emith_pool_commit(1);
  593. idx = emith_pool_literal(imm);
  594. EMIT(R5_MOVA_IMM(AT, 0)); // loads PC of MOVA insn... + 4 in LD
  595. literal_insn[literal_iindex++] = (u32 *)tcache_ptr;
  596. EMIT(R5_I_INSN(OP_LD, F1_P, r, AT, idx*sizeof(uintptr_t) + 4));
  597. } else
  598. #endif
  599. emith_move_imm(r, imm);
  600. }
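// On RV64 a constant outside the signed 32 bit range becomes (sketch):
//   auipc t6, 0               ; t6 (= AT = x31) gets the address of the auipc
//   ld    r, <idx*8 + 4>(t6)  ; offset patched later in emith_pool_commit()
// emith_pool_commit() adds (pool - &ld_insn) into the LD immediate (the
// "<< 20" there shifts it into the I-type immediate field); the initial +4
// accounts for the LD sitting one insn after the auipc.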
  601. #define emith_move_r_ptr_imm(r, imm) \
  602. emith_move_ptr_imm(r, (uintptr_t)(imm))
  603. #define emith_move_r_imm(r, imm) \
  604. emith_move_imm(r, (u32)(imm))
  605. #define emith_move_r_imm_c(cond, r, imm) \
  606. emith_move_r_imm(r, imm)
  607. #define emith_move_r_imm_s8_patchable(r, imm) \
  608. EMIT(R5_ADD_IMM(r, Z0, (s8)(imm)))
  609. #define emith_move_r_imm_s8_patch(ptr, imm) do { \
  610. u32 *ptr_ = (u32 *)ptr; \
  611. EMIT_PTR(ptr_, (*ptr_ & 0x000fffff) | ((u16)(s8)(imm)<<20)); \
  612. } while (0)
  613. // arithmetic/logical, immediate - R5 always takes a signed 12 bit immediate
  614. static void emith_op_imm(int f1, int rd, int rs, u32 imm)
  615. {
  616. int op32 = (f1 == F1_ADD ? R5_OP32 : 0);
  617. if ((imm + _CB(imm,1,11,12)) >> 12) {
  618. emith_move_r_imm(AT, imm);
  619. EMIT(R5_R_INSN(OP_REG^op32, f1&7,_, rd, rs, AT));
  620. } else if (imm + (f1 == F1_AND) || rd != rs)
  621. EMIT(R5_I_INSN(OP_IMM^op32, f1&7, rd, rs, imm));
  622. }
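// Behaviour sketch (illustrative): if imm fits in a signed 12 bit immediate
// the I-form is used, otherwise imm is built in AT and the R-form is used.
// The "imm + (f1 == F1_AND)" test skips pure no-ops when rd == rs:
//   OR  r, r, #0   -> nothing emitted
//   AND r, r, #-1  -> nothing emitted
//   AND r, r, #0   -> still emitted (it clears r)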
  623. // arithmetic, immediate - can only be ADDI, since SUBI doesn't exist
  624. #define emith_add_r_imm(r, imm) \
  625. emith_add_r_r_imm(r, r, imm)
  626. #define emith_add_r_imm_c(cond, r, imm) \
  627. emith_add_r_imm(r, imm)
  628. #define emith_addf_r_imm(r, imm) \
  629. emith_addf_r_r_imm(r, imm)
  630. #define emith_sub_r_imm(r, imm) \
  631. emith_sub_r_r_imm(r, r, imm)
  632. #define emith_sub_r_imm_c(cond, r, imm) \
  633. emith_sub_r_imm(r, imm)
  634. #define emith_subf_r_imm(r, imm) \
  635. emith_subf_r_r_imm(r, r, imm)
  636. #define emith_adc_r_imm(r, imm) \
  637. emith_adc_r_r_imm(r, r, imm);
  638. #define emith_adcf_r_imm(r, imm) \
  639. emith_adcf_r_r_imm(r, r, imm)
  640. #define emith_cmp_r_imm(r, imm) \
  641. emith_set_compare_flags(r, -1, imm)
  642. // emith_subf_r_r_imm(FNZ, r, imm)
  643. #define emith_add_r_r_ptr_imm(d, s, imm) \
  644. emith_op_imm(F1_ADD|F2_ALT, d, s, imm)
  645. #define emith_add_r_r_imm(d, s, imm) \
  646. emith_op_imm(F1_ADD, d, s, imm)
  647. #define emith_addf_r_r_imm(d, s, imm) do { \
  648. emith_add_r_r_imm(FNZ, s, imm); \
  649. emith_set_arith_flags(d, s, -1, imm, 0); \
  650. } while (0)
  651. #define emith_adc_r_r_imm(d, s, imm) do { \
  652. emith_add_r_r_r(AT, s, FC); \
  653. emith_add_r_r_imm(d, AT, imm); \
  654. } while (0)
  655. #define emith_adcf_r_r_imm(d, s, imm) do { \
  656. if (imm == 0) { \
  657. emith_add_r_r_r(FNZ, s, FC); \
  658. emith_set_arith_flags(d, s, -1, 1, 0); \
  659. } else { \
  660. emith_add_r_r_r(FNZ, s, FC); \
  661. EMIT(R5_SLTU_REG(AT, FNZ, FC)); \
  662. emith_add_r_r_imm(FNZ, FNZ, imm); \
  663. emith_set_arith_flags(d, s, -1, imm, 0); \
  664. emith_or_r_r(FC, AT); \
  665. } \
  666. } while (0)
  667. // NB: no SUBI in R5, since ADDI takes a signed imm
  668. #define emith_sub_r_r_imm(d, s, imm) \
  669. emith_add_r_r_imm(d, s, -(imm))
  670. #define emith_sub_r_r_imm_c(cond, d, s, imm) \
  671. emith_sub_r_r_imm(d, s, imm)
  672. #define emith_subf_r_r_imm(d, s, imm) do { \
  673. emith_sub_r_r_imm(FNZ, s, imm); \
  674. emith_set_arith_flags(d, s, -1, imm, 1); \
  675. } while (0)
  676. // logical, immediate
  677. #define emith_and_r_imm(r, imm) \
  678. emith_op_imm(F1_AND, r, r, imm)
  679. #define emith_or_r_imm(r, imm) \
  680. emith_op_imm(F1_OR, r, r, imm)
  681. #define emith_or_r_imm_c(cond, r, imm) \
  682. emith_or_r_imm(r, imm)
  683. #define emith_eor_r_imm_ptr(r, imm) \
  684. emith_op_imm(F1_XOR, r, r, imm)
  685. #define emith_eor_r_imm_ptr_c(cond, r, imm) \
  686. emith_eor_r_imm_ptr(r, imm)
  687. #define emith_eor_r_imm(r, imm) \
  688. emith_eor_r_imm_ptr(r, imm)
  689. #define emith_eor_r_imm_c(cond, r, imm) \
  690. emith_eor_r_imm(r, imm)
  691. /* NB: BIC #imm not available in R5; use AND #~imm instead */
  692. #define emith_bic_r_imm(r, imm) \
  693. emith_op_imm(F1_AND, r, r, ~(imm))
  694. #define emith_bic_r_imm_c(cond, r, imm) \
  695. emith_bic_r_imm(r, imm)
  696. #define emith_tst_r_imm(r, imm) do { \
  697. emith_op_imm(F1_AND, FNZ, r, imm); \
  698. emith_cmp_rs = emith_cmp_rt = -1; \
  699. } while (0)
  700. #define emith_tst_r_imm_c(cond, r, imm) \
  701. emith_tst_r_imm(r, imm)
  702. #define emith_and_r_r_imm(d, s, imm) \
  703. emith_op_imm(F1_AND, d, s, imm)
  704. #define emith_or_r_r_imm(d, s, imm) \
  705. emith_op_imm(F1_OR, d, s, imm)
  706. #define emith_eor_r_r_imm(d, s, imm) \
  707. emith_op_imm(F1_XOR, d, s, imm)
  708. // shift
  709. #define emith_lsl(d, s, cnt) \
  710. EMIT(R5_LSLW_IMM(d, s, cnt))
  711. #define emith_lsr(d, s, cnt) \
  712. EMIT(R5_LSRW_IMM(d, s, cnt))
  713. #define emith_asr(d, s, cnt) \
  714. EMIT(R5_ASRW_IMM(d, s, cnt))
  715. #define emith_ror(d, s, cnt) do { \
  716. EMIT(R5_LSLW_IMM(AT, s, 32-(cnt))); \
  717. EMIT(R5_LSRW_IMM(d, s, cnt)); \
  718. EMIT(R5_OR_REG(d, d, AT)); \
  719. } while (0)
  720. #define emith_ror_c(cond, d, s, cnt) \
  721. emith_ror(d, s, cnt)
  722. #define emith_rol(d, s, cnt) do { \
  723. EMIT(R5_LSRW_IMM(AT, s, 32-(cnt))); \
  724. EMIT(R5_LSLW_IMM(d, s, cnt)); \
  725. EMIT(R5_OR_REG(d, d, AT)); \
  726. } while (0)
  727. #define emith_rorc(d) do { \
  728. emith_lsr(d, d, 1); \
  729. emith_lsl(AT, FC, 31); \
  730. emith_or_r_r(d, AT); \
  731. } while (0)
  732. #define emith_rolc(d) do { \
  733. emith_lsl(d, d, 1); \
  734. emith_or_r_r(d, FC); \
  735. } while (0)
  736. // NB: all flag setting shifts make V undefined
  737. #define emith_lslf(d, s, cnt) do { \
  738. int _s = s; \
  739. if ((cnt) > 1) { \
  740. emith_lsl(d, s, cnt-1); \
  741. _s = d; \
  742. } \
  743. if ((cnt) > 0) { \
  744. emith_lsr(FC, _s, 31); \
  745. emith_lsl(d, _s, 1); \
  746. } \
  747. emith_move_r_r(FNZ, d); \
  748. emith_cmp_rs = emith_cmp_rt = -1; \
  749. } while (0)
  750. #define emith_lsrf(d, s, cnt) do { \
  751. int _s = s; \
  752. if ((cnt) > 1) { \
  753. emith_lsr(d, s, cnt-1); \
  754. _s = d; \
  755. } \
  756. if ((cnt) > 0) { \
  757. emith_and_r_r_imm(FC, _s, 1); \
  758. emith_lsr(d, _s, 1); \
  759. } \
  760. emith_move_r_r(FNZ, d); \
  761. emith_cmp_rs = emith_cmp_rt = -1; \
  762. } while (0)
  763. #define emith_asrf(d, s, cnt) do { \
  764. int _s = s; \
  765. if ((cnt) > 1) { \
  766. emith_asr(d, s, cnt-1); \
  767. _s = d; \
  768. } \
  769. if ((cnt) > 0) { \
  770. emith_and_r_r_imm(FC, _s, 1); \
  771. emith_asr(d, _s, 1); \
  772. } \
  773. emith_move_r_r(FNZ, d); \
  774. emith_cmp_rs = emith_cmp_rt = -1; \
  775. } while (0)
  776. #define emith_rolf(d, s, cnt) do { \
  777. emith_rol(d, s, cnt); \
  778. emith_and_r_r_imm(FC, d, 1); \
  779. emith_move_r_r(FNZ, d); \
  780. emith_cmp_rs = emith_cmp_rt = -1; \
  781. } while (0)
  782. #define emith_rorf(d, s, cnt) do { \
  783. emith_ror(d, s, cnt); \
  784. emith_lsr(FC, d, 31); \
  785. emith_move_r_r(FNZ, d); \
  786. emith_cmp_rs = emith_cmp_rt = -1; \
  787. } while (0)
  788. #define emith_rolcf(d) do { \
  789. emith_lsr(AT, d, 31); \
  790. emith_lsl(d, d, 1); \
  791. emith_or_r_r(d, FC); \
  792. emith_move_r_r(FC, AT); \
  793. emith_move_r_r(FNZ, d); \
  794. emith_cmp_rs = emith_cmp_rt = -1; \
  795. } while (0)
  796. #define emith_rorcf(d) do { \
  797. emith_and_r_r_imm(AT, d, 1); \
  798. emith_lsr(d, d, 1); \
  799. emith_lsl(FC, FC, 31); \
  800. emith_or_r_r(d, FC); \
  801. emith_move_r_r(FC, AT); \
  802. emith_move_r_r(FNZ, d); \
  803. emith_cmp_rs = emith_cmp_rt = -1; \
  804. } while (0)
  805. // signed/unsigned extend
  806. #define emith_clear_msb(d, s, count) /* bits to clear */ do { \
  807. u32 t; \
  808. if ((count) >= 21) { \
  809. t = (count) - 21; \
  810. t = 0x7ff >> t; \
  811. emith_and_r_r_imm(d, s, t); \
  812. } else { \
  813. emith_lsl(d, s, count); \
  814. emith_lsr(d, d, count); \
  815. } \
  816. } while (0)
  817. #define emith_clear_msb_c(cond, d, s, count) \
  818. emith_clear_msb(d, s, count)
  819. #define emith_sext(d, s, count) /* bits to keep */ do { \
  820. emith_lsl(d, s, 32-(count)); \
  821. emith_asr(d, d, 32-(count)); \
  822. } while (0)
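// e.g. (illustration) emith_clear_msb(d, s, 24) keeps the low 8 bits via
// ANDI d, s, 0xff (0x7ff >> 3), while smaller counts fall back to the LSL/LSR
// pair; likewise emith_sext(d, s, 8) is LSLW #24 + ASRW #24, a sign extended byte.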
  823. // multiply Rd = Rn*Rm (+ Ra)
  824. #define emith_mul(d, s1, s2) \
  825. EMIT(R5_MULW(d, s1, s2)) \
  826. #define emith_mul_u64(dlo, dhi, s1, s2) \
  827. EMIT_R5_MULLU_REG(dlo, dhi, s1, s2)
  828. #define emith_mul_s64(dlo, dhi, s1, s2) \
  829. EMIT_R5_MULLS_REG(dlo, dhi, s1, s2)
  830. #define emith_mula_s64(dlo, dhi, s1, s2) do { \
  831. int t_ = rcache_get_tmp(); \
  832. EMIT_R5_MULLS_REG(t_, AT, s1, s2); \
  833. emith_add_r_r(dhi, AT); \
  834. emith_add_r_r(dlo, t_); \
  835. EMIT(R5_SLTU_REG(AT, dlo, t_)); \
  836. emith_add_r_r(dhi, AT); \
  837. rcache_free_tmp(t_); \
  838. } while (0)
  839. #define emith_mula_s64_c(cond, dlo, dhi, s1, s2) \
  840. emith_mula_s64(dlo, dhi, s1, s2)
  841. // load/store. offs has 12 bits signed, hence larger offs may use a temp
  842. static void emith_ld_offs(int sz, int rd, int rs, int o12)
  843. {
  844. if (o12 >= -0x800 && o12 < 0x800) {
  845. EMIT(R5_I_INSN(OP_LD, sz, rd, rs, o12));
  846. } else {
  847. EMIT(R5_MOVT_IMM(AT, o12 + _CB(o12,1,11,12))); \
  848. EMIT(R5_R_INSN(OP_REG, F1_ADD,_, AT, rs, AT)); \
  849. EMIT(R5_I_INSN(OP_LD, sz, rd, AT, o12));
  850. }
  851. }
  852. #define emith_read_r_r_offs_ptr(r, rs, offs) \
  853. emith_ld_offs(F1_P, r, rs, offs)
  854. #define emith_read_r_r_offs_ptr_c(cond, r, rs, offs) \
  855. emith_read_r_r_offs_ptr(r, rs, offs)
  856. #define emith_read_r_r_offs(r, rs, offs) \
  857. emith_ld_offs(F1_W, r, rs, offs)
  858. #define emith_read_r_r_offs_c(cond, r, rs, offs) \
  859. emith_read_r_r_offs(r, rs, offs)
  860. #define emith_read_r_r_r_ptr(r, rs, rm) do { \
  861. emith_add_r_r_r(AT, rs, rm); \
  862. emith_ld_offs(F1_P, r, AT, 0); \
  863. } while (0)
  864. #define emith_read_r_r_r(r, rs, rm) do { \
  865. emith_add_r_r_r(AT, rs, rm); \
  866. emith_ld_offs(F1_W, r, AT, 0); \
  867. } while (0)
  868. #define emith_read_r_r_r_c(cond, r, rs, rm) \
  869. emith_read_r_r_r(r, rs, rm)
  870. #define emith_read8_r_r_offs(r, rs, offs) \
  871. emith_ld_offs(F1_BU, r, rs, offs)
  872. #define emith_read8_r_r_offs_c(cond, r, rs, offs) \
  873. emith_read8_r_r_offs(r, rs, offs)
  874. #define emith_read8_r_r_r(r, rs, rm) do { \
  875. emith_add_r_r_r(AT, rs, rm); \
  876. emith_ld_offs(F1_BU, r, AT, 0); \
  877. } while (0)
  878. #define emith_read8_r_r_r_c(cond, r, rs, rm) \
  879. emith_read8_r_r_r(r, rs, rm)
  880. #define emith_read16_r_r_offs(r, rs, offs) \
  881. emith_ld_offs(F1_HU, r, rs, offs)
  882. #define emith_read16_r_r_offs_c(cond, r, rs, offs) \
  883. emith_read16_r_r_offs(r, rs, offs)
  884. #define emith_read16_r_r_r(r, rs, rm) do { \
  885. emith_add_r_r_r(AT, rs, rm); \
  886. emith_ld_offs(F1_HU, r, AT, 0); \
  887. } while (0)
  888. #define emith_read16_r_r_r_c(cond, r, rs, rm) \
  889. emith_read16_r_r_r(r, rs, rm)
  890. #define emith_read8s_r_r_offs(r, rs, offs) \
  891. emith_ld_offs(F1_B, r, rs, offs)
  892. #define emith_read8s_r_r_offs_c(cond, r, rs, offs) \
  893. emith_read8s_r_r_offs(r, rs, offs)
  894. #define emith_read8s_r_r_r(r, rs, rm) do { \
  895. emith_add_r_r_r(AT, rs, rm); \
  896. emith_ld_offs(F1_B, r, AT, 0); \
  897. } while (0)
  898. #define emith_read8s_r_r_r_c(cond, r, rs, rm) \
  899. emith_read8s_r_r_r(r, rs, rm)
  900. #define emith_read16s_r_r_offs(r, rs, offs) \
  901. emith_ld_offs(F1_H, r, rs, offs)
  902. #define emith_read16s_r_r_offs_c(cond, r, rs, offs) \
  903. emith_read16s_r_r_offs(r, rs, offs)
  904. #define emith_read16s_r_r_r(r, rs, rm) do { \
  905. emith_add_r_r_r(AT, rs, rm); \
  906. emith_ld_offs(F1_H, r, AT, 0); \
  907. } while (0)
  908. #define emith_read16s_r_r_r_c(cond, r, rs, rm) \
  909. emith_read16s_r_r_r(r, rs, rm)
  910. static void emith_st_offs(int sz, int rt, int rs, int o12)
  911. {
  912. if (o12 >= -0x800 && o12 < 0x800) {
  913. EMIT(R5_S_INSN(OP_ST, sz, rt, rs, o12));
  914. } else {
  915. EMIT(R5_MOVT_IMM(AT, o12 + _CB(o12,1,11,12))); \
  916. EMIT(R5_R_INSN(OP_REG, F1_ADD,_, AT, rs, AT)); \
  917. EMIT(R5_S_INSN(OP_ST, sz, rt, AT, o12));
  918. }
  919. }
  920. #define emith_write_r_r_offs_ptr(r, rs, offs) \
  921. emith_st_offs(F1_P, r, rs, offs)
  922. #define emith_write_r_r_offs_ptr_c(cond, r, rs, offs) \
  923. emith_write_r_r_offs_ptr(r, rs, offs)
  924. #define emith_write_r_r_r_ptr(r, rs, rm) do { \
  925. emith_add_r_r_r(AT, rs, rm); \
  926. emith_st_offs(F1_P, r, AT, 0); \
  927. } while (0)
  928. #define emith_write_r_r_r_ptr_c(cond, r, rs, rm) \
  929. emith_write_r_r_r_ptr(r, rs, rm)
  930. #define emith_write_r_r_offs(r, rs, offs) \
  931. emith_st_offs(F1_W, r, rs, offs)
  932. #define emith_write_r_r_offs_c(cond, r, rs, offs) \
  933. emith_write_r_r_offs(r, rs, offs)
  934. #define emith_write_r_r_r(r, rs, rm) do { \
  935. emith_add_r_r_r(AT, rs, rm); \
  936. emith_st_offs(F1_W, r, AT, 0); \
  937. } while (0)
  938. #define emith_write_r_r_r_c(cond, r, rs, rm) \
  939. emith_write_r_r_r(r, rs, rm)
  940. #define emith_ctx_read_ptr(r, offs) \
  941. emith_read_r_r_offs_ptr(r, CONTEXT_REG, offs)
  942. #define emith_ctx_read(r, offs) \
  943. emith_read_r_r_offs(r, CONTEXT_REG, offs)
  944. #define emith_ctx_read_c(cond, r, offs) \
  945. emith_ctx_read(r, offs)
  946. #define emith_ctx_write_ptr(r, offs) \
  947. emith_write_r_r_offs_ptr(r, CONTEXT_REG, offs)
  948. #define emith_ctx_write(r, offs) \
  949. emith_write_r_r_offs(r, CONTEXT_REG, offs)
  950. #define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
  951. int r_ = r, offs_ = offs, cnt_ = cnt; \
  952. for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
  953. emith_ctx_read(r_, offs_); \
  954. } while (0)
  955. #define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
  956. int r_ = r, offs_ = offs, cnt_ = cnt; \
  957. for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
  958. emith_ctx_write(r_, offs_); \
  959. } while (0)
  960. // function call handling
  961. #define emith_save_caller_regs(mask) do { \
  962. int _c; u32 _m = mask & 0x3fce0; /* x5-x7,x10-x17 */ \
  963. _c = count_bits(_m)&3; _m |= (1<<((4-_c)&3))-1; /* ABI align */ \
  964. int _s = count_bits(_m) * 4, _o = _s; \
  965. if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
  966. for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
  967. if (_m & (1 << _c)) \
  968. { _o -= 4; if (_c) emith_write_r_r_offs(_c, SP, _o); } \
  969. } while (0)
  970. #define emith_restore_caller_regs(mask) do { \
  971. int _c; u32 _m = mask & 0x3fce0; \
  972. _c = count_bits(_m)&3; _m |= (1<<((4-_c)&3))-1; /* ABI align */ \
  973. int _s = count_bits(_m) * 4, _o = 0; \
  974. for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
  975. if (_m & (1 << _c)) \
  976. { if (_c) emith_read_r_r_offs(_c, SP, _o); _o += 4; } \
  977. if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
  978. } while (0)
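// Alignment note with a worked example (illustrative): the mask above covers
// at most 11 registers (x5-x7, x10-x17); the count is padded up to a multiple
// of 4 by setting low dummy bits (x0 is skipped when storing), so 11 regs
// become 12 slots = 48 bytes and SP stays 16 byte aligned as the ABI requires.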
  979. #define host_arg2reg(rd, arg) \
  980. rd = (arg+10)
  981. #define emith_pass_arg_r(arg, reg) \
  982. emith_move_r_r(arg, reg)
  983. #define emith_pass_arg_imm(arg, imm) \
  984. emith_move_r_imm(arg, imm)
  985. // branching
  986. #define emith_invert_branch(cond) /* inverted conditional branch */ \
  987. ((cond) ^ 0x01)
  988. // evaluate the emulated condition, returns a register/branch type pair
  989. static int emith_cmpr_check(int rs, int rt, int cond, int *r, int *s)
  990. {
  991. int b = -1;
  992. // condition check for comparing 2 registers
  993. switch (cond) {
  994. case DCOND_EQ: *r = rs; *s = rt; b = F1_BEQ; break;
  995. case DCOND_NE: *r = rs; *s = rt; b = F1_BNE; break;
  996. case DCOND_LO: *r = rs, *s = rt, b = F1_BLTU; break; // s < t, u
  997. case DCOND_HS: *r = rs, *s = rt, b = F1_BGEU; break; // s >= t, u
  998. case DCOND_LS: *r = rt, *s = rs, b = F1_BGEU; break; // s <= t, u
  999. case DCOND_HI: *r = rt, *s = rs, b = F1_BLTU; break; // s > t, u
  1000. case DCOND_LT: *r = rs, *s = rt, b = F1_BLT; break; // s < t
  1001. case DCOND_GE: *r = rs, *s = rt, b = F1_BGE; break; // s >= t
  1002. case DCOND_LE: *r = rt, *s = rs, b = F1_BGE; break; // s <= t
  1003. case DCOND_GT: *r = rt, *s = rs, b = F1_BLT; break; // s > t
  1004. }
  1005. return b;
  1006. }
  1007. static int emith_cmpi_check(int rs, s32 imm, int cond, int *r, int *s)
  1008. {
  1009. int b = -1;
  1010. // condition check for comparing register with immediate
  1011. if (imm == 0) return emith_cmpr_check(rs, Z0, cond, r, s);
  1012. emith_move_r_imm(AT, imm);
  1013. switch (cond) {
  1014. case DCOND_EQ: *r = AT, *s = rs, b = F1_BEQ; break;
  1015. case DCOND_NE: *r = AT, *s = rs, b = F1_BNE; break;
  1016. case DCOND_LO: *r = rs, *s = AT, b = F1_BLTU; break; // s < imm, u
  1017. case DCOND_HS: *r = rs, *s = AT, b = F1_BGEU; break; // s >= imm, u
  1018. case DCOND_LS: *r = AT, *s = rs, b = F1_BGEU; break; // s <= imm, u
  1019. case DCOND_HI: *r = AT, *s = rs, b = F1_BLTU; break; // s > imm, u
  1020. case DCOND_LT: *r = rs, *s = AT, b = F1_BLT; break; // s < imm
  1021. case DCOND_GE: *r = rs, *s = AT, b = F1_BGE; break; // s >= imm
  1022. case DCOND_LE: *r = AT, *s = rs, b = F1_BGE; break; // s <= imm
  1023. case DCOND_GT: *r = AT, *s = rs, b = F1_BLT; break; // s > imm
  1024. }
  1025. return b;
  1026. }
  1027. static int emith_cond_check(int cond, int *r, int *s)
  1028. {
  1029. int b = -1;
  1030. *s = Z0;
  1031. if (emith_cmp_rs >= 0) {
  1032. if (emith_cmp_rt != -1)
  1033. b = emith_cmpr_check(emith_cmp_rs,emith_cmp_rt, cond,r,s);
  1034. else b = emith_cmpi_check(emith_cmp_rs,emith_cmp_imm,cond,r,s);
  1035. }
  1036. // shortcut for V known to be 0
  1037. if (b < 0 && emith_flg_noV) switch (cond) {
  1038. case DCOND_VS: *r = Z0; b = F1_BNE; break; // never
  1039. case DCOND_VC: *r = Z0; b = F1_BEQ; break; // always
  1040. case DCOND_LT: *r = FNZ, b = F1_BLT; break; // N
  1041. case DCOND_GE: *r = FNZ, b = F1_BGE; break; // !N
  1042. case DCOND_LE: *r = Z0, *s = FNZ, b = F1_BGE; break; // N || Z
  1043. case DCOND_GT: *r = Z0, *s = FNZ, b = F1_BLT; break; // !N && !Z
  1044. }
  1045. // the full monty if no shortcut
  1046. if (b < 0) switch (cond) {
  1047. // conditions using NZ
  1048. case DCOND_EQ: *r = FNZ; b = F1_BEQ; break; // Z
  1049. case DCOND_NE: *r = FNZ; b = F1_BNE; break; // !Z
  1050. case DCOND_MI: *r = FNZ; b = F1_BLT; break; // N
  1051. case DCOND_PL: *r = FNZ; b = F1_BGE; break; // !N
  1052. // conditions using C
  1053. case DCOND_LO: *r = FC; b = F1_BNE; break; // C
  1054. case DCOND_HS: *r = FC; b = F1_BEQ; break; // !C
  1055. // conditions using CZ
  1056. case DCOND_LS: // C || Z
  1057. case DCOND_HI: // !C && !Z
  1058. EMIT(R5_ADD_IMM(AT, FC, -1)); // !C && !Z
  1059. EMIT(R5_AND_REG(AT, FNZ, AT));
  1060. *r = AT, b = (cond == DCOND_HI ? F1_BNE : F1_BEQ);
  1061. break;
  1062. // conditions using V
  1063. case DCOND_VS: // V
  1064. case DCOND_VC: // !V
  1065. EMIT(R5_XOR_REG(AT, FV, FNZ)); // V = Nt^Ns^Nd^C
  1066. EMIT(R5_LSRW_IMM(AT, AT, 31));
  1067. EMIT(R5_XOR_REG(AT, AT, FC));
  1068. *r = AT, b = (cond == DCOND_VS ? F1_BNE : F1_BEQ);
  1069. break;
  1070. // conditions using VNZ
  1071. case DCOND_LT: // N^V
  1072. case DCOND_GE: // !(N^V)
  1073. EMIT(R5_LSRW_IMM(AT, FV, 31)); // Nd^V = Nt^Ns^C
  1074. EMIT(R5_XOR_REG(AT, FC, AT));
  1075. *r = AT, b = (cond == DCOND_LT ? F1_BNE : F1_BEQ);
  1076. break;
  1077. case DCOND_LE: // (N^V) || Z
  1078. case DCOND_GT: // !(N^V) && !Z
  1079. EMIT(R5_LSRW_IMM(AT, FV, 31)); // Nd^V = Nt^Ns^C
  1080. EMIT(R5_XOR_REG(AT, FC, AT));
  1081. EMIT(R5_ADD_IMM(AT, AT, -1)); // !(Nd^V) && !Z
  1082. EMIT(R5_AND_REG(AT, FNZ, AT));
  1083. *r = AT, b = (cond == DCOND_GT ? F1_BNE : F1_BEQ);
  1084. break;
  1085. }
  1086. return b;
  1087. }
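// End-to-end example (illustrative): emith_cmp_r_imm(r, 5) followed by
// emith_jump_cond(DCOND_GT, tgt) goes through emith_cmpi_check() and emits
//   li  t6, 5          ; AT = x31 = t6
//   blt t6, r, tgt     ; taken when r > 5 (signed)
// so no flag registers are touched at all for a simple compare-and-branch.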
  1088. // NB: R5 unconditional jumps have only +/- 1MB range, hence use reg jumps
  1089. #define emith_jump(target) do { \
  1090. uintptr_t target_ = (uintptr_t)(target); \
  1091. EMIT(R5_MOVT_IMM(AT, target_ + _CB(target_,1,11,12))); \
  1092. EMIT(R5_JR(AT, target_)); \
  1093. } while (0)
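// i.e. an absolute jump is the usual two insn sequence (sketch):
//   lui  t6, %hi(target)      ; %hi pre-biased by bit 11, as in emith_move_imm
//   jalr x0, %lo(target)(t6)
// which reaches any sign extended 32 bit address rather than JAL's +/-1MB
// PC-relative window.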
  1094. #define emith_jump_patchable(target) \
  1095. emith_jump(target)
  1096. // NB: R5 conditional branches have only +/- 4KB range
  1097. #define emith_jump_cond(cond, target) do { \
  1098. int r_, s_, mcond_ = emith_cond_check(cond, &r_, &s_); \
  1099. u32 disp_ = (u8 *)target - (u8 *)tcache_ptr; \
  1100. EMIT(R5_BCOND(mcond_,r_,s_,disp_ & 0x00001fff)); \
  1101. } while (0)
  1102. #define emith_jump_cond_patchable(cond, target) \
  1103. emith_jump_cond(cond, target)
  1104. #define emith_jump_cond_inrange(target) \
  1105. ((u8 *)target - (u8 *)tcache_ptr < 0x1000 && \
  1106. (u8 *)target - (u8 *)tcache_ptr >= -0x1000+0x10) // mind cond_check
  1107. // NB: returns position of patch for cache maintenance
  1108. #define emith_jump_patch(ptr, target, pos) do { \
  1109. u32 *ptr_ = (u32 *)ptr; /* must skip condition check code */ \
  1110. if ((*ptr_&0x77) == OP_BCOND) { \
  1111. u32 *p_ = ptr_, disp_ = (u8 *)target - (u8 *)ptr_; \
  1112. u32 f1_ = _CB(*ptr_,3,12,0); \
  1113. u32 r_ = _CB(*ptr_,5,15,0), s_ = _CB(*ptr_,5,20,0); \
  1114. EMIT_PTR(p_, R5_BCOND(f1_, r_, s_, disp_ & 0x00001fff)); \
  1115. } else { \
  1116. u32 *p_ = ptr_; \
  1117. uintptr_t target_ = (uintptr_t)(target); \
  1118. EMIT_PTR(p_, R5_MOVT_IMM(AT, target_ + _CB(target_,1,11,12))); \
  1119. EMIT_PTR(p_, R5_JR(AT, target_)); \
  1120. } \
  1121. if ((void *)(pos) != NULL) *(u8 **)(pos) = (u8 *)(ptr_); \
  1122. } while (0)
  1123. #define emith_jump_patch_inrange(ptr, target) \
  1124. ((u8 *)target - (u8 *)ptr < 0x1000 && \
  1125. (u8 *)target - (u8 *)ptr >= -0x1000+0x10) // mind cond_check
  1126. #define emith_jump_patch_size() 8
  1127. #define emith_jump_at(ptr, target) do { \
  1128. uintptr_t target_ = (uintptr_t)(target); \
  1129. u32 *ptr_ = (u32 *)ptr; \
  1130. EMIT_PTR(ptr_, R5_MOVT_IMM(AT, target_ + _CB(target_,1,11,12))); \
  1131. EMIT_PTR(ptr_, R5_JR(AT, target_)); \
  1132. } while (0)
  1133. #define emith_jump_at_size() 8
  1134. #define emith_jump_reg(r) \
  1135. EMIT(R5_JR(r, 0))
  1136. #define emith_jump_reg_c(cond, r) \
  1137. emith_jump_reg(r)
  1138. #define emith_jump_ctx(offs) do { \
  1139. emith_ctx_read_ptr(AT, offs); \
  1140. emith_jump_reg(AT); \
  1141. } while (0)
  1142. #define emith_jump_ctx_c(cond, offs) \
  1143. emith_jump_ctx(offs)
  1144. #define emith_call(target) do { \
  1145. uintptr_t target_ = (uintptr_t)(target); \
  1146. EMIT(R5_MOVT_IMM(AT, target_ + _CB(target_,1,11,12))); \
  1147. EMIT(R5_JALR(LR, AT, target_)); \
  1148. } while (0)
  1149. #define emith_call_cond(cond, target) \
  1150. emith_call(target)
  1151. #define emith_call_reg(r) \
  1152. EMIT(R5_JALR(LR, r, 0))
  1153. #define emith_call_ctx(offs) do { \
  1154. emith_ctx_read_ptr(AT, offs); \
  1155. emith_call_reg(AT); \
  1156. } while (0)
  1157. #define emith_call_cleanup() /**/
  1158. #define emith_ret() \
  1159. EMIT(R5_JR(LR, 0))
  1160. #define emith_ret_c(cond) \
  1161. emith_ret()
  1162. #define emith_ret_to_ctx(offs) \
  1163. emith_ctx_write_ptr(LR, offs)
  1164. #define emith_add_r_ret(r) \
  1165. emith_add_r_r_ptr(r, LR)
  1166. #define emith_push_ret(r) do { \
  1167. emith_add_r_r_ptr_imm(SP, SP, -16); /* ABI requires 16 byte alignment */\
  1168. emith_write_r_r_offs(LR, SP, 4); \
  1169. if ((r) > 0) emith_write_r_r_offs(r, SP, 0); \
  1170. } while (0)
  1171. #define emith_pop_and_ret(r) do { \
  1172. if ((r) > 0) emith_read_r_r_offs(r, SP, 0); \
  1173. emith_read_r_r_offs(LR, SP, 4); \
  1174. emith_add_r_r_ptr_imm(SP, SP, 16); \
  1175. emith_ret(); \
  1176. } while (0)
  1177. // emitter ABI stuff
  1178. #define emith_insn_ptr() ((u8 *)tcache_ptr)
  1179. #define emith_flush() /**/
  1180. #define host_instructions_updated(base, end) __builtin___clear_cache(base, end)
  1181. #define emith_update_cache() /**/
  1182. #define emith_rw_offs_max() 0x7ff
  1183. // SH2 drc specific
  1184. #define emith_sh2_drc_entry() do { \
  1185. int _c; u32 _m = 0x0ffc0202; /* x1,x9,x18-x27 */ \
  1186. _c = count_bits(_m)&3; _m |= (1<<((4-_c)&3))-1; /* ABI align */ \
  1187. int _s = count_bits(_m) * 4, _o = _s; \
  1188. if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
  1189. for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
  1190. if (_m & (1 << _c)) \
  1191. { _o -= 4; if (_c) emith_write_r_r_offs(_c, SP, _o); } \
  1192. } while (0)
  1193. #define emith_sh2_drc_exit() do { \
  1194. int _c; u32 _m = 0x0ffc0202; \
  1195. _c = count_bits(_m)&3; _m |= (1<<((4-_c)&3))-1; /* ABI align */ \
  1196. int _s = count_bits(_m) * 4, _o = 0; \
  1197. for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
  1198. if (_m & (1 << _c)) \
  1199. { if (_c) emith_read_r_r_offs(_c, SP, _o); _o += 4; } \
  1200. if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
  1201. emith_ret(); \
  1202. } while (0)
  1203. // NB: assumes a is in arg0, tab, func and mask are temp
  1204. #define emith_sh2_rcall(a, tab, func, mask) do { \
  1205. emith_lsr(mask, a, SH2_READ_SHIFT); \
  1206. emith_add_r_r_r_lsl_ptr(tab, tab, mask, PTR_SCALE+1); \
  1207. emith_read_r_r_offs_ptr(func, tab, 0); \
  1208. emith_read_r_r_offs(mask, tab, 1 << PTR_SCALE); \
  1209. emith_addf_r_r_r_ptr(func, func, func); \
  1210. } while (0)
  1211. // NB: assumes a, val are in arg0 and arg1, tab and func are temp
  1212. #define emith_sh2_wcall(a, val, tab, func) do { \
  1213. emith_lsr(func, a, SH2_WRITE_SHIFT); \
  1214. emith_lsl(func, func, PTR_SCALE); \
  1215. emith_read_r_r_r_ptr(func, tab, func); \
  1216. emith_move_r_r_ptr(12, CONTEXT_REG); /* arg2 */ \
  1217. emith_jump_reg(func); \
  1218. } while (0)
#define emith_sh2_delay_loop(cycles, reg) do { \
    int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL); \
    int t1 = rcache_get_tmp(); \
    int t2 = rcache_get_tmp(); \
    int t3 = rcache_get_tmp(); \
    /* if (sr < 0) return */ \
    emith_cmp_r_imm(sr, 0); \
    EMITH_JMP_START(DCOND_LE); \
    /* turns = sr.cycles / cycles */ \
    emith_asr(t2, sr, 12); \
    emith_move_r_imm(t3, (u32)((1ULL<<32) / (cycles)) + 1); \
    emith_mul_u64(t1, t2, t2, t3); /* multiply by 1/x */ \
    rcache_free_tmp(t3); \
    if (reg >= 0) { \
        /* if (reg <= turns) turns = reg-1 */ \
        t3 = rcache_get_reg(reg, RC_GR_RMW, NULL); \
        emith_cmp_r_r(t3, t2); \
        EMITH_SJMP_START(DCOND_HI); \
        emith_sub_r_r_imm_c(DCOND_LS, t2, t3, 1); \
        EMITH_SJMP_END(DCOND_HI); \
        /* if (reg <= 1) turns = 0 */ \
        emith_cmp_r_imm(t3, 1); \
        EMITH_SJMP_START(DCOND_HI); \
        emith_move_r_imm_c(DCOND_LS, t2, 0); \
        EMITH_SJMP_END(DCOND_HI); \
        /* reg -= turns */ \
        emith_sub_r_r(t3, t2); \
    } \
    /* sr.cycles -= turns * cycles; */ \
    emith_move_r_imm(t1, cycles); \
    emith_mul(t1, t2, t1); \
    emith_sub_r_r_r_lsl(sr, sr, t1, 12); \
    EMITH_JMP_END(DCOND_LE); \
    rcache_free_tmp(t1); \
    rcache_free_tmp(t2); \
} while (0)
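/* The division above is a fixed-point multiply by the reciprocal:
 * turns = ((sr >> 12) * ((2^32 / cycles) + 1)) >> 32, with emith_mul_u64
 * leaving the high 32 bits of the product in t2. For example, with
 * cycles = 7 and 100 cycles left: (2^32 / 7) + 1 = 613566757, and
 * (100 * 613566757) >> 32 = 14, i.e. 100 / 7 rounded down. */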
/*
 * T = !carry(Rn = (Rn << 1) | T)
 * if Q
 *   C = carry(Rn += Rm)
 * else
 *   C = carry(Rn -= Rm)
 * T ^= C
 */
#define emith_sh2_div1_step(rn, rm, sr) do { \
    int t_ = rcache_get_tmp(); \
    emith_and_r_r_imm(AT, sr, T); \
    emith_lsr(FC, rn, 31); /* Rn = (Rn<<1)+T */ \
    emith_lsl(t_, rn, 1); \
    emith_or_r_r(t_, AT); \
    emith_or_r_imm(sr, T); /* T = !carry */ \
    emith_eor_r_r(sr, FC); \
    emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
    EMITH_JMP3_START(DCOND_EQ); \
    emith_add_r_r_r(rn, t_, rm); \
    EMIT(R5_SLTU_REG(FC, rn, t_)); \
    EMITH_JMP3_MID(DCOND_EQ); \
    emith_sub_r_r_r(rn, t_, rm); \
    EMIT(R5_SLTU_REG(FC, t_, rn)); \
    EMITH_JMP3_END(); \
    emith_eor_r_r(sr, FC); /* T ^= carry */ \
    rcache_free_tmp(t_); \
} while (0)
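/* RISC-V has no flags register, so carry/borrow is recovered with SLTU:
 * after rn = t_ + rm an unsigned carry occurred iff rn < t_, and after
 * rn = t_ - rm a borrow occurred iff t_ < rn (the subtraction wrapped). */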
/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macl(ml, mh, rn, rm, sr) do { \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* MACH top 16 bits unused if saturated. sign ext for overfl detect */ \
    emith_sext(mh, mh, 16); \
    EMITH_SJMP_END(DCOND_EQ); \
    emith_mula_s64(ml, mh, rn, rm); \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* overflow if top 17 bits of MACH aren't all 1 or 0 */ \
    /* to check: add MACH >> 31 to MACH >> 15. this is 0 if no overflow */ \
    emith_asr(rn, mh, 15); \
    emith_add_r_r_r_lsr(rn, rn, mh, 31); /* sum = (MACH>>31)+(MACH>>15) */ \
    emith_teq_r_r(rn, Z0); /* (need only N and Z flags) */ \
    EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> ov */ \
    emith_move_r_imm_c(DCOND_NE, ml, 0x0000); /* -overflow */ \
    emith_move_r_imm_c(DCOND_NE, mh, 0x8000); \
    EMITH_SJMP_START(DCOND_PL); /* sum > 0 -> +ovl */ \
    emith_sub_r_imm_c(DCOND_MI, ml, 1); /* 0xffffffff */ \
    emith_sub_r_imm_c(DCOND_MI, mh, 1); /* 0x00007fff */ \
    EMITH_SJMP_END(DCOND_PL); \
    EMITH_SJMP_END(DCOND_EQ); \
    EMITH_SJMP_END(DCOND_EQ); \
} while (0)
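/* With S set, MAC.L saturates the accumulator to the 48-bit range
 * 0xffff800000000000..0x00007fffffffffff; only the low 16 bits of MACH are
 * significant then, which is why 0x8000/0x7fff suffice above. */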
/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macw(ml, mh, rn, rm, sr) do { \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* XXX: MACH should be untouched when S is set? */ \
    emith_asr(mh, ml, 31); /* sign ext MACL to MACH for ovrfl check */ \
    EMITH_SJMP_END(DCOND_EQ); \
    emith_mula_s64(ml, mh, rn, rm); \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* overflow if top 33 bits of MACH:MACL aren't all 1 or 0 */ \
    /* to check: add MACL[31] to MACH. this is 0 if no overflow */ \
    emith_lsr(rn, ml, 31); \
    emith_add_r_r(rn, mh); /* sum = MACH + ((MACL>>31)&1) */ \
    emith_teq_r_r(rn, Z0); /* (need only N and Z flags) */ \
    EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> overflow */ \
    /* XXX: LSB signalling only in SH1, or in SH2 too? */ \
    emith_move_r_imm_c(DCOND_NE, mh, 0x00000001); /* LSB of MACH */ \
    emith_move_r_imm_c(DCOND_NE, ml, 0x80000000); /* negative ovrfl */ \
    EMITH_SJMP_START(DCOND_PL); /* sum > 0 -> positive ovrfl */ \
    emith_sub_r_imm_c(DCOND_MI, ml, 1); /* 0x7fffffff */ \
    EMITH_SJMP_END(DCOND_PL); \
    EMITH_SJMP_END(DCOND_EQ); \
    EMITH_SJMP_END(DCOND_EQ); \
} while (0)
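/* With S set, MAC.W saturates MACL to 0x80000000..0x7fffffff and, per the
 * comment above, presumably signals the overflow in the LSB of MACH. */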
#define emith_write_sr(sr, srcr) do { \
    emith_lsr(sr, sr, 10); emith_lsl(sr, sr, 10); \
    emith_lsl(AT, srcr, 22); emith_lsr(AT, AT, 22); \
    emith_or_r_r(sr, AT); \
} while (0)
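/* The cached SR keeps DRC-internal state (cycle count etc.) in its upper 22
 * bits, so only the low 10 architectural SR bits (M, Q, I3-I0, S, T) are
 * taken from the source register. */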
#define emith_carry_to_t(sr, is_sub) do { \
    emith_and_r_imm(sr, 0xfffffffe); \
    emith_or_r_r(sr, FC); \
} while (0)
#define emith_t_to_carry(sr, is_sub) do { \
    emith_and_r_r_imm(FC, sr, 1); \
} while (0)
#define emith_tpop_carry(sr, is_sub) do { \
    emith_and_r_r_imm(FC, sr, 1); \
    emith_eor_r_r(sr, FC); \
} while (0)
#define emith_tpush_carry(sr, is_sub) \
    emith_or_r_r(sr, FC)
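/* FC is the register this emitter uses to model the carry flag; the helpers
 * above shuffle it into and out of the T bit (bit 0 of SR). */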
#ifdef T
// T bit handling
#define emith_invert_cond(cond) \
    ((cond) ^ 1)
static void emith_clr_t_cond(int sr)
{
    emith_bic_r_imm(sr, T);
}
static void emith_set_t_cond(int sr, int cond)
{
    int b, r, s;
    u8 *ptr;
    u32 val = 0, inv = 0;
    // try to avoid jumping around if possible
    if (emith_cmp_rs >= 0) {
        if (emith_cmp_rt >= 0)
            b = emith_cmpr_check(emith_cmp_rs, emith_cmp_rt, cond, &r, &s);
        else
            b = emith_cmpi_check(emith_cmp_rs, emith_cmp_imm, cond, &r, &s);
    } else {
        b = emith_cond_check(cond, &r, &s);
        if (r == Z0) {
            if (b == F1_BEQ || b == F1_BGE || b == F1_BGEU)
                emith_or_r_imm(sr, T);
            return;
        } else if (r == FC)
            val++, inv = (b == F1_BEQ);
    }
    if (!val) switch (b) {
    case F1_BEQ:  if (s == Z0) { EMIT(R5_SLTU_IMM(AT, r, 1)); r = AT; val++; break; }
                  EMIT(R5_XOR_REG(AT, r, s));
                  EMIT(R5_SLTU_IMM(AT, AT, 1)); r = AT; val++; break;
    case F1_BNE:  if (s == Z0) { EMIT(R5_SLTU_REG(AT, Z0, r)); r = AT; val++; break; }
                  EMIT(R5_XOR_REG(AT, r, s));
                  EMIT(R5_SLTU_REG(AT, Z0, AT)); r = AT; val++; break;
    case F1_BLTU: EMIT(R5_SLTU_REG(AT, r, s)); r = AT; val++; break;
    case F1_BGEU: EMIT(R5_SLTU_REG(AT, r, s)); r = AT; val++; inv++; break;
    case F1_BLT:  EMIT(R5_SLT_REG(AT, r, s)); r = AT; val++; break;
    case F1_BGE:  EMIT(R5_SLT_REG(AT, r, s)); r = AT; val++; inv++; break;
    }
    if (val) {
        emith_or_r_r(sr, r);
        if (inv)
            emith_eor_r_imm(sr, T);
        return;
    }
    // can't obtain result directly, use presumably slower jump !cond + or sr,T
    b = emith_invert_branch(b);
    ptr = tcache_ptr;
    EMIT(R5_BCOND(b, r, s, 0));
    emith_or_r_imm(sr, T);
    val = (u8 *)tcache_ptr - (u8 *)(ptr);
    EMIT_PTR(ptr, R5_BCOND(b, r, s, val & 0x00001fff));
}
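/* The fallback above back-patches the branch: it is first emitted with a zero
 * offset, the "or sr, T" follows, and the branch is then re-emitted at ptr
 * with the measured byte distance, masked to the 13-bit signed range of
 * B-type offsets. */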
#define emith_get_t_cond() -1
#define emith_sync_t(sr) ((void)sr)
#define emith_invalidate_t()
static void emith_set_t(int sr, int val)
{
    if (val)
        emith_or_r_imm(sr, T);
    else
        emith_bic_r_imm(sr, T);
}
static int emith_tst_t(int sr, int tf)
{
    emith_tst_r_imm(sr, T);
    return tf ? DCOND_NE : DCOND_EQ;
}
#endif