// emit_mips.c

/*
 * Basic macros to emit MIPS32/MIPS64 Release 1 or 2 instructions and some utils
 * Copyright (C) 2019 kub
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 */
#define HOST_REGS 32
// MIPS32 ABI: params: r4-r7, return: r2-r3, temp: r1(at),r8-r15,r24-r25,r31(ra)
// saved: r16-r23,r30, reserved: r0(zero), r26-r27(irq), r28(gp), r29(sp)
// r1,r15,r24,r25(at,t7-t9) are used internally by the code emitter
// MIPSN32/MIPS64 ABI: params: r4-r11, no caller-reserved save area on stack
#define RET_REG		2	// v0
#define PARAM_REGS	{ 4, 5, 6, 7 }	// a0-a3
#define PRESERVED_REGS	{ 16, 17, 18, 19, 20, 21, 22, 23 }	// s0-s7
#define TEMPORARY_REGS	{ 2, 3, 8, 9, 10, 11, 12, 13, 14 }	// v0-v1,t0-t6
#define CONTEXT_REG	23	// s7
#define STATIC_SH2_REGS	{ SHR_SR,22 , SHR_R(0),21 , SHR_R(1),20 }
// NB: the ubiquitous JZ47[46]0 uses MIPS32 Release 1, a slight MIPS II superset
#ifndef __mips_isa_rev
#define __mips_isa_rev	1	// surprisingly not always defined
#endif
// registers usable for user code: r1-r25, others reserved or special
#define Z0	0	// zero register
#define GP	28	// global pointer
#define SP	29	// stack pointer
#define FP	30	// frame pointer
#define LR	31	// link register
// internally used by code emitter:
#define AT	1	// used to hold intermediate results
#define FNZ	15	// emulated processor flags: N (bit 31), Z (all bits)
#define FC	24	// emulated processor flags: C (bit 0), others 0
#define FV	25	// emulated processor flags: Nt^Ns (bit 31), others x
// All operations but ptr ops are using the lower 32 bits of the registers.
// The upper 32 bits always contain the sign extension from the lower 32 bits.
// unified conditions; virtual, not corresponding to anything real on MIPS
#define DCOND_EQ 0x0
#define DCOND_NE 0x1
#define DCOND_HS 0x2
#define DCOND_LO 0x3
#define DCOND_MI 0x4
#define DCOND_PL 0x5
#define DCOND_VS 0x6
#define DCOND_VC 0x7
#define DCOND_HI 0x8
#define DCOND_LS 0x9
#define DCOND_GE 0xa
#define DCOND_LT 0xb
#define DCOND_GT 0xc
#define DCOND_LE 0xd
#define DCOND_CS DCOND_LO
#define DCOND_CC DCOND_HS
// unified insn
#define MIPS_INSN(op, rs, rt, rd, sa, fn) \
	(((op)<<26)|((rs)<<21)|((rt)<<16)|((rd)<<11)|((sa)<<6)|((fn)<<0))
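// Example (illustrative): MIPS_INSN(000, 4, 5, 2, 0, 041) packs
// (0<<26)|(4<<21)|(5<<16)|(2<<11)|(0<<6)|33 = 0x00851021, which is the
// standard R-type encoding of "addu $v0, $a0, $a1" (op|rs|rt|rd|sa|fn).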
#define _	0 // marker for "field unused"
#define __(n)	o##n // enum marker for "undefined"
// opcode field (encoded in op)
enum { OP__FN=000, OP__RT, OP_J, OP_JAL, OP_BEQ, OP_BNE, OP_BLEZ, OP_BGTZ };
enum { OP_ADDI=010, OP_ADDIU, OP_SLTI, OP_SLTIU, OP_ANDI, OP_ORI, OP_XORI, OP_LUI };
enum { OP_DADDI=030, OP_DADDIU, OP_LDL, OP_LDR, OP__FN2=034, OP__FN3=037 };
enum { OP_LB=040, OP_LH, OP_LWL, OP_LW, OP_LBU, OP_LHU, OP_LWR, OP_LWU };
enum { OP_SB=050, OP_SH, OP_SWL, OP_SW, OP_SDL, OP_SDR, OP_SWR };
enum { OP_SD=067, OP_LD=077 };
// function field (encoded in fn if opcode = OP__FN)
enum { FN_SLL=000, __(01), FN_SRL, FN_SRA, FN_SLLV, __(05), FN_SRLV, FN_SRAV };
enum { FN_JR=010, FN_JALR, FN_MOVZ, FN_MOVN, FN_SYNC=017 };
enum { FN_MFHI=020, FN_MTHI, FN_MFLO, FN_MTLO, FN_DSLLV, __(25), FN_DSRLV, FN_DSRAV };
enum { FN_MULT=030, FN_MULTU, FN_DIV, FN_DIVU, FN_DMULT, FN_DMULTU, FN_DDIV, FN_DDIVU };
enum { FN_ADD=040, FN_ADDU, FN_SUB, FN_SUBU, FN_AND, FN_OR, FN_XOR, FN_NOR };
enum { FN_SLT=052, FN_SLTU, FN_DADD, FN_DADDU, FN_DSUB, FN_DSUBU };
enum { FN_DSLL=070, __(71), FN_DSRL, FN_DSRA, FN_DSLL32, __(75), FN_DSRL32, FN_DSRA32 };
// function field (encoded in fn if opcode = OP__FN2)
enum { FN2_MADD=000, FN2_MADDU, FN2_MUL, __(03), FN2_MSUB, FN2_MSUBU };
enum { FN2_CLZ=040, FN2_CLO, FN2_DCLZ=044, FN2_DCLO };
// function field (encoded in fn if opcode = OP__FN3)
enum { FN3_EXT=000, FN3_DEXTM, FN3_DEXTU, FN3_DEXT, FN3_INS, FN3_DINSM, FN3_DINSU, FN3_DINS };
enum { FN3_BSHFL=040, FN3_DBSHFL=044 };
// rt field (encoded in rt if opcode = OP__RT)
enum { RT_BLTZ=000, RT_BGEZ, RT_BLTZAL=020, RT_BGEZAL, RT_SYNCI=037 };
// bit shuffle function (encoded in sa if function = FN3_BSHFL)
enum { BS_SBH=002, BS_SHD=005, BS_SEB=020, BS_SEH=030 };
// r (rotate) bit function (encoded in rs/sa if function = FN_SRL/FN_SRLV)
enum { RB_SRL=0, RB_ROTR=1 };
#define MIPS_NOP 000	// null operation: SLL r0, r0, #0
// arithmetic/logical
#define MIPS_OP_REG(op, sa, rd, rs, rt) \
	MIPS_INSN(OP__FN, rs, rt, rd, sa, op)	// R-type, SPECIAL
#define MIPS_OP2_REG(op, sa, rd, rs, rt) \
	MIPS_INSN(OP__FN2, rs, rt, rd, sa, op)	// R-type, SPECIAL2
#define MIPS_OP3_REG(op, sa, rd, rs, rt) \
	MIPS_INSN(OP__FN3, rs, rt, rd, sa, op)	// R-type, SPECIAL3
#define MIPS_OP_IMM(op, rt, rs, imm) \
	MIPS_INSN(op, rs, rt, _, _, (u16)(imm))	// I-type
// rd = rs OP rt
#define MIPS_ADD_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_ADDU,_, rd, rs, rt)
#define MIPS_DADD_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_DADDU,_, rd, rs, rt)
#define MIPS_SUB_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_SUBU,_, rd, rs, rt)
#define MIPS_DSUB_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_DSUBU,_, rd, rs, rt)
#define MIPS_NEG_REG(rd, rt) \
	MIPS_SUB_REG(rd, Z0, rt)
#define MIPS_XOR_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_XOR,_, rd, rs, rt)
#define MIPS_OR_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_OR,_, rd, rs, rt)
#define MIPS_AND_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_AND,_, rd, rs, rt)
#define MIPS_NOR_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_NOR,_, rd, rs, rt)
#define MIPS_MOVE_REG(rd, rs) \
	MIPS_OR_REG(rd, rs, Z0)
#define MIPS_MVN_REG(rd, rs) \
	MIPS_NOR_REG(rd, rs, Z0)
// rd = rt SHIFT rs
#define MIPS_LSL_REG(rd, rt, rs) \
	MIPS_OP_REG(FN_SLLV,_, rd, rs, rt)
#define MIPS_LSR_REG(rd, rt, rs) \
	MIPS_OP_REG(FN_SRLV,RB_SRL, rd, rs, rt)
#define MIPS_ASR_REG(rd, rt, rs) \
	MIPS_OP_REG(FN_SRAV,_, rd, rs, rt)
#define MIPS_ROR_REG(rd, rt, rs) \
	MIPS_OP_REG(FN_SRLV,RB_ROTR, rd, rs, rt)
#define MIPS_SEB_REG(rd, rt) \
	MIPS_OP3_REG(FN3_BSHFL, BS_SEB, rd, _, rt)
#define MIPS_SEH_REG(rd, rt) \
	MIPS_OP3_REG(FN3_BSHFL, BS_SEH, rd, _, rt)
#define MIPS_EXT_IMM(rt, rs, lsb, sz) \
	MIPS_OP3_REG(FN3_EXT, lsb, (sz)-1, rs, rt)
#define MIPS_INS_IMM(rt, rs, lsb, sz) \
	MIPS_OP3_REG(FN3_INS, lsb, (lsb)+(sz)-1, rs, rt)
// rd = (rs < rt)
#define MIPS_SLT_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_SLT,_, rd, rs, rt)
#define MIPS_SLTU_REG(rd, rs, rt) \
	MIPS_OP_REG(FN_SLTU,_, rd, rs, rt)
// rt = rs OP imm16
#define MIPS_ADD_IMM(rt, rs, imm16) \
	MIPS_OP_IMM(OP_ADDIU, rt, rs, imm16)
#define MIPS_DADD_IMM(rt, rs, imm16) \
	MIPS_OP_IMM(OP_DADDIU, rt, rs, imm16)
#define MIPS_XOR_IMM(rt, rs, imm16) \
	MIPS_OP_IMM(OP_XORI, rt, rs, imm16)
#define MIPS_OR_IMM(rt, rs, imm16) \
	MIPS_OP_IMM(OP_ORI, rt, rs, imm16)
#define MIPS_AND_IMM(rt, rs, imm16) \
	MIPS_OP_IMM(OP_ANDI, rt, rs, imm16)
// rt = (imm16 << (0|16))
#define MIPS_MOV_IMM(rt, imm16) \
	MIPS_OP_IMM(OP_ORI, rt, Z0, imm16)
#define MIPS_MOVT_IMM(rt, imm16) \
	MIPS_OP_IMM(OP_LUI, rt, _, imm16)
// rd = rt SHIFT imm5
#define MIPS_LSL_IMM(rd, rt, bits) \
	MIPS_INSN(OP__FN, _, rt, rd, bits, FN_SLL)
#define MIPS_LSR_IMM(rd, rt, bits) \
	MIPS_INSN(OP__FN, RB_SRL, rt, rd, bits, FN_SRL)
#define MIPS_ASR_IMM(rd, rt, bits) \
	MIPS_INSN(OP__FN, _, rt, rd, bits, FN_SRA)
#define MIPS_ROR_IMM(rd, rt, bits) \
	MIPS_INSN(OP__FN, RB_ROTR, rt, rd, bits, FN_SRL)
#define MIPS_DLSL_IMM(rd, rt, bits) \
	MIPS_INSN(OP__FN, _, rt, rd, bits, FN_DSLL)
#define MIPS_DLSL32_IMM(rd, rt, bits) \
	MIPS_INSN(OP__FN, _, rt, rd, bits, FN_DSLL32)
// rt = (rs < imm16)
#define MIPS_SLT_IMM(rt, rs, imm16) \
	MIPS_OP_IMM(OP_SLTI, rt, rs, imm16)
#define MIPS_SLTU_IMM(rt, rs, imm16) \
	MIPS_OP_IMM(OP_SLTIU, rt, rs, imm16)
// multiplication
#define MIPS_MULT(rt, rs) \
	MIPS_OP_REG(FN_MULT,_, _, rs, rt)
#define MIPS_MULTU(rt, rs) \
	MIPS_OP_REG(FN_MULTU,_, _, rs, rt)
#define MIPS_MADD(rt, rs) \
	MIPS_OP2_REG(FN2_MADD,_, _, rs, rt)
#define MIPS_MADDU(rt, rs) \
	MIPS_OP2_REG(FN2_MADDU,_, _, rs, rt)
#define MIPS_MFLO(rd) \
	MIPS_OP_REG(FN_MFLO,_, rd, _, _)
#define MIPS_MFHI(rd) \
	MIPS_OP_REG(FN_MFHI,_, rd, _, _)
// branching
#define MIPS_J(abs26) \
	MIPS_INSN(OP_J, _,_,_,_, (abs26) >> 2)	// J-type
#define MIPS_JAL(abs26) \
	MIPS_INSN(OP_JAL, _,_,_,_, (abs26) >> 2)
#define MIPS_JR(rs) \
	MIPS_OP_REG(FN_JR,_, _,rs,_)
#define MIPS_JALR(rd, rs) \
	MIPS_OP_REG(FN_JALR,_, rd,rs,_)
// conditional branches; no condition code, these compare rs against rt or Z0
#define MIPS_BEQ  (OP_BEQ << 5)		// rs == rt (rt in lower 5 bits)
#define MIPS_BNE  (OP_BNE << 5)		// rs != rt (ditto)
#define MIPS_BLE  (OP_BLEZ << 5)	// rs <= 0
#define MIPS_BGT  (OP_BGTZ << 5)	// rs >  0
#define MIPS_BLT  ((OP__RT << 5)|RT_BLTZ)	// rs <  0
#define MIPS_BGE  ((OP__RT << 5)|RT_BGEZ)	// rs >= 0
#define MIPS_BLTL ((OP__RT << 5)|RT_BLTZAL)	// rs <  0, link $ra if jumping
#define MIPS_BGEL ((OP__RT << 5)|RT_BGEZAL)	// rs >= 0, link $ra if jumping
#define MIPS_BCOND(cond, rs, rt, offs16) \
	MIPS_OP_IMM((cond >> 5), rt, rs, (offs16) >> 2)
#define MIPS_BCONDZ(cond, rs, offs16) \
	MIPS_OP_IMM((cond >> 5), (cond & 0x1f), rs, (offs16) >> 2)
#define MIPS_B(offs16) \
	MIPS_BCONDZ(MIPS_BEQ, Z0, offs16)
#define MIPS_BL(offs16) \
	MIPS_BCONDZ(MIPS_BGEL, Z0, offs16)
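// Example (illustrative): MIPS_B(8) = MIPS_BCONDZ(MIPS_BEQ, Z0, 8) packs
// opcode OP_BEQ with rs = rt = $0 and imm = 8>>2, i.e. 0x10000002:
// "beq $0, $0, +8", an unconditional branch (offsets count from the delay slot).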
// load/store indexed base
#define MIPS_LD(rt, rs, offs16) \
	MIPS_OP_IMM(OP_LD, rt, rs, (u16)(offs16))
#define MIPS_LW(rt, rs, offs16) \
	MIPS_OP_IMM(OP_LW, rt, rs, (u16)(offs16))
#define MIPS_LH(rt, rs, offs16) \
	MIPS_OP_IMM(OP_LH, rt, rs, (u16)(offs16))
#define MIPS_LB(rt, rs, offs16) \
	MIPS_OP_IMM(OP_LB, rt, rs, (u16)(offs16))
#define MIPS_LHU(rt, rs, offs16) \
	MIPS_OP_IMM(OP_LHU, rt, rs, (u16)(offs16))
#define MIPS_LBU(rt, rs, offs16) \
	MIPS_OP_IMM(OP_LBU, rt, rs, (u16)(offs16))
#define MIPS_SD(rt, rs, offs16) \
	MIPS_OP_IMM(OP_SD, rt, rs, (u16)(offs16))
#define MIPS_SW(rt, rs, offs16) \
	MIPS_OP_IMM(OP_SW, rt, rs, (u16)(offs16))
#define MIPS_SH(rt, rs, offs16) \
	MIPS_OP_IMM(OP_SH, rt, rs, (u16)(offs16))
#define MIPS_SB(rt, rs, offs16) \
	MIPS_OP_IMM(OP_SB, rt, rs, (u16)(offs16))
// pointer operations
#if _MIPS_SZPTR == 64
#define OP_LP		OP_LD
#define OP_SP		OP_SD
#define OP_PADDIU	OP_DADDIU
#define FN_PADDU	FN_DADDU
#define FN_PSUBU	FN_DSUBU
#define PTR_SCALE	3
#else
#define OP_LP		OP_LW
#define OP_SP		OP_SW
#define OP_PADDIU	OP_ADDIU
#define FN_PADDU	FN_ADDU
#define FN_PSUBU	FN_SUBU
#define PTR_SCALE	2
#endif
#define PTR_SIZE	(1<<PTR_SCALE)
// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
	do { \
		*(u32 *)(ptr) = x; \
		ptr = (void *)((u8 *)(ptr) + sizeof(u32)); \
	} while (0)
// FIFO for some instructions, for delay slot handling
#define FSZ	4
static u32 emith_last_insns[FSZ];
static unsigned emith_last_idx, emith_last_cnt;
#define EMIT_PUSHOP() \
	do { \
		if (emith_last_cnt > 0) { \
			u32 *p = (u32 *)tcache_ptr - emith_last_cnt; \
			int idx = (emith_last_idx - emith_last_cnt+1) % FSZ; \
			EMIT_PTR(p, emith_last_insns[idx]); \
			emith_last_cnt--; \
		} \
	} while (0)
#define EMIT(op) \
	do { \
		if (emith_last_cnt >= FSZ) EMIT_PUSHOP(); \
		tcache_ptr = (void *)((u32 *)tcache_ptr + 1); \
		emith_last_idx = (emith_last_idx+1) % FSZ; \
		emith_last_insns[emith_last_idx] = op; \
		emith_last_cnt++; \
		COUNT_OP; \
	} while (0)
#define emith_flush() \
	do { \
		while (emith_last_cnt) EMIT_PUSHOP(); \
		emith_flg_hint = _FHV|_FHC; \
	} while (0)
#define emith_insn_ptr()	(u8 *)((u32 *)tcache_ptr - emith_last_cnt)
// delay slot stuff
static int emith_is_j(u32 op)	// J, JAL
	{ return ((op>>26) & 076) == OP_J; }
static int emith_is_jr(u32 op)	// JR, JALR
	{ return (op>>26) == OP__FN && (op & 076) == FN_JR; }
static int emith_is_b(u32 op)	// B
	{ return ((op>>26) & 074) == OP_BEQ ||
		 ((op>>26) == OP__RT && ((op>>16) & 036) == RT_BLTZ); }
// register usage for dependency evaluation XXX better do this as in emit_arm?
static uint64_t emith_has_rs[5] =	// OP__FN1-3, OP__RT, others
	{ 0x005ffcffffda0fd2ULL, 0x0000003300000037ULL, 0x00000000000000ffULL,
	  0x800f5f0fUL, 0xf7ffffff0ff07ff0ULL };
static uint64_t emith_has_rt[5] =	// OP__FN1-3, OP__RT, others
	{ 0xdd5ffcffffd00cddULL, 0x0000000000000037ULL, 0x0000001100000000ULL,
	  0x00000000UL, 0x80007f440c300030ULL };
static uint64_t emith_has_rd[5] =	// OP__FN1-3, OP__RT, others (rt instead of rd)
	{ 0xdd00fcff00d50edfULL, 0x0000003300000004ULL, 0x08000011000000ffULL,
	  0x00000000UL, 0x119100ff0f00ff00ULL };
#define emith_has_(rx,ix,op,sa,m) \
	(emith_has_##rx[ix] & (1ULL << (((op)>>(sa)) & (m))))
static int emith_rs(u32 op)
	{ if ((op>>26) == OP__FN)
		return emith_has_(rs,0,op, 0,0x3f) ? (op>>21)&0x1f : 0;
	  if ((op>>26) == OP__FN2)
		return emith_has_(rs,1,op, 0,0x3f) ? (op>>21)&0x1f : 0;
	  if ((op>>26) == OP__FN3)
		return emith_has_(rs,2,op, 0,0x3f) ? (op>>21)&0x1f : 0;
	  if ((op>>26) == OP__RT)
		return emith_has_(rs,3,op,16,0x1f) ? (op>>21)&0x1f : 0;
	  return emith_has_(rs,4,op,26,0x3f) ? (op>>21)&0x1f : 0;
	}
static int emith_rt(u32 op)
	{ if ((op>>26) == OP__FN)
		return emith_has_(rt,0,op, 0,0x3f) ? (op>>16)&0x1f : 0;
	  if ((op>>26) == OP__FN2)
		return emith_has_(rt,1,op, 0,0x3f) ? (op>>16)&0x1f : 0;
	  if ((op>>26) == OP__FN3)
		return emith_has_(rt,2,op, 0,0x3f) ? (op>>16)&0x1f : 0;
	  if ((op>>26) == OP__RT)
		return 0;
	  return emith_has_(rt,4,op,26,0x3f) ? (op>>16)&0x1f : 0;
	}
static int emith_rd(u32 op)
	{ int ret = emith_has_(rd,4,op,26,0x3f) ? (op>>16)&0x1f : -1;
	  if ((op>>26) == OP__FN)
		ret = emith_has_(rd,0,op, 0,0x3f) ? (op>>11)&0x1f : -1;
	  if ((op>>26) == OP__FN2)
		ret = emith_has_(rd,1,op, 0,0x3f) ? (op>>11)&0x1f : -1;
	  if ((op>>26) == OP__FN3 && (op&0x3f) == FN3_BSHFL)
		ret = emith_has_(rd,2,op, 0,0x3f) ? (op>>11)&0x1f : -1;
	  if ((op>>26) == OP__FN3 && (op&0x3f) != FN3_BSHFL)
		ret = emith_has_(rd,2,op, 0,0x3f) ? (op>>16)&0x1f : -1;
	  if ((op>>26) == OP__RT)
		ret = -1;
	  return (ret ?: -1);	// Z0 doesn't have dependencies
	}
static int emith_b_isswap(u32 bop, u32 lop)
{
	if (emith_is_j(bop))
		return bop;
	else if (emith_is_jr(bop) && emith_rd(lop) != emith_rs(bop))
		return bop;
	else if (emith_is_b(bop) && emith_rd(lop) != emith_rs(bop) &&
			emith_rd(lop) != emith_rt(bop))
		if ((bop & 0xffff) != 0x7fff)	// displacement overflow?
			return (bop & 0xffff0000) | ((bop+1) & 0x0000ffff);
	return 0;
}
static int emith_insn_swappable(u32 op1, u32 op2)
{
	if (emith_rd(op1) != emith_rd(op2) &&
	    emith_rs(op1) != emith_rd(op2) && emith_rt(op1) != emith_rd(op2) &&
	    emith_rs(op2) != emith_rd(op1) && emith_rt(op2) != emith_rd(op1))
		return 1;
	return 0;
}
// emit branch, trying to fill the delay slot with one of the last insns
static void *emith_branch(u32 op)
{
	unsigned idx = emith_last_idx, ds = idx;
	u32 bop = 0, sop;
	void *bp;
	int i, j, s;
	// look for a ds insn; an older one may only overtake newer ones it doesn't interact with
	for (i = 0; i < emith_last_cnt && !bop; i++) {
		ds = (idx-i) % FSZ;
		sop = emith_last_insns[ds];
		for (j = i, s = 1; j > 0 && s; j--)
			s = emith_insn_swappable(emith_last_insns[(ds+j)%FSZ], sop);
		if (s)
			bop = emith_b_isswap(op, sop);
	}
	// flush FIFO, but omit delay slot insn
	tcache_ptr = (void *)((u32 *)tcache_ptr - emith_last_cnt);
	idx = (idx-emith_last_cnt+1) % FSZ;
	for (i = emith_last_cnt; i > 0; i--, idx = (idx+1) % FSZ)
		if (!bop || idx != ds)
			EMIT_PTR(tcache_ptr, emith_last_insns[idx]);
	emith_last_cnt = 0;
	// emit branch and delay slot
	bp = tcache_ptr;
	if (bop) {	// can swap
		EMIT_PTR(tcache_ptr, bop); COUNT_OP;
		EMIT_PTR(tcache_ptr, emith_last_insns[ds]);
	} else {	// can't swap
		EMIT_PTR(tcache_ptr, op); COUNT_OP;
		EMIT_PTR(tcache_ptr, MIPS_NOP); COUNT_OP;
	}
	return bp;
}
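// Example (illustrative): after EMIT(MIPS_ADD_REG(4, 4, 5)), calling
// emith_branch(MIPS_J(target)) finds the ADDU still in the FIFO, emits the
// J first and the ADDU in its delay slot; if no FIFO insn may legally move
// past the branch, a NOP fills the slot instead.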
// if-then-else conditional execution helpers
#define JMP_POS(ptr) \
	ptr = emith_branch(MIPS_BCONDZ(cond_m, cond_r, 0));
#define JMP_EMIT(cond, ptr) { \
	u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr) - 4; \
	emith_flush(); /* prohibit delay slot switching across jump targets */ \
	EMIT_PTR(ptr, MIPS_BCONDZ(cond_m, cond_r, val_ & 0x0003ffff)); \
}
#define JMP_EMIT_NC(ptr) { \
	u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr) - 4; \
	emith_flush(); \
	EMIT_PTR(ptr, MIPS_B(val_ & 0x0003ffff)); \
}
#define EMITH_JMP_START(cond) { \
	int cond_r, cond_m = emith_cond_check(cond, &cond_r); \
	u8 *cond_ptr; \
	JMP_POS(cond_ptr)
#define EMITH_JMP_END(cond) \
	JMP_EMIT(cond, cond_ptr); \
}
#define EMITH_JMP3_START(cond) { \
	int cond_r, cond_m = emith_cond_check(cond, &cond_r); \
	u8 *cond_ptr, *else_ptr; \
	JMP_POS(cond_ptr)
#define EMITH_JMP3_MID(cond) \
	JMP_POS(else_ptr); \
	JMP_EMIT(cond, cond_ptr);
#define EMITH_JMP3_END() \
	JMP_EMIT_NC(else_ptr); \
}
// "simple" jump (no more than a few insns)
// ARM32 will use conditional instructions here
#define EMITH_SJMP_START	EMITH_JMP_START
#define EMITH_SJMP_END		EMITH_JMP_END
#define EMITH_SJMP3_START	EMITH_JMP3_START
#define EMITH_SJMP3_MID		EMITH_JMP3_MID
#define EMITH_SJMP3_END		EMITH_JMP3_END
#define EMITH_SJMP2_START(cond) \
	EMITH_SJMP3_START(cond)
#define EMITH_SJMP2_MID(cond) \
	EMITH_SJMP3_MID(cond)
#define EMITH_SJMP2_END(cond) \
	EMITH_SJMP3_END()
// flag register emulation. this is modelled after arm/x86.
// the FNZ register stores the result of the last flag-setting operation for
// the N and Z flags, used for EQ,NE,MI,PL branches.
// the FC register stores the C flag (used for HI,HS,LO,LS,CC,CS).
// the FV register stores information for V flag calculation (used for
// GT,GE,LT,LE,VC,VS). V flag is costly and only fully calculated when needed.
// the core registers may be temp registers, since the condition after calls
// is undefined anyway.
// flag emulation creates 2 (e.g. cmp #0/beq) up to 9 (e.g. adcf/ble) extra insns.
// flag handling shortcuts may reduce this by 1-4 insns, see emith_cond_check()
static int emith_cmp_rs, emith_cmp_rt;	// registers used in cmp_r_r/cmp_r_imm
static s32 emith_cmp_imm;		// immediate value used in cmp_r_imm
enum { _FHC=1, _FHV=2 } emith_flg_hint;	// C/V flag usage hinted by compiler
static int emith_flg_noV;		// V flag known not to be set
#define EMITH_HINT_COND(cond) do { \
	/* only need to check cond>>1 since the lowest bit inverts the cond */ \
	unsigned _mv = BITMASK3(DCOND_VS>>1,DCOND_GE>>1,DCOND_GT>>1); \
	unsigned _mc = _mv | BITMASK2(DCOND_HS>>1,DCOND_HI>>1); \
	emith_flg_hint  = (_mv & BITMASK1(cond >> 1) ? _FHV : 0); \
	emith_flg_hint |= (_mc & BITMASK1(cond >> 1) ? _FHC : 0); \
} while (0)
// store minimal cc information: rd, rt^rs, carry
// NB: the result *must* first go to FNZ, in case rd == rs or rd == rt.
// NB: for adcf and sbcf, carry-in must be dealt with separately (see there)
static void emith_set_arith_flags(int rd, int rs, int rt, s32 imm, int sub)
{
	if (emith_flg_hint & _FHC) {
		if (sub)	// C = sub:rt<rd, add:rd<rt
			EMIT(MIPS_SLTU_REG(FC, rs, FNZ));
		else	EMIT(MIPS_SLTU_REG(FC, FNZ, rs));	// C in FC, bit 0
	}
	if (emith_flg_hint & _FHV) {
		emith_flg_noV = 0;
		if (rt > Z0)	// Nt^Ns in FV, bit 31
			EMIT(MIPS_XOR_REG(FV, rs, rt));
		else if (rt == Z0 || imm == 0)
			emith_flg_noV = 1;	// imm #0 can't overflow
		else if ((imm < 0) == !sub)
			EMIT(MIPS_NOR_REG(FV, rs, Z0));
		else if ((imm > 0) == !sub)
			EMIT(MIPS_XOR_REG(FV, rs, Z0));
	}
	// full V = Nd^Nt^Ns^C calculation is deferred until really needed
	if (rd && rd != FNZ)
		EMIT(MIPS_MOVE_REG(rd, FNZ));	// N,Z via result value in FNZ
	emith_cmp_rs = emith_cmp_rt = -1;
}
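// Example (illustrative): with both hints set, emith_addf_r_r(4, 5) emits
//	addu FNZ, $4, $5	; result to FNZ for later N/Z tests
//	sltu FC, FNZ, $4	; unsigned wraparound iff result < operand -> C
//	xor  FV, $4, $5		; Nt^Ns, kept for deferred V evaluation
//	or   $4, FNZ, $0	; finally move the result to its destination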
// since MIPS has less-than and compare-branch insns, handle cmp separately by
// storing the involved regs for later use in one of those MIPS insns.
// This works for all conditions except VC/VS, which are fortunately never used.
static void emith_set_compare_flags(int rs, int rt, s32 imm)
{
	emith_cmp_rt = rt;
	emith_cmp_rs = rs;
	emith_cmp_imm = imm;
}
// data processing, register
#define emith_move_r_r_ptr(d, s) \
	EMIT(MIPS_MOVE_REG(d, s))
#define emith_move_r_r_ptr_c(cond, d, s) \
	emith_move_r_r_ptr(d, s)
#define emith_move_r_r(d, s) \
	emith_move_r_r_ptr(d, s)
#define emith_move_r_r_c(cond, d, s) \
	emith_move_r_r(d, s)
#define emith_mvn_r_r(d, s) \
	EMIT(MIPS_MVN_REG(d, s))
#define emith_add_r_r_r_lsl_ptr(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
		EMIT(MIPS_OP_REG(FN_PADDU,_, d, s1, AT)); \
	} else	EMIT(MIPS_OP_REG(FN_PADDU,_, d, s1, s2)); \
} while (0)
#define emith_add_r_r_r_lsl(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
		EMIT(MIPS_ADD_REG(d, s1, AT)); \
	} else	EMIT(MIPS_ADD_REG(d, s1, s2)); \
} while (0)
#define emith_add_r_r_r_lsr(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSR_IMM(AT, s2, simm)); \
		EMIT(MIPS_ADD_REG(d, s1, AT)); \
	} else	EMIT(MIPS_ADD_REG(d, s1, s2)); \
} while (0)
#define emith_addf_r_r_r_lsl_ptr(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
		EMIT(MIPS_OP_REG(FN_PADDU,_, FNZ, s1, AT)); \
		emith_set_arith_flags(d, s1, AT, 0, 0); \
	} else { \
		EMIT(MIPS_OP_REG(FN_PADDU,_, FNZ, s1, s2)); \
		emith_set_arith_flags(d, s1, s2, 0, 0); \
	} \
} while (0)
#define emith_addf_r_r_r_lsl(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
		EMIT(MIPS_ADD_REG(FNZ, s1, AT)); \
		emith_set_arith_flags(d, s1, AT, 0, 0); \
	} else { \
		EMIT(MIPS_ADD_REG(FNZ, s1, s2)); \
		emith_set_arith_flags(d, s1, s2, 0, 0); \
	} \
} while (0)
#define emith_addf_r_r_r_lsr(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSR_IMM(AT, s2, simm)); \
		EMIT(MIPS_ADD_REG(FNZ, s1, AT)); \
		emith_set_arith_flags(d, s1, AT, 0, 0); \
	} else { \
		EMIT(MIPS_ADD_REG(FNZ, s1, s2)); \
		emith_set_arith_flags(d, s1, s2, 0, 0); \
	} \
} while (0)
#define emith_sub_r_r_r_lsl(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
		EMIT(MIPS_SUB_REG(d, s1, AT)); \
	} else	EMIT(MIPS_SUB_REG(d, s1, s2)); \
} while (0)
#define emith_subf_r_r_r_lsl(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
		EMIT(MIPS_SUB_REG(FNZ, s1, AT)); \
		emith_set_arith_flags(d, s1, AT, 0, 1); \
	} else { \
		EMIT(MIPS_SUB_REG(FNZ, s1, s2)); \
		emith_set_arith_flags(d, s1, s2, 0, 1); \
	} \
} while (0)
#define emith_or_r_r_r_lsl(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
		EMIT(MIPS_OR_REG(d, s1, AT)); \
	} else	EMIT(MIPS_OR_REG(d, s1, s2)); \
} while (0)
#define emith_or_r_r_r_lsr(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSR_IMM(AT, s2, simm)); \
		EMIT(MIPS_OR_REG(d, s1, AT)); \
	} else	EMIT(MIPS_OR_REG(d, s1, s2)); \
} while (0)
#define emith_eor_r_r_r_lsl(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
		EMIT(MIPS_XOR_REG(d, s1, AT)); \
	} else	EMIT(MIPS_XOR_REG(d, s1, s2)); \
} while (0)
#define emith_eor_r_r_r_lsr(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSR_IMM(AT, s2, simm)); \
		EMIT(MIPS_XOR_REG(d, s1, AT)); \
	} else	EMIT(MIPS_XOR_REG(d, s1, s2)); \
} while (0)
#define emith_and_r_r_r_lsl(d, s1, s2, simm) do { \
	if (simm) { \
		EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
		EMIT(MIPS_AND_REG(d, s1, AT)); \
	} else	EMIT(MIPS_AND_REG(d, s1, s2)); \
} while (0)
#define emith_or_r_r_lsl(d, s, lslimm) \
	emith_or_r_r_r_lsl(d, d, s, lslimm)
#define emith_or_r_r_lsr(d, s, lsrimm) \
	emith_or_r_r_r_lsr(d, d, s, lsrimm)
#define emith_eor_r_r_lsl(d, s, lslimm) \
	emith_eor_r_r_r_lsl(d, d, s, lslimm)
#define emith_eor_r_r_lsr(d, s, lsrimm) \
	emith_eor_r_r_r_lsr(d, d, s, lsrimm)
#define emith_add_r_r_r(d, s1, s2) \
	emith_add_r_r_r_lsl(d, s1, s2, 0)
#define emith_addf_r_r_r_ptr(d, s1, s2) \
	emith_addf_r_r_r_lsl_ptr(d, s1, s2, 0)
#define emith_addf_r_r_r(d, s1, s2) \
	emith_addf_r_r_r_lsl(d, s1, s2, 0)
#define emith_sub_r_r_r(d, s1, s2) \
	emith_sub_r_r_r_lsl(d, s1, s2, 0)
#define emith_subf_r_r_r(d, s1, s2) \
	emith_subf_r_r_r_lsl(d, s1, s2, 0)
#define emith_or_r_r_r(d, s1, s2) \
	emith_or_r_r_r_lsl(d, s1, s2, 0)
#define emith_eor_r_r_r(d, s1, s2) \
	emith_eor_r_r_r_lsl(d, s1, s2, 0)
#define emith_and_r_r_r(d, s1, s2) \
	emith_and_r_r_r_lsl(d, s1, s2, 0)
#define emith_add_r_r_ptr(d, s) \
	emith_add_r_r_r_lsl_ptr(d, d, s, 0)
#define emith_add_r_r(d, s) \
	emith_add_r_r_r(d, d, s)
#define emith_sub_r_r(d, s) \
	emith_sub_r_r_r(d, d, s)
#define emith_neg_r_r(d, s) \
	EMIT(MIPS_NEG_REG(d, s))
#define emith_adc_r_r_r(d, s1, s2) do { \
	emith_add_r_r_r(AT, s2, FC); \
	emith_add_r_r_r(d, s1, AT); \
} while (0)
#define emith_sbc_r_r_r(d, s1, s2) do { \
	emith_add_r_r_r(AT, s2, FC); \
	emith_sub_r_r_r(d, s1, AT); \
} while (0)
#define emith_adc_r_r(d, s) \
	emith_adc_r_r_r(d, d, s)
#define emith_negc_r_r(d, s) \
	emith_sbc_r_r_r(d, Z0, s)
// NB: the incoming carry Cin can cause Cout if s2+Cin=0 (or s1+Cin=0 FWIW)
// moreover, if s2+Cin=0 caused Cout, s1+s2+Cin=s1+0 can't cause another Cout
#define emith_adcf_r_r_r(d, s1, s2) do { \
	emith_add_r_r_r(FNZ, s2, FC); \
	EMIT(MIPS_SLTU_REG(AT, FNZ, FC)); \
	emith_add_r_r_r(FNZ, s1, FNZ); \
	emith_set_arith_flags(d, s1, s2, 0, 0); \
	emith_or_r_r(FC, AT); \
} while (0)
#define emith_sbcf_r_r_r(d, s1, s2) do { \
	emith_add_r_r_r(FNZ, s2, FC); \
	EMIT(MIPS_SLTU_REG(AT, FNZ, FC)); \
	emith_sub_r_r_r(FNZ, s1, FNZ); \
	emith_set_arith_flags(d, s1, s2, 0, 1); \
	emith_or_r_r(FC, AT); \
} while (0)
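// Example (illustrative): emith_adcf_r_r_r(d, s1, s2) with s2 = 0xffffffff
// and C = 1: FNZ = s2+C wraps to 0, so AT = 1 records the carry that the
// final SLTU (comparing s1+0 against s1) would miss; FC |= AT merges it in.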
#define emith_and_r_r(d, s) \
	emith_and_r_r_r(d, d, s)
#define emith_and_r_r_c(cond, d, s) \
	emith_and_r_r(d, s)
#define emith_or_r_r(d, s) \
	emith_or_r_r_r(d, d, s)
#define emith_eor_r_r(d, s) \
	emith_eor_r_r_r(d, d, s)
#define emith_tst_r_r_ptr(d, s) do { \
	if (d != s) { \
		emith_and_r_r_r(FNZ, d, s); \
		emith_cmp_rs = emith_cmp_rt = -1; \
	} else	emith_cmp_rs = s, emith_cmp_rt = Z0; \
} while (0)
#define emith_tst_r_r(d, s) \
	emith_tst_r_r_ptr(d, s)
#define emith_teq_r_r(d, s) do { \
	emith_eor_r_r_r(FNZ, d, s); \
	emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_cmp_r_r(d, s) \
	emith_set_compare_flags(d, s, 0)
//	emith_subf_r_r_r(FNZ, d, s)
#define emith_addf_r_r(d, s) \
	emith_addf_r_r_r(d, d, s)
#define emith_subf_r_r(d, s) \
	emith_subf_r_r_r(d, d, s)
#define emith_adcf_r_r(d, s) \
	emith_adcf_r_r_r(d, d, s)
#define emith_sbcf_r_r(d, s) \
	emith_sbcf_r_r_r(d, d, s)
#define emith_negcf_r_r(d, s) \
	emith_sbcf_r_r_r(d, Z0, s)
// move immediate
static void emith_move_imm(int r, uintptr_t imm)
{
#if _MIPS_SZPTR == 64
	if ((s32)imm != imm) {
		emith_move_imm(r, imm >> 32);
		if (imm & 0xffff0000) {
			EMIT(MIPS_DLSL_IMM(r, r, 16));
			EMIT(MIPS_OR_IMM(r, r, (imm >> 16) & 0xffff));
			EMIT(MIPS_DLSL_IMM(r, r, 16));
		} else	EMIT(MIPS_DLSL32_IMM(r, r, 0));
		if (imm & 0x0000ffff)
			EMIT(MIPS_OR_IMM(r, r, imm & 0xffff));
	} else
#endif
	if ((s16)imm == imm) {
		EMIT(MIPS_ADD_IMM(r, Z0, imm));
	} else if (!((u32)imm >> 16)) {
		EMIT(MIPS_OR_IMM(r, Z0, imm));
	} else {
		int s = Z0;
		if ((u32)imm >> 16) {
			EMIT(MIPS_MOVT_IMM(r, (u32)imm >> 16));
			s = r;
		}
		if ((u16)imm)
			EMIT(MIPS_OR_IMM(r, s, (u16)imm));
	}
}
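// Example (illustrative): emith_move_r_imm(r, 0x12345678) takes the last
// branch above and emits "lui r, 0x1234; ori r, r, 0x5678"; values fitting
// in 16 bits need only a single ADDIU (signed) or ORI (unsigned).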
#define emith_move_r_ptr_imm(r, imm) \
	emith_move_imm(r, (uintptr_t)(imm))
#define emith_move_r_imm(r, imm) \
	emith_move_imm(r, (u32)(imm))
#define emith_move_r_imm_c(cond, r, imm) \
	emith_move_r_imm(r, imm)
#define emith_move_r_imm_s8_patchable(r, imm) \
	EMIT(MIPS_ADD_IMM(r, Z0, (s8)(imm)))
#define emith_move_r_imm_s8_patch(ptr, imm) do { \
	u32 *ptr_ = (u32 *)ptr; \
	while (*ptr_ >> 26 != OP_ADDIU) ptr_++; \
	EMIT_PTR(ptr_, (*ptr_ & 0xffff0000) | (u16)(s8)(imm)); \
} while (0)
// arithmetic, immediate - can only be ADDI[U], since SUBI[U] doesn't exist
static void emith_add_imm(int ptr, int rd, int rs, u32 imm)
{
	if ((s16)imm == imm) {
		if (imm || rd != rs)
			EMIT(MIPS_OP_IMM(ptr ? OP_PADDIU : OP_ADDIU, rd, rs, imm));
	} else if ((s32)imm < 0) {
		emith_move_r_imm(AT, -imm);
		EMIT(MIPS_OP_REG((ptr ? FN_PSUBU : FN_SUBU),_, rd, rs, AT));
	} else {
		emith_move_r_imm(AT, imm);
		EMIT(MIPS_OP_REG((ptr ? FN_PADDU : FN_ADDU),_, rd, rs, AT));
	}
}
#define emith_add_r_imm(r, imm) \
	emith_add_r_r_imm(r, r, imm)
#define emith_add_r_imm_c(cond, r, imm) \
	emith_add_r_imm(r, imm)
#define emith_addf_r_imm(r, imm) \
	emith_addf_r_r_imm(r, r, imm)
#define emith_sub_r_imm(r, imm) \
	emith_sub_r_r_imm(r, r, imm)
#define emith_sub_r_imm_c(cond, r, imm) \
	emith_sub_r_imm(r, imm)
#define emith_subf_r_imm(r, imm) \
	emith_subf_r_r_imm(r, r, imm)
#define emith_adc_r_imm(r, imm) \
	emith_adc_r_r_imm(r, r, imm)
#define emith_adcf_r_imm(r, imm) \
	emith_adcf_r_r_imm(r, r, imm)
#define emith_cmp_r_imm(r, imm) \
	emith_set_compare_flags(r, -1, imm)
//	emith_subf_r_r_imm(FNZ, r, (s16)imm)
#define emith_add_r_r_ptr_imm(d, s, imm) \
	emith_add_imm(1, d, s, imm)
#define emith_add_r_r_imm(d, s, imm) \
	emith_add_imm(0, d, s, imm)
#define emith_addf_r_r_imm(d, s, imm) do { \
	emith_add_r_r_imm(FNZ, s, imm); \
	emith_set_arith_flags(d, s, -1, imm, 0); \
} while (0)
#define emith_adc_r_r_imm(d, s, imm) do { \
	emith_add_r_r_r(AT, s, FC); \
	emith_add_r_r_imm(d, AT, imm); \
} while (0)
#define emith_adcf_r_r_imm(d, s, imm) do { \
	if (imm == 0) { \
		emith_add_r_r_r(FNZ, s, FC); \
		emith_set_arith_flags(d, s, -1, 1, 0); \
	} else { \
		emith_add_r_r_r(FNZ, s, FC); \
		EMIT(MIPS_SLTU_REG(AT, FNZ, FC)); \
		emith_add_r_r_imm(FNZ, FNZ, imm); \
		emith_set_arith_flags(d, s, -1, imm, 0); \
		emith_or_r_r(FC, AT); \
	} \
} while (0)
// NB: no SUBI in MIPS II, since ADDI takes a signed imm
#define emith_sub_r_r_imm(d, s, imm) \
	emith_add_r_r_imm(d, s, -(imm))
#define emith_sub_r_r_imm_c(cond, d, s, imm) \
	emith_sub_r_r_imm(d, s, imm)
#define emith_subf_r_r_imm(d, s, imm) do { \
	emith_sub_r_r_imm(FNZ, s, imm); \
	emith_set_arith_flags(d, s, -1, imm, 1); \
} while (0)
// logical, immediate
static void emith_log_imm(int op, int rd, int rs, u32 imm)
{
	if (imm >> 16) {
		emith_move_r_imm(AT, imm);
		EMIT(MIPS_OP_REG(FN_AND + (op-OP_ANDI),_, rd, rs, AT));
	} else if (op == OP_ANDI || imm || rd != rs)
		EMIT(MIPS_OP_IMM(op, rd, rs, imm));
}
#define emith_and_r_imm(r, imm) \
	emith_log_imm(OP_ANDI, r, r, imm)
#define emith_or_r_imm(r, imm) \
	emith_log_imm(OP_ORI, r, r, imm)
#define emith_or_r_imm_c(cond, r, imm) \
	emith_or_r_imm(r, imm)
#define emith_eor_r_imm_ptr(r, imm) \
	emith_log_imm(OP_XORI, r, r, imm)
#define emith_eor_r_imm_ptr_c(cond, r, imm) \
	emith_eor_r_imm_ptr(r, imm)
#define emith_eor_r_imm(r, imm) \
	emith_eor_r_imm_ptr(r, imm)
#define emith_eor_r_imm_c(cond, r, imm) \
	emith_eor_r_imm(r, imm)
/* NB: BIC #imm not available in MIPS; use AND #~imm instead */
#define emith_bic_r_imm(r, imm) \
	emith_log_imm(OP_ANDI, r, r, ~(imm))
#define emith_bic_r_imm_c(cond, r, imm) \
	emith_bic_r_imm(r, imm)
#define emith_tst_r_imm(r, imm) do { \
	emith_log_imm(OP_ANDI, FNZ, r, imm); \
	emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_tst_r_imm_c(cond, r, imm) \
	emith_tst_r_imm(r, imm)
#define emith_and_r_r_imm(d, s, imm) \
	emith_log_imm(OP_ANDI, d, s, imm)
#define emith_or_r_r_imm(d, s, imm) \
	emith_log_imm(OP_ORI, d, s, imm)
#define emith_eor_r_r_imm(d, s, imm) \
	emith_log_imm(OP_XORI, d, s, imm)
// shift
#define emith_lsl(d, s, cnt) \
	EMIT(MIPS_LSL_IMM(d, s, cnt))
#define emith_lsr(d, s, cnt) \
	EMIT(MIPS_LSR_IMM(d, s, cnt))
#define emith_asr(d, s, cnt) \
	EMIT(MIPS_ASR_IMM(d, s, cnt))
#define emith_ror(d, s, cnt) do { \
	if (__mips_isa_rev < 2) { \
		EMIT(MIPS_LSL_IMM(AT, s, 32-(cnt))); \
		EMIT(MIPS_LSR_IMM(d, s, cnt)); \
		EMIT(MIPS_OR_REG(d, d, AT)); \
	} else	EMIT(MIPS_ROR_IMM(d, s, cnt)); \
} while (0)
#define emith_ror_c(cond, d, s, cnt) \
	emith_ror(d, s, cnt)
#define emith_rol(d, s, cnt) do { \
	if (__mips_isa_rev < 2) { \
		EMIT(MIPS_LSR_IMM(AT, s, 32-(cnt))); \
		EMIT(MIPS_LSL_IMM(d, s, cnt)); \
		EMIT(MIPS_OR_REG(d, d, AT)); \
	} else	EMIT(MIPS_ROR_IMM(d, s, 32-(cnt))); \
} while (0)
#define emith_rorc(d) do { \
	emith_lsr(d, d, 1); \
	emith_lsl(AT, FC, 31); \
	emith_or_r_r(d, AT); \
} while (0)
#define emith_rolc(d) do { \
	emith_lsl(d, d, 1); \
	emith_or_r_r(d, FC); \
} while (0)
// NB: all flag setting shifts make V undefined
#define emith_lslf(d, s, cnt) do { \
	int _s = s; \
	if ((cnt) > 1) { \
		emith_lsl(d, s, cnt-1); \
		_s = d; \
	} \
	if ((cnt) > 0) { \
		emith_lsr(FC, _s, 31); \
		emith_lsl(d, _s, 1); \
	} \
	emith_move_r_r(FNZ, d); \
	emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_lsrf(d, s, cnt) do { \
	int _s = s; \
	if ((cnt) > 1) { \
		emith_lsr(d, s, cnt-1); \
		_s = d; \
	} \
	if ((cnt) > 0) { \
		emith_and_r_r_imm(FC, _s, 1); \
		emith_lsr(d, _s, 1); \
	} \
	emith_move_r_r(FNZ, d); \
	emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_asrf(d, s, cnt) do { \
	int _s = s; \
	if ((cnt) > 1) { \
		emith_asr(d, s, cnt-1); \
		_s = d; \
	} \
	if ((cnt) > 0) { \
		emith_and_r_r_imm(FC, _s, 1); \
		emith_asr(d, _s, 1); \
	} \
	emith_move_r_r(FNZ, d); \
	emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_rolf(d, s, cnt) do { \
	emith_rol(d, s, cnt); \
	emith_and_r_r_imm(FC, d, 1); \
	emith_move_r_r(FNZ, d); \
	emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_rorf(d, s, cnt) do { \
	emith_ror(d, s, cnt); \
	emith_lsr(FC, d, 31); \
	emith_move_r_r(FNZ, d); \
	emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_rolcf(d) do { \
	emith_lsr(AT, d, 31); \
	emith_lsl(d, d, 1); \
	emith_or_r_r(d, FC); \
	emith_move_r_r(FC, AT); \
	emith_move_r_r(FNZ, d); \
	emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_rorcf(d) do { \
	emith_and_r_r_imm(AT, d, 1); \
	emith_lsr(d, d, 1); \
	emith_lsl(FC, FC, 31); \
	emith_or_r_r(d, FC); \
	emith_move_r_r(FC, AT); \
	emith_move_r_r(FNZ, d); \
	emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
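// Example (illustrative): emith_rorcf(4) rotates $4 right through carry:
//	andi AT, $4, 1	; bit 0 becomes the new C
//	srl  $4, $4, 1
//	sll  FC, FC, 31	; old C enters at bit 31
//	or   $4, $4, FC
//	or   FC, AT, $0	; update C
//	or   FNZ, $4, $0	; N/Z from the result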
// signed/unsigned extend
#define emith_clear_msb(d, s, count) /* bits to clear */ do { \
	u32 t; \
	if (__mips_isa_rev >= 2) \
		EMIT(MIPS_EXT_IMM(d, s, 0, 32-(count))); \
	else if ((count) >= 16) { \
		t = (count) - 16; \
		t = 0xffff >> t; \
		emith_and_r_r_imm(d, s, t); \
	} else { \
		emith_lsl(d, s, count); \
		emith_lsr(d, d, count); \
	} \
} while (0)
#define emith_clear_msb_c(cond, d, s, count) \
	emith_clear_msb(d, s, count)
#define emith_sext(d, s, count) /* bits to keep */ do { \
	if (__mips_isa_rev >= 2 && count == 8) \
		EMIT(MIPS_SEB_REG(d, s)); \
	else if (__mips_isa_rev >= 2 && count == 16) \
		EMIT(MIPS_SEH_REG(d, s)); \
	else { \
		emith_lsl(d, s, 32-(count)); \
		emith_asr(d, d, 32-(count)); \
	} \
} while (0)
// multiply Rd = Rn*Rm (+ Ra); NB: next 2 insns after MFLO/MFHI mustn't be MULT
static u8 *last_lohi;
static void emith_lohi_nops(void)
{
	u32 d;
	while ((d = (u8 *)tcache_ptr - last_lohi) < 8 && d >= 0) EMIT(MIPS_NOP);
}
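// Example (illustrative): two back-to-back emith_mul() calls are safe here;
// the second one gets NOPs inserted until its MULTU is at least 8 bytes
// (2 insns) past the preceding MFLO, sidestepping the LO/HI hazard above.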
#define emith_mul(d, s1, s2) do { \
	emith_lohi_nops(); \
	EMIT(MIPS_MULTU(s1, s2)); \
	EMIT(MIPS_MFLO(d)); \
	last_lohi = (u8 *)tcache_ptr; \
} while (0)
#define emith_mul_u64(dlo, dhi, s1, s2) do { \
	emith_lohi_nops(); \
	EMIT(MIPS_MULTU(s1, s2)); \
	EMIT(MIPS_MFLO(dlo)); \
	EMIT(MIPS_MFHI(dhi)); \
	last_lohi = (u8 *)tcache_ptr; \
} while (0)
#define emith_mul_s64(dlo, dhi, s1, s2) do { \
	emith_lohi_nops(); \
	EMIT(MIPS_MULT(s1, s2)); \
	EMIT(MIPS_MFLO(dlo)); \
	EMIT(MIPS_MFHI(dhi)); \
	last_lohi = (u8 *)tcache_ptr; \
} while (0)
#define emith_mula_s64(dlo, dhi, s1, s2) do { \
	int t_ = rcache_get_tmp(); \
	emith_lohi_nops(); \
	EMIT(MIPS_MULT(s1, s2)); \
	EMIT(MIPS_MFLO(AT)); \
	EMIT(MIPS_MFHI(t_)); \
	last_lohi = (u8 *)tcache_ptr; \
	emith_add_r_r(dlo, AT); \
	EMIT(MIPS_SLTU_REG(AT, dlo, AT)); \
	emith_add_r_r(dhi, AT); \
	emith_add_r_r(dhi, t_); \
	rcache_free_tmp(t_); \
} while (0)
#define emith_mula_s64_c(cond, dlo, dhi, s1, s2) \
	emith_mula_s64(dlo, dhi, s1, s2)
// load/store. offs has 16 bits signed, which is currently sufficient
#define emith_read_r_r_offs_ptr(r, rs, offs) \
	EMIT(MIPS_OP_IMM(OP_LP, r, rs, offs))
#define emith_read_r_r_offs_ptr_c(cond, r, rs, offs) \
	emith_read_r_r_offs_ptr(r, rs, offs)
#define emith_read_r_r_offs(r, rs, offs) \
	EMIT(MIPS_LW(r, rs, offs))
#define emith_read_r_r_offs_c(cond, r, rs, offs) \
	emith_read_r_r_offs(r, rs, offs)
#define emith_read_r_r_r_ptr(r, rs, rm) do { \
	emith_add_r_r_r(AT, rs, rm); \
	EMIT(MIPS_OP_IMM(OP_LP, r, AT, 0)); \
} while (0)
#define emith_read_r_r_r(r, rs, rm) do { \
	emith_add_r_r_r(AT, rs, rm); \
	EMIT(MIPS_LW(r, AT, 0)); \
} while (0)
#define emith_read_r_r_r_c(cond, r, rs, rm) \
	emith_read_r_r_r(r, rs, rm)
#define emith_read8_r_r_offs(r, rs, offs) \
	EMIT(MIPS_LBU(r, rs, offs))
#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
	emith_read8_r_r_offs(r, rs, offs)
#define emith_read8_r_r_r(r, rs, rm) do { \
	emith_add_r_r_r(AT, rs, rm); \
	EMIT(MIPS_LBU(r, AT, 0)); \
} while (0)
#define emith_read8_r_r_r_c(cond, r, rs, rm) \
	emith_read8_r_r_r(r, rs, rm)
#define emith_read16_r_r_offs(r, rs, offs) \
	EMIT(MIPS_LHU(r, rs, offs))
#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
	emith_read16_r_r_offs(r, rs, offs)
#define emith_read16_r_r_r(r, rs, rm) do { \
	emith_add_r_r_r(AT, rs, rm); \
	EMIT(MIPS_LHU(r, AT, 0)); \
} while (0)
#define emith_read16_r_r_r_c(cond, r, rs, rm) \
	emith_read16_r_r_r(r, rs, rm)
#define emith_read8s_r_r_offs(r, rs, offs) \
	EMIT(MIPS_LB(r, rs, offs))
#define emith_read8s_r_r_offs_c(cond, r, rs, offs) \
	emith_read8s_r_r_offs(r, rs, offs)
#define emith_read8s_r_r_r(r, rs, rm) do { \
	emith_add_r_r_r(AT, rs, rm); \
	EMIT(MIPS_LB(r, AT, 0)); \
} while (0)
#define emith_read8s_r_r_r_c(cond, r, rs, rm) \
	emith_read8s_r_r_r(r, rs, rm)
#define emith_read16s_r_r_offs(r, rs, offs) \
	EMIT(MIPS_LH(r, rs, offs))
#define emith_read16s_r_r_offs_c(cond, r, rs, offs) \
	emith_read16s_r_r_offs(r, rs, offs)
#define emith_read16s_r_r_r(r, rs, rm) do { \
	emith_add_r_r_r(AT, rs, rm); \
	EMIT(MIPS_LH(r, AT, 0)); \
} while (0)
#define emith_read16s_r_r_r_c(cond, r, rs, rm) \
	emith_read16s_r_r_r(r, rs, rm)
#define emith_write_r_r_offs_ptr(r, rs, offs) \
	EMIT(MIPS_OP_IMM(OP_SP, r, rs, offs))
#define emith_write_r_r_offs_ptr_c(cond, r, rs, offs) \
	emith_write_r_r_offs_ptr(r, rs, offs)
#define emith_write_r_r_r_ptr(r, rs, rm) do { \
	emith_add_r_r_r(AT, rs, rm); \
	EMIT(MIPS_OP_IMM(OP_SP, r, AT, 0)); \
} while (0)
#define emith_write_r_r_r_ptr_c(cond, r, rs, rm) \
	emith_write_r_r_r_ptr(r, rs, rm)
#define emith_write_r_r_offs(r, rs, offs) \
	EMIT(MIPS_SW(r, rs, offs))
#define emith_write_r_r_offs_c(cond, r, rs, offs) \
	emith_write_r_r_offs(r, rs, offs)
#define emith_write_r_r_r(r, rs, rm) do { \
	emith_add_r_r_r(AT, rs, rm); \
	EMIT(MIPS_SW(r, AT, 0)); \
} while (0)
#define emith_write_r_r_r_c(cond, r, rs, rm) \
	emith_write_r_r_r(r, rs, rm)
#define emith_ctx_read_ptr(r, offs) \
	emith_read_r_r_offs_ptr(r, CONTEXT_REG, offs)
#define emith_ctx_read(r, offs) \
	emith_read_r_r_offs(r, CONTEXT_REG, offs)
#define emith_ctx_read_c(cond, r, offs) \
	emith_ctx_read(r, offs)
#define emith_ctx_write_ptr(r, offs) \
	emith_write_r_r_offs_ptr(r, CONTEXT_REG, offs)
#define emith_ctx_write(r, offs) \
	emith_write_r_r_offs(r, CONTEXT_REG, offs)
#define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
	int r_ = r, offs_ = offs, cnt_ = cnt; \
	for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
		emith_ctx_read(r_, offs_); \
} while (0)
#define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
	int r_ = r, offs_ = offs, cnt_ = cnt; \
	for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
		emith_ctx_write(r_, offs_); \
} while (0)
// function call handling
#define emith_save_caller_regs(mask) do { \
	int _c; u32 _m = mask & 0x300fffc; /* r2-r15,r24-r25 */ \
	if (__builtin_parity(_m) == 1) _m |= 0x1; /* ABI align */ \
	int _s = count_bits(_m) * 4, _o = _s; \
	if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
	for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
		if (_m & (1 << _c)) \
			{ _o -= 4; if (_c) emith_write_r_r_offs(_c, SP, _o); } \
} while (0)
#define emith_restore_caller_regs(mask) do { \
	int _c; u32 _m = mask & 0x300fffc; \
	if (__builtin_parity(_m) == 1) _m |= 0x1; \
	int _s = count_bits(_m) * 4, _o = 0; \
	for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
		if (_m & (1 << _c)) \
			{ if (_c) emith_read_r_r_offs(_c, SP, _o); _o += 4; } \
	if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
} while (0)
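// Example (illustrative): a mask selecting v0, v1, a0 has odd parity, so
// bit 0 (r0) is added as a dummy slot, making the SP adjustment 16 instead
// of 12 bytes (8-byte ABI alignment); the "if (_c)" tests then skip the
// actual store/load for r0.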
#define host_arg2reg(rd, arg) \
	rd = (arg+4)
#define emith_pass_arg_r(arg, reg) \
	emith_move_r_r(arg, reg)
#define emith_pass_arg_imm(arg, imm) \
	emith_move_r_imm(arg, imm)
// branching
#define emith_invert_branch(cond) /* inverted conditional branch */ \
	(((cond) >> 5) == OP__RT ? (cond) ^ 0x01 : (cond) ^ 0x20)
// evaluate the emulated condition, returns a register/branch type pair
static int emith_cmpr_check(int rs, int rt, int cond, int *r)
{
	int b = 0;
	// condition check for comparing 2 registers
	switch (cond) {
	case DCOND_EQ: *r = rs; b = MIPS_BEQ|rt; break;
	case DCOND_NE: *r = rs; b = MIPS_BNE|rt; break;
	case DCOND_LO: EMIT(MIPS_SLTU_REG(AT, rs, rt));
			*r = AT, b = MIPS_BNE; break;	// s <  t unsigned
	case DCOND_HS: EMIT(MIPS_SLTU_REG(AT, rs, rt));
			*r = AT, b = MIPS_BEQ; break;	// s >= t unsigned
	case DCOND_LS: EMIT(MIPS_SLTU_REG(AT, rt, rs));
			*r = AT, b = MIPS_BEQ; break;	// s <= t unsigned
	case DCOND_HI: EMIT(MIPS_SLTU_REG(AT, rt, rs));
			*r = AT, b = MIPS_BNE; break;	// s >  t unsigned
	case DCOND_LT: if (rt == 0) { *r = rs, b = MIPS_BLT; break; }	// s <  0
			EMIT(MIPS_SLT_REG(AT, rs, rt));
			*r = AT, b = MIPS_BNE; break;	// s <  t
	case DCOND_GE: if (rt == 0) { *r = rs, b = MIPS_BGE; break; }	// s >= 0
			EMIT(MIPS_SLT_REG(AT, rs, rt));
			*r = AT, b = MIPS_BEQ; break;	// s >= t
	case DCOND_LE: if (rt == 0) { *r = rs, b = MIPS_BLE; break; }	// s <= 0
			EMIT(MIPS_SLT_REG(AT, rt, rs));
			*r = AT, b = MIPS_BEQ; break;	// s <= t
	case DCOND_GT: if (rt == 0) { *r = rs, b = MIPS_BGT; break; }	// s >  0
			EMIT(MIPS_SLT_REG(AT, rt, rs));
			*r = AT, b = MIPS_BNE; break;	// s >  t
	}
	return b;
}
static int emith_cmpi_check(int rs, s32 imm, int cond, int *r)
{
	int b = 0;
	// condition check for comparing register with immediate
	if (imm == 0) return emith_cmpr_check(rs, Z0, cond, r);
	switch (cond) {
	case DCOND_EQ: emith_move_r_imm(AT, imm);
			*r = rs; b = MIPS_BEQ|AT; break;
	case DCOND_NE: emith_move_r_imm(AT, imm);
			*r = rs; b = MIPS_BNE|AT; break;
	case DCOND_LO: EMIT(MIPS_SLTU_IMM(AT, rs, imm));
			*r = AT, b = MIPS_BNE; break;	// s <  imm unsigned
	case DCOND_HS: EMIT(MIPS_SLTU_IMM(AT, rs, imm));
			*r = AT, b = MIPS_BEQ; break;	// s >= imm unsigned
	case DCOND_LS: emith_move_r_imm(AT, imm);
			EMIT(MIPS_SLTU_REG(AT, AT, rs));
			*r = AT, b = MIPS_BEQ; break;	// s <= imm unsigned
	case DCOND_HI: emith_move_r_imm(AT, imm);
			EMIT(MIPS_SLTU_REG(AT, AT, rs));
			*r = AT, b = MIPS_BNE; break;	// s >  imm unsigned
	case DCOND_LT: EMIT(MIPS_SLT_IMM(AT, rs, imm));
			*r = AT, b = MIPS_BNE; break;	// s <  imm
	case DCOND_GE: EMIT(MIPS_SLT_IMM(AT, rs, imm));
			*r = AT, b = MIPS_BEQ; break;	// s >= imm
	case DCOND_LE: emith_move_r_imm(AT, imm);
			EMIT(MIPS_SLT_REG(AT, AT, rs));
			*r = AT, b = MIPS_BEQ; break;	// s <= imm
	case DCOND_GT: emith_move_r_imm(AT, imm);
			EMIT(MIPS_SLT_REG(AT, AT, rs));
			*r = AT, b = MIPS_BNE; break;	// s >  imm
	}
	return b;
}
static int emith_cond_check(int cond, int *r)
{
	int b = 0;
	if (emith_cmp_rs >= 0) {
		if (emith_cmp_rt != -1)
			b = emith_cmpr_check(emith_cmp_rs, emith_cmp_rt,  cond, r);
		else	b = emith_cmpi_check(emith_cmp_rs, emith_cmp_imm, cond, r);
	}
	// shortcut for V known to be 0
	if (!b && emith_flg_noV) switch (cond) {
	case DCOND_VS: *r = Z0;  b = MIPS_BNE; break;	// never
	case DCOND_VC: *r = Z0;  b = MIPS_BEQ; break;	// always
	case DCOND_LT: *r = FNZ, b = MIPS_BLT; break;	// N
	case DCOND_GE: *r = FNZ, b = MIPS_BGE; break;	// !N
	case DCOND_LE: *r = FNZ, b = MIPS_BLE; break;	// N || Z
	case DCOND_GT: *r = FNZ, b = MIPS_BGT; break;	// !N && !Z
	}
	// the full monty if no shortcut
	if (!b) switch (cond) {
	// conditions using NZ
	case DCOND_EQ: *r = FNZ; b = MIPS_BEQ; break;	// Z
	case DCOND_NE: *r = FNZ; b = MIPS_BNE; break;	// !Z
	case DCOND_MI: *r = FNZ; b = MIPS_BLT; break;	// N
	case DCOND_PL: *r = FNZ; b = MIPS_BGE; break;	// !N
	// conditions using C
	case DCOND_LO: *r = FC; b = MIPS_BNE; break;	// C
	case DCOND_HS: *r = FC; b = MIPS_BEQ; break;	// !C
	// conditions using CZ
	case DCOND_LS:					// C || Z
	case DCOND_HI:					// !C && !Z
		EMIT(MIPS_ADD_IMM(AT, FC, -1));	// !C && !Z
		EMIT(MIPS_AND_REG(AT, FNZ, AT));
		*r = AT, b = (cond == DCOND_HI ? MIPS_BNE : MIPS_BEQ);
		break;
	// conditions using V
	case DCOND_VS:					// V
	case DCOND_VC:					// !V
		EMIT(MIPS_XOR_REG(AT, FV, FNZ));	// V = Nt^Ns^Nd^C
		EMIT(MIPS_LSR_IMM(AT, AT, 31));
		EMIT(MIPS_XOR_REG(AT, AT, FC));
		*r = AT, b = (cond == DCOND_VS ? MIPS_BNE : MIPS_BEQ);
		break;
	// conditions using VNZ
	case DCOND_LT:					// N^V
	case DCOND_GE:					// !(N^V)
		EMIT(MIPS_LSR_IMM(AT, FV, 31));	// Nd^V = Nt^Ns^C
		EMIT(MIPS_XOR_REG(AT, FC, AT));
		*r = AT, b = (cond == DCOND_LT ? MIPS_BNE : MIPS_BEQ);
		break;
	case DCOND_LE:					// (N^V) || Z
	case DCOND_GT:					// !(N^V) && !Z
		EMIT(MIPS_LSR_IMM(AT, FV, 31));	// Nd^V = Nt^Ns^C
		EMIT(MIPS_XOR_REG(AT, FC, AT));
		EMIT(MIPS_ADD_IMM(AT, AT, -1));	// !(Nd^V) && !Z
		EMIT(MIPS_AND_REG(AT, FNZ, AT));
		*r = AT, b = (cond == DCOND_GT ? MIPS_BNE : MIPS_BEQ);
		break;
	}
	return b;
}
// NB: assumes all targets are in the same 256MB segment
#define emith_jump(target) \
	emith_branch(MIPS_J((uintptr_t)target & 0x0fffffff))
#define emith_jump_patchable(target) \
	emith_jump(target)
// NB: MIPS conditional branches have only +/- 128KB range
#define emith_jump_cond(cond, target) do { \
	int r_, mcond_ = emith_cond_check(cond, &r_); \
	u32 disp_ = (u8 *)target - (u8 *)tcache_ptr - 4; \
	emith_branch(MIPS_BCONDZ(mcond_, r_, disp_ & 0x0003ffff)); \
} while (0)
#define emith_jump_cond_patchable(cond, target) \
	emith_jump_cond(cond, target)
#define emith_jump_cond_inrange(target) \
	((u8 *)target - (u8 *)tcache_ptr - 4 < 0x20000 && \
	 (u8 *)target - (u8 *)tcache_ptr - 4 >= -0x20000+0x10) // mind cond_check
// NB: returns position of patch for cache maintenance
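// for a b-type insn only the 16 bit displacement field is replaced
// (mask 0xffff0000 keeps opcode and registers); for a j-type insn the
// 26 bit target field is replaced (mask 0xfc000000 keeps the opcode)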
#define emith_jump_patch(ptr, target, pos) do { \
	u32 *ptr_ = (u32 *)ptr-1; /* must skip condition check code */ \
	u32 disp_, mask_; \
	while (!emith_is_j(*ptr_) && !emith_is_b(*ptr_)) ptr_++; \
	if (emith_is_b(*ptr_)) \
		mask_ = 0xffff0000, disp_ = (u8 *)target - (u8 *)ptr_ - 4; \
	else	mask_ = 0xfc000000, disp_ = (uintptr_t)target; \
	EMIT_PTR(ptr_, (*ptr_ & mask_) | ((disp_ >> 2) & ~mask_)); \
	if ((void *)(pos) != NULL) *(u8 **)(pos) = (u8 *)(ptr_-1); \
} while (0)
#define emith_jump_patch_inrange(ptr, target) \
	((u8 *)target - (u8 *)ptr - 4 < 0x20000 && \
	 (u8 *)target - (u8 *)ptr - 4 >= -0x20000+0x10) // mind cond_check
#define emith_jump_patch_size() 4
#define emith_jump_at(ptr, target) do { \
	u32 *ptr_ = (u32 *)ptr; \
	EMIT_PTR(ptr_, MIPS_J((uintptr_t)target & 0x0fffffff)); \
	EMIT_PTR(ptr_, MIPS_NOP); \
} while (0)
#define emith_jump_at_size() 8
#define emith_jump_reg(r) \
	emith_branch(MIPS_JR(r))
#define emith_jump_reg_c(cond, r) \
	emith_jump_reg(r)
#define emith_jump_ctx(offs) do { \
	emith_ctx_read_ptr(AT, offs); \
	emith_jump_reg(AT); \
} while (0)
#define emith_jump_ctx_c(cond, offs) \
	emith_jump_ctx(offs)
#define emith_call(target) \
	emith_branch(MIPS_JAL((uintptr_t)target & 0x0fffffff))
#define emith_call_cond(cond, target) \
	emith_call(target)
#define emith_call_reg(r) \
	emith_branch(MIPS_JALR(LR, r))
#define emith_call_ctx(offs) do { \
	emith_ctx_read_ptr(AT, offs); \
	emith_call_reg(AT); \
} while (0)
#define emith_call_cleanup() /**/
#define emith_ret() \
	emith_branch(MIPS_JR(LR))
#define emith_ret_c(cond) \
	emith_ret()
#define emith_ret_to_ctx(offs) \
	emith_ctx_write_ptr(LR, offs)
#define emith_add_r_ret(r) \
	emith_add_r_r_ptr(r, LR)
// NB: ABI SP alignment is 8 for 64 bit, O32 has a 16 byte arg save area
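// frame layout: 8+16 bytes are allocated; r and LR go into the top
// 2*PTR_SIZE bytes, which keeps the bottom 16 bytes free for the O32
// arg save area and the frame size a multiple of 8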
#define emith_push_ret(r) do { \
	int offs_ = 8+16 - 2*PTR_SIZE; \
	emith_add_r_r_ptr_imm(SP, SP, -8-16); \
	emith_write_r_r_offs_ptr(LR, SP, offs_ + PTR_SIZE); \
	if ((r) > 0) emith_write_r_r_offs(r, SP, offs_); \
} while (0)
#define emith_pop_and_ret(r) do { \
	int offs_ = 8+16 - 2*PTR_SIZE; \
	if ((r) > 0) emith_read_r_r_offs(r, SP, offs_); \
	emith_read_r_r_offs_ptr(LR, SP, offs_ + PTR_SIZE); \
	emith_add_r_r_ptr_imm(SP, SP, 8+16); \
	emith_ret(); \
} while (0)
// emitter ABI stuff
#define emith_pool_check() /**/
#define emith_pool_commit(j) /**/
#define emith_update_cache() /**/
#define emith_rw_offs_max() 0x7fff /* max. signed 16 bit offset in ld/st */
#define emith_uext_ptr(r) /**/
#if __mips_isa_rev >= 2 && defined(MIPS_USE_SYNCI) && defined(__GNUC__)
// this should normally be in libc clear_cache; however, it sometimes isn't.
// core function taken from SYNCI description, MIPS32 instruction set manual
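// rdhwr $1 yields the SYNCI step (0 means no synci is needed); the
// range is then synci'd in cache-line steps, followed by sync. the
// bal/jr.hb detour exists only to get the instruction hazard barrier
// of jr.hb on the way out, since jr.hb needs a return address register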
static NOINLINE void host_instructions_updated(void *base, void *end, int force)
{
	int step, tmp;
	asm volatile(
	"	bal	0f;"		// needed to allow for jr.hb
	"	b	3f;"
	"0:	rdhwr	%2, $1;"
	"	beqz	%2, 2f;"
	"1:	synci	0(%0);"
	"	sltu	%3, %0, %1;"
	"	addu	%0, %0, %2;"
	"	bnez	%3, 1b;"
	"	sync;"
	"2:	jr.hb	$ra;"
	"3:	" : "+r"(base), "+r"(end), "=r"(step), "=r"(tmp) :: "$31");
}
#else
#define host_instructions_updated(base, end, force) __builtin___clear_cache(base, end)
#endif
// SH2 drc specific
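// save mask 0xd0ff0000 = callee saved regs s0-s7 (16-23), gp (28),
// fp/s8 (30) and ra (31); bit 0 is or'ed in as a dummy slot when the
// reg count is odd, so the 32 bit frame stays a multiple of 8 bytes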
#define emith_sh2_drc_entry() do { \
	int _c, _z = PTR_SIZE; u32 _m = 0xd0ff0000; \
	if (__builtin_parity(_m) == 1) _m |= 0x1; /* ABI align for SP is 8 */ \
	int _s = count_bits(_m) * _z + 16, _o = _s; /* 16 O32 arg save area */ \
	if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
	for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
		if (_m & (1 << _c)) \
			{ _o -= _z; if (_c) emith_write_r_r_offs_ptr(_c, SP, _o); } \
} while (0)
#define emith_sh2_drc_exit() do { \
	int _c, _z = PTR_SIZE; u32 _m = 0xd0ff0000; \
	if (__builtin_parity(_m) == 1) _m |= 0x1; \
	int _s = count_bits(_m) * _z + 16, _o = 16; \
	for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
		if (_m & (1 << _c)) \
			{ if (_c) emith_read_r_r_offs_ptr(_c, SP, _o); _o += _z; } \
	if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
	emith_ret(); \
} while (0)
// NB: assumes a is in arg0, tab, func and mask are temp
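// reads the (func, mask) pair for address a from the memory map in tab.
// the final addf doubles func; this presumably both recovers a memory
// pointer stored >>1 and pushes a handler flag in the top bit into the
// emulated carry, so the caller can branch on it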
#define emith_sh2_rcall(a, tab, func, mask) do { \
	emith_lsr(mask, a, SH2_READ_SHIFT); \
	emith_add_r_r_r_lsl_ptr(tab, tab, mask, PTR_SCALE+1); \
	emith_read_r_r_offs_ptr(func, tab, 0); \
	emith_read_r_r_offs(mask, tab, (1 << PTR_SCALE)); \
	emith_addf_r_r_r_ptr(func, func, func); \
} while (0)
// NB: assumes a, val are in arg0 and arg1, tab and func are temp
#define emith_sh2_wcall(a, val, tab, func) do { \
	emith_lsr(func, a, SH2_WRITE_SHIFT); \
	emith_lsl(func, func, PTR_SCALE); \
	emith_read_r_r_r_ptr(func, tab, func); \
	emith_move_r_r_ptr(6, CONTEXT_REG); /* arg2 */ \
	emith_jump_reg(func); \
} while (0)
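// fastforward a SH2 delay loop: turns = sr.cycles/cycles is computed
// by multiplying with the 32 bit reciprocal of cycles (the high word
// of the 64 bit product is the quotient), then turns*cycles is
// subtracted from sr.cycles in one go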
#define emith_sh2_delay_loop(cycles, reg) do { \
	int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL); \
	int t1 = rcache_get_tmp(); \
	int t2 = rcache_get_tmp(); \
	int t3 = rcache_get_tmp(); \
	/* if (sr < 0) return */ \
	emith_cmp_r_imm(sr, 0); \
	EMITH_JMP_START(DCOND_LE); \
	/* turns = sr.cycles / cycles */ \
	emith_asr(t2, sr, 12); \
	emith_move_r_imm(t3, (u32)((1ULL<<32) / (cycles)) + 1); \
	emith_mul_u64(t1, t2, t2, t3); /* multiply by 1/x */ \
	rcache_free_tmp(t3); \
	if (reg >= 0) { \
		/* if (reg <= turns) turns = reg-1 */ \
		t3 = rcache_get_reg(reg, RC_GR_RMW, NULL); \
		emith_cmp_r_r(t3, t2); \
		EMITH_SJMP_START(DCOND_HI); \
		emith_sub_r_r_imm_c(DCOND_LS, t2, t3, 1); \
		EMITH_SJMP_END(DCOND_HI); \
		/* if (reg <= 1) turns = 0 */ \
		emith_cmp_r_imm(t3, 1); \
		EMITH_SJMP_START(DCOND_HI); \
		emith_move_r_imm_c(DCOND_LS, t2, 0); \
		EMITH_SJMP_END(DCOND_HI); \
		/* reg -= turns */ \
		emith_sub_r_r(t3, t2); \
	} \
	/* sr.cycles -= turns * cycles; */ \
	emith_move_r_imm(t1, cycles); \
	emith_mul(t1, t2, t1); \
	emith_sub_r_r_r_lsl(sr, sr, t1, 12); \
	EMITH_JMP_END(DCOND_LE); \
	rcache_free_tmp(t1); \
	rcache_free_tmp(t2); \
} while (0)
/*
 * T = !carry(Rn = (Rn << 1) | T)
 * if Q
 *   C = carry(Rn += Rm)
 * else
 *   C = carry(Rn -= Rm)
 * T ^= C
 */
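// NB: the carry of the add/sub is computed manually with sltu: after
// rn = t_ + rm, carry <=> rn < t_; after rn = t_ - rm, borrow <=> t_ < rn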
#define emith_sh2_div1_step(rn, rm, sr) do { \
	int t_ = rcache_get_tmp(); \
	emith_and_r_r_imm(AT, sr, T); \
	emith_lsr(FC, rn, 31); /* Rn = (Rn<<1)+T */ \
	emith_lsl(t_, rn, 1); \
	emith_or_r_r(t_, AT); \
	emith_or_r_imm(sr, T); /* T = !carry */ \
	emith_eor_r_r(sr, FC); \
	emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
	EMITH_JMP3_START(DCOND_EQ); \
	emith_add_r_r_r(rn, t_, rm); \
	EMIT(MIPS_SLTU_REG(FC, rn, t_)); \
	EMITH_JMP3_MID(DCOND_EQ); \
	emith_sub_r_r_r(rn, t_, rm); \
	EMIT(MIPS_SLTU_REG(FC, t_, rn)); \
	EMITH_JMP3_END(); \
	emith_eor_r_r(sr, FC); /* T ^= carry */ \
	rcache_free_tmp(t_); \
} while (0)
/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macl(ml, mh, rn, rm, sr) do { \
	emith_tst_r_imm(sr, S); \
	EMITH_SJMP_START(DCOND_EQ); \
	/* MACH top 16 bits unused if saturated. sign ext for overfl detect */ \
	emith_sext(mh, mh, 16); \
	EMITH_SJMP_END(DCOND_EQ); \
	emith_mula_s64(ml, mh, rn, rm); \
	emith_tst_r_imm(sr, S); \
	EMITH_SJMP_START(DCOND_EQ); \
	/* overflow if top 17 bits of MACH aren't all 1 or 0 */ \
	/* to check: add MACH >> 31 to MACH >> 15. this is 0 if no overflow */ \
	emith_asr(rn, mh, 15); \
	emith_add_r_r_r_lsr(rn, rn, mh, 31); /* sum = (MACH>>31)+(MACH>>15) */ \
	emith_teq_r_r(rn, Z0); /* (need only N and Z flags) */ \
	EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> ov */ \
	emith_move_r_imm_c(DCOND_NE, ml, 0x0000); /* -overflow */ \
	emith_move_r_imm_c(DCOND_NE, mh, 0x8000); \
	EMITH_SJMP_START(DCOND_PL); /* sum > 0 -> +ovl */ \
	emith_sub_r_imm_c(DCOND_MI, ml, 1); /* 0xffffffff */ \
	emith_sub_r_imm_c(DCOND_MI, mh, 1); /* 0x00007fff */ \
	EMITH_SJMP_END(DCOND_PL); \
	EMITH_SJMP_END(DCOND_EQ); \
	EMITH_SJMP_END(DCOND_EQ); \
} while (0)
/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macw(ml, mh, rn, rm, sr) do { \
	emith_tst_r_imm(sr, S); \
	EMITH_SJMP_START(DCOND_EQ); \
	/* XXX: MACH should be untouched when S is set? */ \
	emith_asr(mh, ml, 31); /* sign ext MACL to MACH for ovrfl check */ \
	EMITH_SJMP_END(DCOND_EQ); \
	emith_mula_s64(ml, mh, rn, rm); \
	emith_tst_r_imm(sr, S); \
	EMITH_SJMP_START(DCOND_EQ); \
	/* overflow if top 33 bits of MACH:MACL aren't all 1 or 0 */ \
	/* to check: add MACL[31] to MACH. this is 0 if no overflow */ \
	emith_lsr(rn, ml, 31); \
	emith_add_r_r(rn, mh); /* sum = MACH + ((MACL>>31)&1) */ \
	emith_teq_r_r(rn, Z0); /* (need only N and Z flags) */ \
	EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> overflow */ \
	/* XXX: LSB signalling only in SH1, or in SH2 too? */ \
	emith_move_r_imm_c(DCOND_NE, mh, 0x00000001); /* LSB of MACH */ \
	emith_move_r_imm_c(DCOND_NE, ml, 0x80000000); /* negative ovrfl */ \
	EMITH_SJMP_START(DCOND_PL); /* sum > 0 -> positive ovrfl */ \
	emith_sub_r_imm_c(DCOND_MI, ml, 1); /* 0x7fffffff */ \
	EMITH_SJMP_END(DCOND_PL); \
	EMITH_SJMP_END(DCOND_EQ); \
	EMITH_SJMP_END(DCOND_EQ); \
} while (0)
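// replace the lower 10 bits of SR (M,Q,I3-I0,S,T) with those of srcr;
// INS does this in one insn on mips32r2+, else mask and merge by shifts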
#define emith_write_sr(sr, srcr) do { \
	if (__mips_isa_rev < 2) { \
		emith_lsr(sr, sr, 10); emith_lsl(sr, sr, 10); \
		emith_lsl(AT, srcr, 22); emith_lsr(AT, AT, 22); \
		emith_or_r_r(sr, AT); \
	} else	EMIT(MIPS_INS_IMM(sr, srcr, 0, 10)); \
} while (0)
#define emith_carry_to_t(sr, is_sub) do { \
	if (__mips_isa_rev < 2) { \
		emith_and_r_imm(sr, 0xfffffffe); \
		emith_or_r_r(sr, FC); \
	} else	EMIT(MIPS_INS_IMM(sr, FC, 0, 1)); \
} while (0)
#define emith_t_to_carry(sr, is_sub) do { \
	emith_and_r_r_imm(FC, sr, 1); \
} while (0)
#define emith_tpop_carry(sr, is_sub) do { \
	emith_and_r_r_imm(FC, sr, 1); \
	emith_eor_r_r(sr, FC); \
} while (0)
#define emith_tpush_carry(sr, is_sub) \
	emith_or_r_r(sr, FC)
#ifdef T
// T bit handling
#define emith_invert_cond(cond) \
	((cond) ^ 1)
static void emith_clr_t_cond(int sr)
{
	emith_bic_r_imm(sr, T);
}
static void emith_set_t_cond(int sr, int cond)
{
	int b, r;
	u8 *ptr;
	u32 val = 0, inv = 0;
	// try to avoid jumping around if possible
	if (emith_cmp_rs >= 0) {
		if (emith_cmp_rt >= 0)
			b = emith_cmpr_check(emith_cmp_rs, emith_cmp_rt, cond, &r);
		else
			b = emith_cmpi_check(emith_cmp_rs, emith_cmp_imm, cond, &r);
		// XXX this relies on the inner workings of cmp_check...
		if (r == AT)
			// result of slt check which returns either 0 or 1 in AT
			val++, inv = (b == MIPS_BEQ);
	} else {
		b = emith_cond_check(cond, &r);
		if (r == Z0) {
			if (b == MIPS_BEQ || b == MIPS_BLE || b == MIPS_BGE)
				emith_or_r_imm(sr, T);
			return;
		} else if (r == FC)
			val++, inv = (b == MIPS_BEQ);
	}
	if (!val) switch (b) { // cases: b..z r, aka cmp r,Z0 or cmp r,#0
	case MIPS_BEQ: EMIT(MIPS_SLTU_IMM(AT, r, 1)); r = AT; val++; break;
	case MIPS_BNE: EMIT(MIPS_SLTU_REG(AT, Z0, r)); r = AT; val++; break;
	case MIPS_BLT: EMIT(MIPS_SLT_REG(AT, r, Z0)); r = AT; val++; break;
	case MIPS_BGE: EMIT(MIPS_SLT_REG(AT, r, Z0)); r = AT; val++; inv++; break;
	case MIPS_BLE: EMIT(MIPS_SLT_REG(AT, Z0, r)); r = AT; val++; inv++; break;
	case MIPS_BGT: EMIT(MIPS_SLT_REG(AT, Z0, r)); r = AT; val++; break;
	default: // cases: beq/bne r,s, aka cmp r,s
		if ((b>>5) == OP_BEQ) {
			EMIT(MIPS_XOR_REG(AT, r, b&0x1f));
			EMIT(MIPS_SLTU_IMM(AT, AT, 1)); r = AT; val++; break;
		} else if ((b>>5) == OP_BNE) {
			EMIT(MIPS_XOR_REG(AT, r, b&0x1f));
			EMIT(MIPS_SLTU_REG(AT, Z0, AT)); r = AT; val++; break;
		}
	}
	if (val) {
		emith_or_r_r(sr, r);
		if (inv)
			emith_eor_r_imm(sr, T);
		return;
	}
	// can't obtain result directly, use presumably slower jump !cond + or sr,T
	b = emith_invert_branch(b);
	ptr = emith_branch(MIPS_BCONDZ(b, r, 0));
	emith_or_r_imm(sr, T);
	emith_flush(); // prohibit delay slot switching across jump targets
	val = (u8 *)tcache_ptr - (u8 *)(ptr) - 4;
	EMIT_PTR(ptr, MIPS_BCONDZ(b, r, val & 0x0003ffff));
}
#define emith_get_t_cond() -1
#define emith_sync_t(sr) ((void)sr)
#define emith_invalidate_t()
static void emith_set_t(int sr, int val)
{
	if (val)
		emith_or_r_imm(sr, T);
	else
		emith_bic_r_imm(sr, T);
}
static int emith_tst_t(int sr, int tf)
{
	emith_tst_r_imm(sr, T);
	return tf ? DCOND_NE : DCOND_EQ;
}
#endif