emit_mips.c
/*
 * Basic macros to emit MIPS32/MIPS64 Release 1 or 2 instructions and some utils
 * Copyright (C) 2019 kub
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 */
#define HOST_REGS 32
// MIPS32 ABI: params: r4-r7, return: r2-r3, temp: r1(at),r8-r15,r24-r25,r31(ra)
// saved: r16-r23,r30, reserved: r0(zero), r26-r27(irq), r28(gp), r29(sp)
// r1,r15,r24,r25(at,t7-t9) are used internally by the code emitter
// MIPSN32/MIPS64 ABI: params: r4-r11, no caller-reserved save area on stack
// for PIC code, on function calls r25(t9) must contain the called address
#define RET_REG 2 // v0
#define PARAM_REGS { 4, 5, 6, 7 } // a0-a3
#define PRESERVED_REGS { 16, 17, 18, 19, 20, 21, 22, 23 } // s0-s7
#define TEMPORARY_REGS { 2, 3, 8, 9, 10, 11, 12, 13, 14 } // v0-v1,t0-t6
#define CONTEXT_REG 23 // s7
#define STATIC_SH2_REGS { SHR_SR,22 , SHR_R(0),21 , SHR_R(1),20 }
// NB: the ubiquitous JZ74[46]0 uses MIPS32 Release 1, a slight MIPS II superset
#ifndef __mips_isa_rev
#define __mips_isa_rev 1 // surprisingly not always defined
#endif
// registers usable for user code: r1-r25, others reserved or special
#define Z0 0 // zero register
#define CR 25 // call register
#define GP 28 // global pointer
#define SP 29 // stack pointer
#define FP 30 // frame pointer
#define LR 31 // link register
// internally used by code emitter:
#define AT 1 // used to hold intermediate results
#define FNZ 15 // emulated processor flags: N (bit 31), Z (all bits)
#define FC 24 // emulated processor flags: C (bit 0), others 0
#define FV 25 // emulated processor flags: Nt^Ns (bit 31), others x
// All operations except ptr ops use the lower 32 bits of the registers.
// The upper 32 bits always contain the sign extension from the lower 32 bits.
// unified conditions; virtual, not corresponding to anything real on MIPS
#define DCOND_EQ 0x0
#define DCOND_NE 0x1
#define DCOND_HS 0x2
#define DCOND_LO 0x3
#define DCOND_MI 0x4
#define DCOND_PL 0x5
#define DCOND_VS 0x6
#define DCOND_VC 0x7
#define DCOND_HI 0x8
#define DCOND_LS 0x9
#define DCOND_GE 0xa
#define DCOND_LT 0xb
#define DCOND_GT 0xc
#define DCOND_LE 0xd
#define DCOND_CS DCOND_LO
#define DCOND_CC DCOND_HS
// unified insn
#define MIPS_INSN(op, rs, rt, rd, sa, fn) \
    (((op)<<26)|((rs)<<21)|((rt)<<16)|((rd)<<11)|((sa)<<6)|((fn)<<0))
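// Worked example (illustrative): MIPS_INSN(OP__FN, 3, 4, 2, 0, FN_ADDU)
// = (0<<26)|(3<<21)|(4<<16)|(2<<11)|(0<<6)|(041<<0) = 0x00641021,
// the encoding of "addu $v0, $v1, $a0" (rd=$v0, rs=$v1, rt=$a0).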
#define _ 0 // marker for "field unused"
#define __(n) o##n // enum marker for "undefined"
// opcode field (encoded in op)
enum { OP__FN=000, OP__RT, OP_J, OP_JAL, OP_BEQ, OP_BNE, OP_BLEZ, OP_BGTZ };
enum { OP_ADDI=010, OP_ADDIU, OP_SLTI, OP_SLTIU, OP_ANDI, OP_ORI, OP_XORI, OP_LUI };
enum { OP_DADDI=030, OP_DADDIU, OP_LDL, OP_LDR, OP__FN2=034, OP__FN3=037 };
enum { OP_LB=040, OP_LH, OP_LWL, OP_LW, OP_LBU, OP_LHU, OP_LWR, OP_LWU };
enum { OP_SB=050, OP_SH, OP_SWL, OP_SW, OP_SDL, OP_SDR, OP_SWR };
enum { OP_SD=067, OP_LD=077 };
// function field (encoded in fn if opcode = OP__FN)
enum { FN_SLL=000, __(01), FN_SRL, FN_SRA, FN_SLLV, __(05), FN_SRLV, FN_SRAV };
enum { FN_JR=010, FN_JALR, FN_MOVZ, FN_MOVN, FN_SYNC=017 };
enum { FN_MFHI=020, FN_MTHI, FN_MFLO, FN_MTLO, FN_DSLLV, __(25), FN_DSRLV, FN_DSRAV };
enum { FN_MULT=030, FN_MULTU, FN_DIV, FN_DIVU, FN_DMULT, FN_DMULTU, FN_DDIV, FN_DDIVU };
enum { FN_ADD=040, FN_ADDU, FN_SUB, FN_SUBU, FN_AND, FN_OR, FN_XOR, FN_NOR };
enum { FN_SLT=052, FN_SLTU, FN_DADD, FN_DADDU, FN_DSUB, FN_DSUBU };
enum { FN_DSLL=070, __(71), FN_DSRL, FN_DSRA, FN_DSLL32, __(75), FN_DSRL32, FN_DSRA32 };
// function field (encoded in fn if opcode = OP__FN2)
enum { FN2_MADD=000, FN2_MADDU, FN2_MUL, __(03), FN2_MSUB, FN2_MSUBU };
enum { FN2_CLZ=040, FN2_CLO, FN2_DCLZ=044, FN2_DCLO };
// function field (encoded in fn if opcode = OP__FN3)
enum { FN3_EXT=000, FN3_DEXTM, FN3_DEXTU, FN3_DEXT, FN3_INS, FN3_DINSM, FN3_DINSU, FN3_DINS };
enum { FN3_BSHFL=040, FN3_DBSHFL=044 };
// rt field (encoded in rt if opcode = OP__RT)
enum { RT_BLTZ=000, RT_BGEZ, RT_BLTZAL=020, RT_BGEZAL, RT_SYNCI=037 };
// bit shuffle function (encoded in sa if function = FN3_BSHFL)
enum { BS_SBH=002, BS_SHD=005, BS_SEB=020, BS_SEH=030 };
// r (rotate) bit function (encoded in rs/sa if function = FN_SRL/FN_SRLV)
enum { RB_SRL=0, RB_ROTR=1 };
#define MIPS_NOP 000 // null operation: SLL r0, r0, #0
// arithmetic/logical
#define MIPS_OP_REG(op, sa, rd, rs, rt) \
    MIPS_INSN(OP__FN, rs, rt, rd, sa, op) // R-type, SPECIAL
#define MIPS_OP2_REG(op, sa, rd, rs, rt) \
    MIPS_INSN(OP__FN2, rs, rt, rd, sa, op) // R-type, SPECIAL2
#define MIPS_OP3_REG(op, sa, rd, rs, rt) \
    MIPS_INSN(OP__FN3, rs, rt, rd, sa, op) // R-type, SPECIAL3
#define MIPS_OP_IMM(op, rt, rs, imm) \
    MIPS_INSN(op, rs, rt, _, _, (u16)(imm)) // I-type
// rd = rs OP rt
#define MIPS_ADD_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_ADDU,_, rd, rs, rt)
#define MIPS_DADD_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_DADDU,_, rd, rs, rt)
#define MIPS_SUB_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_SUBU,_, rd, rs, rt)
#define MIPS_DSUB_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_DSUBU,_, rd, rs, rt)
#define MIPS_NEG_REG(rd, rt) \
    MIPS_SUB_REG(rd, Z0, rt)
#define MIPS_XOR_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_XOR,_, rd, rs, rt)
#define MIPS_OR_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_OR,_, rd, rs, rt)
#define MIPS_AND_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_AND,_, rd, rs, rt)
#define MIPS_NOR_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_NOR,_, rd, rs, rt)
#define MIPS_MOVE_REG(rd, rs) \
    MIPS_OR_REG(rd, rs, Z0)
#define MIPS_MVN_REG(rd, rs) \
    MIPS_NOR_REG(rd, rs, Z0)
// rd = rt SHIFT rs
#define MIPS_LSL_REG(rd, rt, rs) \
    MIPS_OP_REG(FN_SLLV,_, rd, rs, rt)
#define MIPS_LSR_REG(rd, rt, rs) \
    MIPS_OP_REG(FN_SRLV,RB_SRL, rd, rs, rt)
#define MIPS_ASR_REG(rd, rt, rs) \
    MIPS_OP_REG(FN_SRAV,_, rd, rs, rt)
#define MIPS_ROR_REG(rd, rt, rs) \
    MIPS_OP_REG(FN_SRLV,RB_ROTR, rd, rs, rt)
#define MIPS_SEB_REG(rd, rt) \
    MIPS_OP3_REG(FN3_BSHFL, BS_SEB, rd, _, rt)
#define MIPS_SEH_REG(rd, rt) \
    MIPS_OP3_REG(FN3_BSHFL, BS_SEH, rd, _, rt)
#define MIPS_EXT_IMM(rt, rs, lsb, sz) \
    MIPS_OP3_REG(FN3_EXT, lsb, (sz)-1, rs, rt)
#define MIPS_INS_IMM(rt, rs, lsb, sz) \
    MIPS_OP3_REG(FN3_INS, lsb, (lsb)+(sz)-1, rs, rt)
// rd = (rs < rt)
#define MIPS_SLT_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_SLT,_, rd, rs, rt)
#define MIPS_SLTU_REG(rd, rs, rt) \
    MIPS_OP_REG(FN_SLTU,_, rd, rs, rt)
// rt = rs OP imm16
#define MIPS_ADD_IMM(rt, rs, imm16) \
    MIPS_OP_IMM(OP_ADDIU, rt, rs, imm16)
#define MIPS_DADD_IMM(rt, rs, imm16) \
    MIPS_OP_IMM(OP_DADDIU, rt, rs, imm16)
#define MIPS_XOR_IMM(rt, rs, imm16) \
    MIPS_OP_IMM(OP_XORI, rt, rs, imm16)
#define MIPS_OR_IMM(rt, rs, imm16) \
    MIPS_OP_IMM(OP_ORI, rt, rs, imm16)
#define MIPS_AND_IMM(rt, rs, imm16) \
    MIPS_OP_IMM(OP_ANDI, rt, rs, imm16)
// rt = (imm16 << (0|16))
#define MIPS_MOV_IMM(rt, imm16) \
    MIPS_OP_IMM(OP_ORI, rt, Z0, imm16)
#define MIPS_MOVT_IMM(rt, imm16) \
    MIPS_OP_IMM(OP_LUI, rt, _, imm16)
// rd = rt SHIFT imm5
#define MIPS_LSL_IMM(rd, rt, bits) \
    MIPS_INSN(OP__FN, _, rt, rd, bits, FN_SLL)
#define MIPS_LSR_IMM(rd, rt, bits) \
    MIPS_INSN(OP__FN, RB_SRL, rt, rd, bits, FN_SRL)
#define MIPS_ASR_IMM(rd, rt, bits) \
    MIPS_INSN(OP__FN, _, rt, rd, bits, FN_SRA)
#define MIPS_ROR_IMM(rd, rt, bits) \
    MIPS_INSN(OP__FN, RB_ROTR, rt, rd, bits, FN_SRL)
#define MIPS_DLSL_IMM(rd, rt, bits) \
    MIPS_INSN(OP__FN, _, rt, rd, bits, FN_DSLL)
#define MIPS_DLSL32_IMM(rd, rt, bits) \
    MIPS_INSN(OP__FN, _, rt, rd, bits, FN_DSLL32)
// rt = (rs < imm16)
#define MIPS_SLT_IMM(rt, rs, imm16) \
    MIPS_OP_IMM(OP_SLTI, rt, rs, imm16)
#define MIPS_SLTU_IMM(rt, rs, imm16) \
    MIPS_OP_IMM(OP_SLTIU, rt, rs, imm16)
// multiplication
#define MIPS_MULT(rt, rs) \
    MIPS_OP_REG(FN_MULT,_, _, rs, rt)
#define MIPS_MULTU(rt, rs) \
    MIPS_OP_REG(FN_MULTU,_, _, rs, rt)
#define MIPS_MADD(rt, rs) \
    MIPS_OP2_REG(FN2_MADD,_, _, rs, rt)
#define MIPS_MADDU(rt, rs) \
    MIPS_OP2_REG(FN2_MADDU,_, _, rs, rt)
#define MIPS_MFLO(rd) \
    MIPS_OP_REG(FN_MFLO,_, rd, _, _)
#define MIPS_MFHI(rd) \
    MIPS_OP_REG(FN_MFHI,_, rd, _, _)
// branching
#define MIPS_J(abs26) \
    MIPS_INSN(OP_J, _,_,_,_, (abs26) >> 2) // J-type
#define MIPS_JAL(abs26) \
    MIPS_INSN(OP_JAL, _,_,_,_, (abs26) >> 2)
#define MIPS_JR(rs) \
    MIPS_OP_REG(FN_JR,_, _,rs,_)
#define MIPS_JALR(rd, rs) \
    MIPS_OP_REG(FN_JALR,_, rd,rs,_)
// conditional branches; no condition code, these compare rs against rt or Z0
#define MIPS_BEQ (OP_BEQ << 5) // rs == rt (rt in lower 5 bits)
#define MIPS_BNE (OP_BNE << 5) // rs != rt (ditto)
#define MIPS_BLE (OP_BLEZ << 5) // rs <= 0
#define MIPS_BGT (OP_BGTZ << 5) // rs > 0
#define MIPS_BLT ((OP__RT << 5)|RT_BLTZ) // rs < 0
#define MIPS_BGE ((OP__RT << 5)|RT_BGEZ) // rs >= 0
#define MIPS_BLTL ((OP__RT << 5)|RT_BLTZAL) // rs < 0, link $ra if jumping
#define MIPS_BGEL ((OP__RT << 5)|RT_BGEZAL) // rs >= 0, link $ra if jumping
#define MIPS_BCOND(cond, rs, rt, offs16) \
    MIPS_OP_IMM((cond >> 5), rt, rs, (offs16) >> 2)
#define MIPS_BCONDZ(cond, rs, offs16) \
    MIPS_OP_IMM((cond >> 5), (cond & 0x1f), rs, (offs16) >> 2)
#define MIPS_B(offs16) \
    MIPS_BCONDZ(MIPS_BEQ, Z0, offs16)
#define MIPS_BL(offs16) \
    MIPS_BCONDZ(MIPS_BGEL, Z0, offs16)
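// Worked example (illustrative): the branch "cond" values pack the opcode and
// rt field into one int, so MIPS_BCONDZ(MIPS_BLT, 4, -8) expands to
// MIPS_OP_IMM(OP__RT, RT_BLTZ, 4, -2), i.e. "bltz $a0, .-4"; the 16-bit
// offset counts words relative to the delay slot.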
// load/store indexed base
#define MIPS_LD(rt, rs, offs16) \
    MIPS_OP_IMM(OP_LD, rt, rs, (u16)(offs16))
#define MIPS_LW(rt, rs, offs16) \
    MIPS_OP_IMM(OP_LW, rt, rs, (u16)(offs16))
#define MIPS_LH(rt, rs, offs16) \
    MIPS_OP_IMM(OP_LH, rt, rs, (u16)(offs16))
#define MIPS_LB(rt, rs, offs16) \
    MIPS_OP_IMM(OP_LB, rt, rs, (u16)(offs16))
#define MIPS_LHU(rt, rs, offs16) \
    MIPS_OP_IMM(OP_LHU, rt, rs, (u16)(offs16))
#define MIPS_LBU(rt, rs, offs16) \
    MIPS_OP_IMM(OP_LBU, rt, rs, (u16)(offs16))
#define MIPS_SD(rt, rs, offs16) \
    MIPS_OP_IMM(OP_SD, rt, rs, (u16)(offs16))
#define MIPS_SW(rt, rs, offs16) \
    MIPS_OP_IMM(OP_SW, rt, rs, (u16)(offs16))
#define MIPS_SH(rt, rs, offs16) \
    MIPS_OP_IMM(OP_SH, rt, rs, (u16)(offs16))
#define MIPS_SB(rt, rs, offs16) \
    MIPS_OP_IMM(OP_SB, rt, rs, (u16)(offs16))
// pointer operations
#if _MIPS_SZPTR == 64
#define OP_LP OP_LD
#define OP_SP OP_SD
#define OP_PADDIU OP_DADDIU
#define FN_PADDU FN_DADDU
#define FN_PSUBU FN_DSUBU
#define PTR_SCALE 3
#else
#define OP_LP OP_LW
#define OP_SP OP_SW
#define OP_PADDIU OP_ADDIU
#define FN_PADDU FN_ADDU
#define FN_PSUBU FN_SUBU
#define PTR_SCALE 2
#endif
#define PTR_SIZE (1<<PTR_SCALE)
// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
    do { \
        *(u32 *)(ptr) = x; \
        ptr = (void *)((u8 *)(ptr) + sizeof(u32)); \
    } while (0)
// FIFO for some instructions, for delay slot handling
#define FSZ 4
static u32 emith_last_insns[FSZ];
static unsigned emith_last_idx, emith_last_cnt;
#define EMIT_PUSHOP() \
    do { \
        if (emith_last_cnt > 0) { \
            u32 *p = (u32 *)tcache_ptr - emith_last_cnt; \
            int idx = (emith_last_idx - emith_last_cnt+1) %FSZ; \
            EMIT_PTR(p, emith_last_insns[idx]); \
            emith_last_cnt--; \
        } \
    } while (0)
#define EMIT(op) \
    do { \
        if (emith_last_cnt >= FSZ) EMIT_PUSHOP(); \
        tcache_ptr = (void *)((u32 *)tcache_ptr + 1); \
        emith_last_idx = (emith_last_idx+1) %FSZ; \
        emith_last_insns[emith_last_idx] = op; \
        emith_last_cnt++; \
        COUNT_OP; \
    } while (0)
#define emith_flush() \
    do { \
        while (emith_last_cnt) EMIT_PUSHOP(); \
        emith_flg_hint = _FHV|_FHC; \
    } while (0)
#define emith_insn_ptr() (u8 *)((u32 *)tcache_ptr - emith_last_cnt)
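// How the FIFO works (illustrative): EMIT() advances tcache_ptr but parks the
// insn in emith_last_insns[] instead of writing it out; once the FIFO holds
// FSZ insns, the oldest is flushed to memory by EMIT_PUSHOP(). This keeps the
// most recent insns available so emith_branch() below can pull one into a
// branch delay slot. emith_insn_ptr() hence points at the first insn not yet
// written to the translation cache.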
// delay slot stuff
static int emith_is_j(u32 op) // J, JAL
    { return ((op>>26) & 076) == OP_J; }
static int emith_is_jr(u32 op) // JR, JALR
    { return (op>>26) == OP__FN && (op & 076) == FN_JR; }
static int emith_is_b(u32 op) // B
    { return ((op>>26) & 074) == OP_BEQ ||
             ((op>>26) == OP__RT && ((op>>16) & 036) == RT_BLTZ); }
// register usage for dependency evaluation XXX better do this as in emit_arm?
static uint64_t emith_has_rs[5] = // OP__FN1-3, OP__RT, others
    { 0x005ffcffffda0fd2ULL, 0x0000003300000037ULL, 0x00000000000000ffULL,
      0x800f5f0fUL, 0xf7ffffff0ff07ff0ULL };
static uint64_t emith_has_rt[5] = // OP__FN1-3, OP__RT, others
    { 0xdd5ffcffffd00cddULL, 0x0000000000000037ULL, 0x0000001100000000ULL,
      0x00000000UL, 0x80007f440c300030ULL };
static uint64_t emith_has_rd[5] = // OP__FN1-3, OP__RT, others (rt instead of rd)
    { 0xdd00fcff00d50edfULL, 0x0000003300000004ULL, 0x08000011000000ffULL,
      0x00000000UL, 0x119100ff0f00ff00ULL };
#define emith_has_(rx,ix,op,sa,m) \
    (emith_has_##rx[ix] & (1ULL << (((op)>>(sa)) & (m))))
static int emith_rs(u32 op)
{
    if ((op>>26) == OP__FN)
        return emith_has_(rs,0,op, 0,0x3f) ? (op>>21)&0x1f : 0;
    if ((op>>26) == OP__FN2)
        return emith_has_(rs,1,op, 0,0x3f) ? (op>>21)&0x1f : 0;
    if ((op>>26) == OP__FN3)
        return emith_has_(rs,2,op, 0,0x3f) ? (op>>21)&0x1f : 0;
    if ((op>>26) == OP__RT)
        return emith_has_(rs,3,op,16,0x1f) ? (op>>21)&0x1f : 0;
    return emith_has_(rs,4,op,26,0x3f) ? (op>>21)&0x1f : 0;
}
static int emith_rt(u32 op)
{
    if ((op>>26) == OP__FN)
        return emith_has_(rt,0,op, 0,0x3f) ? (op>>16)&0x1f : 0;
    if ((op>>26) == OP__FN2)
        return emith_has_(rt,1,op, 0,0x3f) ? (op>>16)&0x1f : 0;
    if ((op>>26) == OP__FN3)
        return emith_has_(rt,2,op, 0,0x3f) ? (op>>16)&0x1f : 0;
    if ((op>>26) == OP__RT)
        return 0;
    return emith_has_(rt,4,op,26,0x3f) ? (op>>16)&0x1f : 0;
}
static int emith_rd(u32 op)
{
    int ret = emith_has_(rd,4,op,26,0x3f) ? (op>>16)&0x1f : -1;
    if ((op>>26) == OP__FN)
        ret = emith_has_(rd,0,op, 0,0x3f) ? (op>>11)&0x1f : -1;
    if ((op>>26) == OP__FN2)
        ret = emith_has_(rd,1,op, 0,0x3f) ? (op>>11)&0x1f : -1;
    if ((op>>26) == OP__FN3 && (op&0x3f) == FN3_BSHFL)
        ret = emith_has_(rd,2,op, 0,0x3f) ? (op>>11)&0x1f : -1;
    if ((op>>26) == OP__FN3 && (op&0x3f) != FN3_BSHFL)
        ret = emith_has_(rd,2,op, 0,0x3f) ? (op>>16)&0x1f : -1;
    if ((op>>26) == OP__RT)
        ret = -1;
    return (ret ?: -1); // Z0 doesn't have dependencies
}
static int emith_b_isswap(u32 bop, u32 lop)
{
    if (emith_is_j(bop))
        return bop;
    else if (emith_is_jr(bop) && emith_rd(lop) != emith_rs(bop))
        return bop;
    else if (emith_is_b(bop) && emith_rd(lop) != emith_rs(bop) &&
                                emith_rd(lop) != emith_rt(bop))
        if ((bop & 0xffff) != 0x7fff) // displacement overflow?
            return (bop & 0xffff0000) | ((bop+1) & 0x0000ffff);
    return 0;
}
static int emith_insn_swappable(u32 op1, u32 op2)
{
    if (emith_rd(op1) != emith_rd(op2) &&
        emith_rs(op1) != emith_rd(op2) && emith_rt(op1) != emith_rd(op2) &&
        emith_rs(op2) != emith_rd(op1) && emith_rt(op2) != emith_rd(op1))
        return 1;
    return 0;
}
// emit branch, trying to fill the delay slot with one of the last insns
static void *emith_branch(u32 op)
{
    unsigned idx = emith_last_idx, ds = idx;
    u32 bop = 0, sop;
    void *bp;
    int i, j, s;
    // check for ds insn; older mustn't interact with newer ones to overtake
    for (i = 0; i < emith_last_cnt && !bop; i++) {
        ds = (idx-i)%FSZ;
        sop = emith_last_insns[ds];
        for (j = i, s = 1; j > 0 && s; j--)
            s = emith_insn_swappable(emith_last_insns[(ds+j)%FSZ], sop);
        if (s)
            bop = emith_b_isswap(op, sop);
    }
    // flush FIFO, but omit delay slot insn
    tcache_ptr = (void *)((u32 *)tcache_ptr - emith_last_cnt);
    idx = (idx-emith_last_cnt+1)%FSZ;
    for (i = emith_last_cnt; i > 0; i--, idx = (idx+1)%FSZ)
        if (!bop || idx != ds)
            EMIT_PTR(tcache_ptr, emith_last_insns[idx]);
    emith_last_cnt = 0;
    // emit branch and delay slot
    bp = tcache_ptr;
    if (bop) { // can swap
        EMIT_PTR(tcache_ptr, bop); COUNT_OP;
        EMIT_PTR(tcache_ptr, emith_last_insns[ds]);
    } else { // can't swap
        EMIT_PTR(tcache_ptr, op); COUNT_OP;
        EMIT_PTR(tcache_ptr, MIPS_NOP); COUNT_OP;
    }
    return bp;
}
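// Delay slot filling (illustrative): suppose the FIFO ends with
//   addu $t0, $t1, $t2
// and "beq $v0, $0, target" is emitted via emith_branch(). Since the addu
// neither feeds nor clobbers $v0, emith_b_isswap() accepts it: the branch is
// written first with its 16-bit word displacement incremented by one (the
// branch now sits one insn earlier in the stream), and the addu becomes the
// delay slot insn instead of a nop.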
// if-then-else conditional execution helpers
#define JMP_POS(ptr) \
    ptr = emith_branch(MIPS_BCONDZ(cond_m, cond_r, 0));
#define JMP_EMIT(cond, ptr) { \
    u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr) - 4; \
    emith_flush(); /* prohibit delay slot switching across jump targets */ \
    EMIT_PTR(ptr, MIPS_BCONDZ(cond_m, cond_r, val_ & 0x0003ffff)); \
}
#define JMP_EMIT_NC(ptr) { \
    u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr) - 4; \
    emith_flush(); \
    EMIT_PTR(ptr, MIPS_B(val_ & 0x0003ffff)); \
}
#define EMITH_JMP_START(cond) { \
    int cond_r, cond_m = emith_cond_check(cond, &cond_r); \
    u8 *cond_ptr; \
    JMP_POS(cond_ptr)
#define EMITH_JMP_END(cond) \
    JMP_EMIT(cond, cond_ptr); \
}
#define EMITH_JMP3_START(cond) { \
    int cond_r, cond_m = emith_cond_check(cond, &cond_r); \
    u8 *cond_ptr, *else_ptr; \
    JMP_POS(cond_ptr)
#define EMITH_JMP3_MID(cond) \
    JMP_POS(else_ptr); \
    JMP_EMIT(cond, cond_ptr);
#define EMITH_JMP3_END() \
    JMP_EMIT_NC(else_ptr); \
}
// "simple" jump (no more than a few insns)
// ARM32 will use conditional instructions here
#define EMITH_SJMP_START EMITH_JMP_START
#define EMITH_SJMP_END EMITH_JMP_END
#define EMITH_SJMP3_START EMITH_JMP3_START
#define EMITH_SJMP3_MID EMITH_JMP3_MID
#define EMITH_SJMP3_END EMITH_JMP3_END
#define EMITH_SJMP2_START(cond) \
    EMITH_SJMP3_START(cond)
#define EMITH_SJMP2_MID(cond) \
    EMITH_SJMP3_MID(cond)
#define EMITH_SJMP2_END(cond) \
    EMITH_SJMP3_END()
// flag register emulation. this is modelled after arm/x86.
// the FNZ register stores the result of the last flag setting operation for
// N and Z flag, used for EQ,NE,MI,PL branches.
// the FC register stores the C flag (used for HI,HS,LO,LS,CC,CS).
// the FV register stores information for V flag calculation (used for
// GT,GE,LT,LE,VC,VS). V flag is costly and only fully calculated when needed.
// the core registers may be temp registers, since the condition after calls
// is undefined anyway.
// flag emulation creates 2 (e.g. cmp #0/beq) to 9 (e.g. adcf/ble) extra insns.
// flag handling shortcuts may reduce this by 1-4 insns, see emith_cond_check()
static int emith_cmp_rs, emith_cmp_rt; // registers used in cmp_r_r/cmp_r_imm
static s32 emith_cmp_imm; // immediate value used in cmp_r_imm
enum { _FHC=1, _FHV=2 } emith_flg_hint; // C/V flag usage hinted by compiler
static int emith_flg_noV; // V flag known not to be set
#define EMITH_HINT_COND(cond) do { \
    /* only need to check cond>>1 since the lowest bit inverts the cond */ \
    unsigned _mv = BITMASK3(DCOND_VS>>1,DCOND_GE>>1,DCOND_GT>>1); \
    unsigned _mc = _mv | BITMASK2(DCOND_HS>>1,DCOND_HI>>1); \
    emith_flg_hint = (_mv & BITMASK1(cond >> 1) ? _FHV : 0); \
    emith_flg_hint |= (_mc & BITMASK1(cond >> 1) ? _FHC : 0); \
} while (0)
// store minimal cc information: rd, rt^rs, carry
// NB: the result *must* first go to FNZ, in case rd == rs or rd == rt.
// NB: for adcf and sbcf, carry-in must be dealt with separately (see there)
static void emith_set_arith_flags(int rd, int rs, int rt, s32 imm, int sub)
{
    if (emith_flg_hint & _FHC) {
        if (sub) // C = sub: rs < rd(result), add: rd(result) < rs
            EMIT(MIPS_SLTU_REG(FC, rs, FNZ));
        else EMIT(MIPS_SLTU_REG(FC, FNZ, rs)); // C in FC, bit 0
    }
    if (emith_flg_hint & _FHV) {
        emith_flg_noV = 0;
        if (rt > Z0) // Nt^Ns in FV, bit 31
            EMIT(MIPS_XOR_REG(FV, rs, rt));
        else if (rt == Z0 || imm == 0)
            emith_flg_noV = 1; // imm #0 can't overflow
        else if ((imm < 0) == !sub)
            EMIT(MIPS_NOR_REG(FV, rs, Z0));
        else if ((imm > 0) == !sub)
            EMIT(MIPS_XOR_REG(FV, rs, Z0));
    }
    // full V = Nd^Nt^Ns^C calculation is deferred until really needed
    if (rd && rd != FNZ)
        EMIT(MIPS_MOVE_REG(rd, FNZ)); // N,Z via result value in FNZ
    emith_cmp_rs = emith_cmp_rt = -1;
}
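// Worked example (illustrative): for a subf with rs=3, rt=5 the result in FNZ
// is 3-5 = 0xfffffffe; SLTU(FC, rs, FNZ) sets FC = (3 < 0xfffffffe) = 1, the
// borrow. For an add with rs=0xffffffff, rt=2, FNZ = 1 and SLTU(FC, FNZ, rs)
// sets FC = (1 < 0xffffffff) = 1, the carry-out.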
// since MIPS has less-than and compare-branch insns, handle cmp separately by
// storing the involved regs for later use in one of those MIPS insns.
// This works for all conditions except VC/VS, which fortunately are never used.
static void emith_set_compare_flags(int rs, int rt, s32 imm)
{
    emith_cmp_rt = rt;
    emith_cmp_rs = rs;
    emith_cmp_imm = imm;
}
// data processing, register
#define emith_move_r_r_ptr(d, s) \
    EMIT(MIPS_MOVE_REG(d, s))
#define emith_move_r_r_ptr_c(cond, d, s) \
    emith_move_r_r_ptr(d, s)
#define emith_move_r_r(d, s) \
    emith_move_r_r_ptr(d, s)
#define emith_move_r_r_c(cond, d, s) \
    emith_move_r_r(d, s)
#define emith_mvn_r_r(d, s) \
    EMIT(MIPS_MVN_REG(d, s))
#define emith_add_r_r_r_lsl_ptr(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
        EMIT(MIPS_OP_REG(FN_PADDU,_, d, s1, AT)); \
    } else EMIT(MIPS_OP_REG(FN_PADDU,_, d, s1, s2)); \
} while (0)
#define emith_add_r_r_r_lsl(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
        EMIT(MIPS_ADD_REG(d, s1, AT)); \
    } else EMIT(MIPS_ADD_REG(d, s1, s2)); \
} while (0)
#define emith_add_r_r_r_lsr(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSR_IMM(AT, s2, simm)); \
        EMIT(MIPS_ADD_REG(d, s1, AT)); \
    } else EMIT(MIPS_ADD_REG(d, s1, s2)); \
} while (0)
#define emith_addf_r_r_r_lsl_ptr(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
        EMIT(MIPS_OP_REG(FN_PADDU,_, FNZ, s1, AT)); \
        emith_set_arith_flags(d, s1, AT, 0, 0); \
    } else { \
        EMIT(MIPS_OP_REG(FN_PADDU,_, FNZ, s1, s2)); \
        emith_set_arith_flags(d, s1, s2, 0, 0); \
    } \
} while (0)
#define emith_addf_r_r_r_lsl(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
        EMIT(MIPS_ADD_REG(FNZ, s1, AT)); \
        emith_set_arith_flags(d, s1, AT, 0, 0); \
    } else { \
        EMIT(MIPS_ADD_REG(FNZ, s1, s2)); \
        emith_set_arith_flags(d, s1, s2, 0, 0); \
    } \
} while (0)
#define emith_addf_r_r_r_lsr(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSR_IMM(AT, s2, simm)); \
        EMIT(MIPS_ADD_REG(FNZ, s1, AT)); \
        emith_set_arith_flags(d, s1, AT, 0, 0); \
    } else { \
        EMIT(MIPS_ADD_REG(FNZ, s1, s2)); \
        emith_set_arith_flags(d, s1, s2, 0, 0); \
    } \
} while (0)
#define emith_sub_r_r_r_lsl(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
        EMIT(MIPS_SUB_REG(d, s1, AT)); \
    } else EMIT(MIPS_SUB_REG(d, s1, s2)); \
} while (0)
#define emith_subf_r_r_r_lsl(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
        EMIT(MIPS_SUB_REG(FNZ, s1, AT)); \
        emith_set_arith_flags(d, s1, AT, 0, 1); \
    } else { \
        EMIT(MIPS_SUB_REG(FNZ, s1, s2)); \
        emith_set_arith_flags(d, s1, s2, 0, 1); \
    } \
} while (0)
#define emith_or_r_r_r_lsl(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
        EMIT(MIPS_OR_REG(d, s1, AT)); \
    } else EMIT(MIPS_OR_REG(d, s1, s2)); \
} while (0)
#define emith_or_r_r_r_lsr(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSR_IMM(AT, s2, simm)); \
        EMIT(MIPS_OR_REG(d, s1, AT)); \
    } else EMIT(MIPS_OR_REG(d, s1, s2)); \
} while (0)
#define emith_eor_r_r_r_lsl(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
        EMIT(MIPS_XOR_REG(d, s1, AT)); \
    } else EMIT(MIPS_XOR_REG(d, s1, s2)); \
} while (0)
#define emith_eor_r_r_r_lsr(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSR_IMM(AT, s2, simm)); \
        EMIT(MIPS_XOR_REG(d, s1, AT)); \
    } else EMIT(MIPS_XOR_REG(d, s1, s2)); \
} while (0)
#define emith_and_r_r_r_lsl(d, s1, s2, simm) do { \
    if (simm) { \
        EMIT(MIPS_LSL_IMM(AT, s2, simm)); \
        EMIT(MIPS_AND_REG(d, s1, AT)); \
    } else EMIT(MIPS_AND_REG(d, s1, s2)); \
} while (0)
#define emith_or_r_r_lsl(d, s, lslimm) \
    emith_or_r_r_r_lsl(d, d, s, lslimm)
#define emith_or_r_r_lsr(d, s, lsrimm) \
    emith_or_r_r_r_lsr(d, d, s, lsrimm)
#define emith_eor_r_r_lsl(d, s, lslimm) \
    emith_eor_r_r_r_lsl(d, d, s, lslimm)
#define emith_eor_r_r_lsr(d, s, lsrimm) \
    emith_eor_r_r_r_lsr(d, d, s, lsrimm)
#define emith_add_r_r_r(d, s1, s2) \
    emith_add_r_r_r_lsl(d, s1, s2, 0)
#define emith_addf_r_r_r_ptr(d, s1, s2) \
    emith_addf_r_r_r_lsl_ptr(d, s1, s2, 0)
#define emith_addf_r_r_r(d, s1, s2) \
    emith_addf_r_r_r_lsl(d, s1, s2, 0)
#define emith_sub_r_r_r(d, s1, s2) \
    emith_sub_r_r_r_lsl(d, s1, s2, 0)
#define emith_subf_r_r_r(d, s1, s2) \
    emith_subf_r_r_r_lsl(d, s1, s2, 0)
#define emith_or_r_r_r(d, s1, s2) \
    emith_or_r_r_r_lsl(d, s1, s2, 0)
#define emith_eor_r_r_r(d, s1, s2) \
    emith_eor_r_r_r_lsl(d, s1, s2, 0)
#define emith_and_r_r_r(d, s1, s2) \
    emith_and_r_r_r_lsl(d, s1, s2, 0)
#define emith_add_r_r_ptr(d, s) \
    emith_add_r_r_r_lsl_ptr(d, d, s, 0)
#define emith_add_r_r(d, s) \
    emith_add_r_r_r(d, d, s)
#define emith_sub_r_r(d, s) \
    emith_sub_r_r_r(d, d, s)
#define emith_neg_r_r(d, s) \
    EMIT(MIPS_NEG_REG(d, s))
#define emith_adc_r_r_r(d, s1, s2) do { \
    emith_add_r_r_r(AT, s2, FC); \
    emith_add_r_r_r(d, s1, AT); \
} while (0)
#define emith_sbc_r_r_r(d, s1, s2) do { \
    emith_add_r_r_r(AT, s2, FC); \
    emith_sub_r_r_r(d, s1, AT); \
} while (0)
#define emith_adc_r_r(d, s) \
    emith_adc_r_r_r(d, d, s)
#define emith_negc_r_r(d, s) \
    emith_sbc_r_r_r(d, Z0, s)
// NB: the incoming carry Cin can cause Cout if s2+Cin=0 (or s1+Cin=0 FWIW)
// moreover, if s2+Cin=0 caused Cout, s1+s2+Cin=s1+0 can't cause another Cout
#define emith_adcf_r_r_r(d, s1, s2) do { \
    emith_add_r_r_r(FNZ, s2, FC); \
    EMIT(MIPS_SLTU_REG(AT, FNZ, FC)); \
    emith_add_r_r_r(FNZ, s1, FNZ); \
    emith_set_arith_flags(d, s1, s2, 0, 0); \
    emith_or_r_r(FC, AT); \
} while (0)
#define emith_sbcf_r_r_r(d, s1, s2) do { \
    emith_add_r_r_r(FNZ, s2, FC); \
    EMIT(MIPS_SLTU_REG(AT, FNZ, FC)); \
    emith_sub_r_r_r(FNZ, s1, FNZ); \
    emith_set_arith_flags(d, s1, s2, 0, 1); \
    emith_or_r_r(FC, AT); \
} while (0)
#define emith_and_r_r(d, s) \
    emith_and_r_r_r(d, d, s)
#define emith_and_r_r_c(cond, d, s) \
    emith_and_r_r(d, s)
#define emith_or_r_r(d, s) \
    emith_or_r_r_r(d, d, s)
#define emith_eor_r_r(d, s) \
    emith_eor_r_r_r(d, d, s)
#define emith_tst_r_r_ptr(d, s) do { \
    if (d != s) { \
        emith_and_r_r_r(FNZ, d, s); \
        emith_cmp_rs = emith_cmp_rt = -1; \
    } else emith_cmp_rs = s, emith_cmp_rt = Z0; \
} while (0)
#define emith_tst_r_r(d, s) \
    emith_tst_r_r_ptr(d, s)
#define emith_teq_r_r(d, s) do { \
    emith_eor_r_r_r(FNZ, d, s); \
    emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_cmp_r_r(d, s) \
    emith_set_compare_flags(d, s, 0)
//  emith_subf_r_r_r(FNZ, d, s)
#define emith_addf_r_r(d, s) \
    emith_addf_r_r_r(d, d, s)
#define emith_subf_r_r(d, s) \
    emith_subf_r_r_r(d, d, s)
#define emith_adcf_r_r(d, s) \
    emith_adcf_r_r_r(d, d, s)
#define emith_sbcf_r_r(d, s) \
    emith_sbcf_r_r_r(d, d, s)
#define emith_negcf_r_r(d, s) \
    emith_sbcf_r_r_r(d, Z0, s)
// move immediate
static void emith_move_imm(int r, uintptr_t imm)
{
#if _MIPS_SZPTR == 64
    if ((s32)imm != imm) {
        emith_move_imm(r, imm >> 32);
        if (imm & 0xffff0000) {
            EMIT(MIPS_DLSL_IMM(r, r, 16));
            EMIT(MIPS_OR_IMM(r, r, (imm >> 16) & 0xffff));
            EMIT(MIPS_DLSL_IMM(r, r, 16));
        } else EMIT(MIPS_DLSL32_IMM(r, r, 0));
        if (imm & 0x0000ffff)
            EMIT(MIPS_OR_IMM(r, r, imm & 0xffff));
    } else
#endif
    if ((s16)imm == imm) {
        EMIT(MIPS_ADD_IMM(r, Z0, imm));
    } else if (!((u32)imm >> 16)) {
        EMIT(MIPS_OR_IMM(r, Z0, imm));
    } else {
        int s = Z0;
        if ((u32)imm >> 16) {
            EMIT(MIPS_MOVT_IMM(r, (u32)imm >> 16));
            s = r;
        }
        if ((u16)imm)
            EMIT(MIPS_OR_IMM(r, s, (u16)imm));
    }
}
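// Constant materialization (illustrative): emith_move_imm(r, 0x12345678)
// takes the lui/ori path and emits
//   lui r, 0x1234
//   ori r, r, 0x5678
// while small values need only one insn, e.g. -5 -> "addiu r, $0, -5" and
// 0x8000 -> "ori r, $0, 0x8000".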
#define emith_move_r_ptr_imm(r, imm) \
    emith_move_imm(r, (uintptr_t)(imm))
#define emith_move_r_imm(r, imm) \
    emith_move_imm(r, (u32)(imm))
#define emith_move_r_imm_c(cond, r, imm) \
    emith_move_r_imm(r, imm)
#define emith_move_r_imm_s8_patchable(r, imm) \
    EMIT(MIPS_ADD_IMM(r, Z0, (s8)(imm)))
#define emith_move_r_imm_s8_patch(ptr, imm) do { \
    u32 *ptr_ = (u32 *)ptr; \
    while (*ptr_ >> 26 != OP_ADDIU) ptr_++; \
    EMIT_PTR(ptr_, (*ptr_ & 0xffff0000) | (u16)(s8)(imm)); \
} while (0)
// arithmetic, immediate - can only be ADDI[U], since SUBI[U] doesn't exist
static void emith_add_imm(int ptr, int rd, int rs, u32 imm)
{
    if ((s16)imm == imm) {
        if (imm || rd != rs)
            EMIT(MIPS_OP_IMM(ptr ? OP_PADDIU : OP_ADDIU, rd, rs, imm));
    } else if ((s32)imm < 0) {
        emith_move_r_imm(AT, -imm);
        EMIT(MIPS_OP_REG((ptr ? FN_PSUBU : FN_SUBU),_, rd, rs, AT));
    } else {
        emith_move_r_imm(AT, imm);
        EMIT(MIPS_OP_REG((ptr ? FN_PADDU : FN_ADDU),_, rd, rs, AT));
    }
}
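// Illustrative: emith_add_imm(0, r, r, 0xffff8000) takes the (s16) path
// ((s16)0x8000 == -0x8000 matches the sign-extended imm) and emits a single
// "addiu r, r, -0x8000"; a large positive imm like 0x12345 is first built in
// AT via lui/ori and then added with "addu r, r, AT".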
#define emith_add_r_imm(r, imm) \
    emith_add_r_r_imm(r, r, imm)
#define emith_add_r_imm_c(cond, r, imm) \
    emith_add_r_imm(r, imm)
#define emith_addf_r_imm(r, imm) \
    emith_addf_r_r_imm(r, imm)
#define emith_sub_r_imm(r, imm) \
    emith_sub_r_r_imm(r, r, imm)
#define emith_sub_r_imm_c(cond, r, imm) \
    emith_sub_r_imm(r, imm)
#define emith_subf_r_imm(r, imm) \
    emith_subf_r_r_imm(r, r, imm)
#define emith_adc_r_imm(r, imm) \
    emith_adc_r_r_imm(r, r, imm)
#define emith_adcf_r_imm(r, imm) \
    emith_adcf_r_r_imm(r, r, imm)
#define emith_cmp_r_imm(r, imm) \
    emith_set_compare_flags(r, -1, imm)
//  emith_subf_r_r_imm(FNZ, r, (s16)imm)
#define emith_add_r_r_ptr_imm(d, s, imm) \
    emith_add_imm(1, d, s, imm)
#define emith_add_r_r_imm(d, s, imm) \
    emith_add_imm(0, d, s, imm)
#define emith_addf_r_r_imm(d, s, imm) do { \
    emith_add_r_r_imm(FNZ, s, imm); \
    emith_set_arith_flags(d, s, -1, imm, 0); \
} while (0)
#define emith_adc_r_r_imm(d, s, imm) do { \
    emith_add_r_r_r(AT, s, FC); \
    emith_add_r_r_imm(d, AT, imm); \
} while (0)
#define emith_adcf_r_r_imm(d, s, imm) do { \
    if (imm == 0) { \
        emith_add_r_r_r(FNZ, s, FC); \
        emith_set_arith_flags(d, s, -1, 1, 0); \
    } else { \
        emith_add_r_r_r(FNZ, s, FC); \
        EMIT(MIPS_SLTU_REG(AT, FNZ, FC)); \
        emith_add_r_r_imm(FNZ, FNZ, imm); \
        emith_set_arith_flags(d, s, -1, imm, 0); \
        emith_or_r_r(FC, AT); \
    } \
} while (0)
// NB: no SUBI in MIPS II, since ADDI takes a signed imm
#define emith_sub_r_r_imm(d, s, imm) \
    emith_add_r_r_imm(d, s, -(imm))
#define emith_sub_r_r_imm_c(cond, d, s, imm) \
    emith_sub_r_r_imm(d, s, imm)
#define emith_subf_r_r_imm(d, s, imm) do { \
    emith_sub_r_r_imm(FNZ, s, imm); \
    emith_set_arith_flags(d, s, -1, imm, 1); \
} while (0)
// logical, immediate
static void emith_log_imm(int op, int rd, int rs, u32 imm)
{
    if (imm >> 16) {
        emith_move_r_imm(AT, imm);
        EMIT(MIPS_OP_REG(FN_AND + (op-OP_ANDI),_, rd, rs, AT));
    } else if (op == OP_ANDI || imm || rd != rs)
        EMIT(MIPS_OP_IMM(op, rd, rs, imm));
}
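// Illustrative: ANDI/ORI/XORI only take a zero-extended 16-bit imm, so
// emith_log_imm(OP_ORI, r, r, 0x10000) materializes the constant first:
//   lui AT, 0x0001
//   or  r, r, AT        // FN_AND + (OP_ORI-OP_ANDI) == FN_OR
// whereas imm <= 0xffff emits a single "ori r, r, imm".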
#define emith_and_r_imm(r, imm) \
    emith_log_imm(OP_ANDI, r, r, imm)
#define emith_or_r_imm(r, imm) \
    emith_log_imm(OP_ORI, r, r, imm)
#define emith_or_r_imm_c(cond, r, imm) \
    emith_or_r_imm(r, imm)
#define emith_eor_r_imm_ptr(r, imm) \
    emith_log_imm(OP_XORI, r, r, imm)
#define emith_eor_r_imm_ptr_c(cond, r, imm) \
    emith_eor_r_imm_ptr(r, imm)
#define emith_eor_r_imm(r, imm) \
    emith_eor_r_imm_ptr(r, imm)
#define emith_eor_r_imm_c(cond, r, imm) \
    emith_eor_r_imm(r, imm)
/* NB: BIC #imm not available in MIPS; use AND #~imm instead */
#define emith_bic_r_imm(r, imm) \
    emith_log_imm(OP_ANDI, r, r, ~(imm))
#define emith_bic_r_imm_c(cond, r, imm) \
    emith_bic_r_imm(r, imm)
#define emith_tst_r_imm(r, imm) do { \
    emith_log_imm(OP_ANDI, FNZ, r, imm); \
    emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_tst_r_imm_c(cond, r, imm) \
    emith_tst_r_imm(r, imm)
#define emith_and_r_r_imm(d, s, imm) \
    emith_log_imm(OP_ANDI, d, s, imm)
#define emith_or_r_r_imm(d, s, imm) \
    emith_log_imm(OP_ORI, d, s, imm)
#define emith_eor_r_r_imm(d, s, imm) \
    emith_log_imm(OP_XORI, d, s, imm)
// shift
#define emith_lsl(d, s, cnt) \
    EMIT(MIPS_LSL_IMM(d, s, cnt))
#define emith_lsr(d, s, cnt) \
    EMIT(MIPS_LSR_IMM(d, s, cnt))
#define emith_asr(d, s, cnt) \
    EMIT(MIPS_ASR_IMM(d, s, cnt))
#define emith_ror(d, s, cnt) do { \
    if (__mips_isa_rev < 2) { \
        EMIT(MIPS_LSL_IMM(AT, s, 32-(cnt))); \
        EMIT(MIPS_LSR_IMM(d, s, cnt)); \
        EMIT(MIPS_OR_REG(d, d, AT)); \
    } else EMIT(MIPS_ROR_IMM(d, s, cnt)); \
} while (0)
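// Illustrative: MIPS32r1 has no rotate insn, so emith_ror(d, s, 8)
// synthesizes it from shifts:
//   sll AT, s, 24
//   srl d, s, 8
//   or  d, d, AT
// on r2+ the same call emits a single "rotr d, s, 8".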
#define emith_ror_c(cond, d, s, cnt) \
    emith_ror(d, s, cnt)
#define emith_rol(d, s, cnt) do { \
    if (__mips_isa_rev < 2) { \
        EMIT(MIPS_LSR_IMM(AT, s, 32-(cnt))); \
        EMIT(MIPS_LSL_IMM(d, s, cnt)); \
        EMIT(MIPS_OR_REG(d, d, AT)); \
    } else EMIT(MIPS_ROR_IMM(d, s, 32-(cnt))); \
} while (0)
#define emith_rorc(d) do { \
    emith_lsr(d, d, 1); \
    emith_lsl(AT, FC, 31); \
    emith_or_r_r(d, AT); \
} while (0)
#define emith_rolc(d) do { \
    emith_lsl(d, d, 1); \
    emith_or_r_r(d, FC); \
} while (0)
// NB: all flag setting shifts make V undefined
#define emith_lslf(d, s, cnt) do { \
    int _s = s; \
    if ((cnt) > 1) { \
        emith_lsl(d, s, cnt-1); \
        _s = d; \
    } \
    if ((cnt) > 0) { \
        emith_lsr(FC, _s, 31); \
        emith_lsl(d, _s, 1); \
    } \
    emith_move_r_r(FNZ, d); \
    emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_lsrf(d, s, cnt) do { \
    int _s = s; \
    if ((cnt) > 1) { \
        emith_lsr(d, s, cnt-1); \
        _s = d; \
    } \
    if ((cnt) > 0) { \
        emith_and_r_r_imm(FC, _s, 1); \
        emith_lsr(d, _s, 1); \
    } \
    emith_move_r_r(FNZ, d); \
    emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_asrf(d, s, cnt) do { \
    int _s = s; \
    if ((cnt) > 1) { \
        emith_asr(d, s, cnt-1); \
        _s = d; \
    } \
    if ((cnt) > 0) { \
        emith_and_r_r_imm(FC, _s, 1); \
        emith_asr(d, _s, 1); \
    } \
    emith_move_r_r(FNZ, d); \
    emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_rolf(d, s, cnt) do { \
    emith_rol(d, s, cnt); \
    emith_and_r_r_imm(FC, d, 1); \
    emith_move_r_r(FNZ, d); \
    emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_rorf(d, s, cnt) do { \
    emith_ror(d, s, cnt); \
    emith_lsr(FC, d, 31); \
    emith_move_r_r(FNZ, d); \
    emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_rolcf(d) do { \
    emith_lsr(AT, d, 31); \
    emith_lsl(d, d, 1); \
    emith_or_r_r(d, FC); \
    emith_move_r_r(FC, AT); \
    emith_move_r_r(FNZ, d); \
    emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
#define emith_rorcf(d) do { \
    emith_and_r_r_imm(AT, d, 1); \
    emith_lsr(d, d, 1); \
    emith_lsl(FC, FC, 31); \
    emith_or_r_r(d, FC); \
    emith_move_r_r(FC, AT); \
    emith_move_r_r(FNZ, d); \
    emith_cmp_rs = emith_cmp_rt = -1; \
} while (0)
// signed/unsigned extend
#define emith_clear_msb(d, s, count) /* bits to clear */ do { \
    u32 t; \
    if (__mips_isa_rev >= 2) \
        EMIT(MIPS_EXT_IMM(d, s, 0, 32-(count))); \
    else if ((count) >= 16) { \
        t = (count) - 16; \
        t = 0xffff >> t; \
        emith_and_r_r_imm(d, s, t); \
    } else { \
        emith_lsl(d, s, count); \
        emith_lsr(d, d, count); \
    } \
} while (0)
#define emith_clear_msb_c(cond, d, s, count) \
    emith_clear_msb(d, s, count)
#define emith_sext(d, s, count) /* bits to keep */ do { \
    if (__mips_isa_rev >= 2 && count == 8) \
        EMIT(MIPS_SEB_REG(d, s)); \
    else if (__mips_isa_rev >= 2 && count == 16) \
        EMIT(MIPS_SEH_REG(d, s)); \
    else { \
        emith_lsl(d, s, 32-(count)); \
        emith_asr(d, d, 32-(count)); \
    } \
} while (0)
// multiply Rd = Rn*Rm (+ Ra); NB: next 2 insns after MFLO/MFHI mustn't be MULT
static u8 *last_lohi;
static void emith_lohi_nops(void)
{
    u32 d;
    while ((d = (u8 *)tcache_ptr - last_lohi) < 8 && d >= 0) EMIT(MIPS_NOP);
}
#define emith_mul(d, s1, s2) do { \
    emith_lohi_nops(); \
    EMIT(MIPS_MULTU(s1, s2)); \
    EMIT(MIPS_MFLO(d)); \
    last_lohi = (u8 *)tcache_ptr; \
} while (0)
#define emith_mul_u64(dlo, dhi, s1, s2) do { \
    emith_lohi_nops(); \
    EMIT(MIPS_MULTU(s1, s2)); \
    EMIT(MIPS_MFLO(dlo)); \
    EMIT(MIPS_MFHI(dhi)); \
    last_lohi = (u8 *)tcache_ptr; \
} while (0)
#define emith_mul_s64(dlo, dhi, s1, s2) do { \
    emith_lohi_nops(); \
    EMIT(MIPS_MULT(s1, s2)); \
    EMIT(MIPS_MFLO(dlo)); \
    EMIT(MIPS_MFHI(dhi)); \
    last_lohi = (u8 *)tcache_ptr; \
} while (0)
#define emith_mula_s64(dlo, dhi, s1, s2) do { \
    int t_ = rcache_get_tmp(); \
    emith_lohi_nops(); \
    EMIT(MIPS_MULT(s1, s2)); \
    EMIT(MIPS_MFLO(AT)); \
    EMIT(MIPS_MFHI(t_)); \
    last_lohi = (u8 *)tcache_ptr; \
    emith_add_r_r(dlo, AT); \
    EMIT(MIPS_SLTU_REG(AT, dlo, AT)); \
    emith_add_r_r(dhi, AT); \
    emith_add_r_r(dhi, t_); \
    rcache_free_tmp(t_); \
} while (0)
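// 64-bit accumulate (illustrative): after "dlo += lo", the carry into dhi is
// recovered with SLTU(AT, dlo, AT): e.g. dlo=0xffffffff, lo=1 wraps dlo to 0,
// and (0 < 1) = 1 is added to dhi together with the high product word.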
#define emith_mula_s64_c(cond, dlo, dhi, s1, s2) \
    emith_mula_s64(dlo, dhi, s1, s2)
// load/store. offs has 16 bits signed, which is currently sufficient
#define emith_read_r_r_offs_ptr(r, rs, offs) \
    EMIT(MIPS_OP_IMM(OP_LP, r, rs, offs))
#define emith_read_r_r_offs_ptr_c(cond, r, rs, offs) \
    emith_read_r_r_offs_ptr(r, rs, offs)
#define emith_read_r_r_offs(r, rs, offs) \
    EMIT(MIPS_LW(r, rs, offs))
#define emith_read_r_r_offs_c(cond, r, rs, offs) \
    emith_read_r_r_offs(r, rs, offs)
#define emith_read_r_r_r_ptr(r, rs, rm) do { \
    emith_add_r_r_r(AT, rs, rm); \
    EMIT(MIPS_OP_IMM(OP_LP, r, AT, 0)); \
} while (0)
#define emith_read_r_r_r(r, rs, rm) do { \
    emith_add_r_r_r(AT, rs, rm); \
    EMIT(MIPS_LW(r, AT, 0)); \
} while (0)
#define emith_read_r_r_r_c(cond, r, rs, rm) \
    emith_read_r_r_r(r, rs, rm)
#define emith_read8_r_r_offs(r, rs, offs) \
    EMIT(MIPS_LBU(r, rs, offs))
#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
    emith_read8_r_r_offs(r, rs, offs)
#define emith_read8_r_r_r(r, rs, rm) do { \
    emith_add_r_r_r(AT, rs, rm); \
    EMIT(MIPS_LBU(r, AT, 0)); \
} while (0)
#define emith_read8_r_r_r_c(cond, r, rs, rm) \
    emith_read8_r_r_r(r, rs, rm)
#define emith_read16_r_r_offs(r, rs, offs) \
    EMIT(MIPS_LHU(r, rs, offs))
#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
    emith_read16_r_r_offs(r, rs, offs)
#define emith_read16_r_r_r(r, rs, rm) do { \
    emith_add_r_r_r(AT, rs, rm); \
    EMIT(MIPS_LHU(r, AT, 0)); \
} while (0)
#define emith_read16_r_r_r_c(cond, r, rs, rm) \
    emith_read16_r_r_r(r, rs, rm)
#define emith_read8s_r_r_offs(r, rs, offs) \
    EMIT(MIPS_LB(r, rs, offs))
#define emith_read8s_r_r_offs_c(cond, r, rs, offs) \
    emith_read8s_r_r_offs(r, rs, offs)
#define emith_read8s_r_r_r(r, rs, rm) do { \
    emith_add_r_r_r(AT, rs, rm); \
    EMIT(MIPS_LB(r, AT, 0)); \
} while (0)
#define emith_read8s_r_r_r_c(cond, r, rs, rm) \
    emith_read8s_r_r_r(r, rs, rm)
#define emith_read16s_r_r_offs(r, rs, offs) \
    EMIT(MIPS_LH(r, rs, offs))
#define emith_read16s_r_r_offs_c(cond, r, rs, offs) \
    emith_read16s_r_r_offs(r, rs, offs)
#define emith_read16s_r_r_r(r, rs, rm) do { \
    emith_add_r_r_r(AT, rs, rm); \
    EMIT(MIPS_LH(r, AT, 0)); \
} while (0)
#define emith_read16s_r_r_r_c(cond, r, rs, rm) \
    emith_read16s_r_r_r(r, rs, rm)
#define emith_write_r_r_offs_ptr(r, rs, offs) \
    EMIT(MIPS_OP_IMM(OP_SP, r, rs, offs))
#define emith_write_r_r_offs_ptr_c(cond, r, rs, offs) \
    emith_write_r_r_offs_ptr(r, rs, offs)
#define emith_write_r_r_r_ptr(r, rs, rm) do { \
    emith_add_r_r_r(AT, rs, rm); \
    EMIT(MIPS_OP_IMM(OP_SP, r, AT, 0)); \
} while (0)
#define emith_write_r_r_r_ptr_c(cond, r, rs, rm) \
    emith_write_r_r_r_ptr(r, rs, rm)
#define emith_write_r_r_offs(r, rs, offs) \
    EMIT(MIPS_SW(r, rs, offs))
#define emith_write_r_r_offs_c(cond, r, rs, offs) \
    emith_write_r_r_offs(r, rs, offs)
#define emith_write_r_r_r(r, rs, rm) do { \
    emith_add_r_r_r(AT, rs, rm); \
    EMIT(MIPS_SW(r, AT, 0)); \
} while (0)
#define emith_write_r_r_r_c(cond, r, rs, rm) \
    emith_write_r_r_r(r, rs, rm)
#define emith_ctx_read_ptr(r, offs) \
    emith_read_r_r_offs_ptr(r, CONTEXT_REG, offs)
#define emith_ctx_read(r, offs) \
    emith_read_r_r_offs(r, CONTEXT_REG, offs)
#define emith_ctx_read_c(cond, r, offs) \
    emith_ctx_read(r, offs)
#define emith_ctx_write_ptr(r, offs) \
    emith_write_r_r_offs_ptr(r, CONTEXT_REG, offs)
#define emith_ctx_write(r, offs) \
    emith_write_r_r_offs(r, CONTEXT_REG, offs)
#define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
    int r_ = r, offs_ = offs, cnt_ = cnt; \
    for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
        emith_ctx_read(r_, offs_); \
} while (0)
#define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
    int r_ = r, offs_ = offs, cnt_ = cnt; \
    for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
        emith_ctx_write(r_, offs_); \
} while (0)
// function call handling
#define emith_save_caller_regs(mask) do { \
    int _c; u32 _m = mask & 0x300fffc; /* r2-r15,r24-r25 */ \
    if (__builtin_parity(_m) == 1) _m |= 0x1; /* ABI align */ \
    int _s = count_bits(_m) * 4, _o = _s; \
    if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
    for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
        if (_m & (1 << _c)) \
            { _o -= 4; if (_c) emith_write_r_r_offs(_c, SP, _o); } \
} while (0)
#define emith_restore_caller_regs(mask) do { \
    int _c; u32 _m = mask & 0x300fffc; \
    if (__builtin_parity(_m) == 1) _m |= 0x1; \
    int _s = count_bits(_m) * 4, _o = 0; \
    for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
        if (_m & (1 << _c)) \
            { if (_c) emith_read_r_r_offs(_c, SP, _o); _o += 4; } \
    if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
} while (0)
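// Stack alignment (illustrative): with a mask selecting r2,r3,r4 the parity
// of _m is odd, so bit 0 is set as padding; count_bits() then yields 4 slots
// (16 bytes, keeping 8-byte stack alignment), but the r0 slot is never
// actually stored or reloaded ("if (_c)" skips it).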
#define host_arg2reg(rd, arg) \
    rd = (arg+4)
#define emith_pass_arg_r(arg, reg) \
    emith_move_r_r(arg, reg)
#define emith_pass_arg_imm(arg, imm) \
    emith_move_r_imm(arg, imm)
// branching
#define emith_invert_branch(cond) /* inverted conditional branch */ \
    (((cond) >> 5) == OP__RT ? (cond) ^ 0x01 : (cond) ^ 0x20)
// evaluate the emulated condition, returns a register/branch type pair
static int emith_cmpr_check(int rs, int rt, int cond, int *r)
{
    int b = 0;
    // condition check for comparing 2 registers
    switch (cond) {
    case DCOND_EQ: *r = rs; b = MIPS_BEQ|rt; break;
    case DCOND_NE: *r = rs; b = MIPS_BNE|rt; break;
    case DCOND_LO: EMIT(MIPS_SLTU_REG(AT, rs, rt));
                   *r = AT, b = MIPS_BNE; break; // s <  t unsigned
    case DCOND_HS: EMIT(MIPS_SLTU_REG(AT, rs, rt));
                   *r = AT, b = MIPS_BEQ; break; // s >= t unsigned
    case DCOND_LS: EMIT(MIPS_SLTU_REG(AT, rt, rs));
                   *r = AT, b = MIPS_BEQ; break; // s <= t unsigned
    case DCOND_HI: EMIT(MIPS_SLTU_REG(AT, rt, rs));
                   *r = AT, b = MIPS_BNE; break; // s >  t unsigned
    case DCOND_LT: if (rt == 0) { *r = rs, b = MIPS_BLT; break; } // s <  0
                   EMIT(MIPS_SLT_REG(AT, rs, rt));
                   *r = AT, b = MIPS_BNE; break; // s <  t
    case DCOND_GE: if (rt == 0) { *r = rs, b = MIPS_BGE; break; } // s >= 0
                   EMIT(MIPS_SLT_REG(AT, rs, rt));
                   *r = AT, b = MIPS_BEQ; break; // s >= t
    case DCOND_LE: if (rt == 0) { *r = rs, b = MIPS_BLE; break; } // s <= 0
                   EMIT(MIPS_SLT_REG(AT, rt, rs));
                   *r = AT, b = MIPS_BEQ; break; // s <= t
    case DCOND_GT: if (rt == 0) { *r = rs, b = MIPS_BGT; break; } // s >  0
                   EMIT(MIPS_SLT_REG(AT, rt, rs));
                   *r = AT, b = MIPS_BNE; break; // s >  t
    }
    return b;
}
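// Illustrative: a DCOND_LT check of $t0 against $t1 emits "slt AT, $t0, $t1"
// and returns (MIPS_BNE, AT), which the branch emitters turn into
// "bne AT, $0, target"; against 0 it collapses to a single "bltz $t0, target".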
static int emith_cmpi_check(int rs, s32 imm, int cond, int *r)
{
    int b = 0;
    // condition check for comparing register with immediate
    if (imm == 0) return emith_cmpr_check(rs, Z0, cond, r);
    switch (cond) {
    case DCOND_EQ: emith_move_r_imm(AT, imm);
                   *r = rs; b = MIPS_BEQ|AT; break;
    case DCOND_NE: emith_move_r_imm(AT, imm);
                   *r = rs; b = MIPS_BNE|AT; break;
    case DCOND_LO: EMIT(MIPS_SLTU_IMM(AT, rs, imm));
                   *r = AT, b = MIPS_BNE; break; // s <  imm unsigned
    case DCOND_HS: EMIT(MIPS_SLTU_IMM(AT, rs, imm));
                   *r = AT, b = MIPS_BEQ; break; // s >= imm unsigned
    case DCOND_LS: emith_move_r_imm(AT, imm);
                   EMIT(MIPS_SLTU_REG(AT, AT, rs));
                   *r = AT, b = MIPS_BEQ; break; // s <= imm unsigned
    case DCOND_HI: emith_move_r_imm(AT, imm);
                   EMIT(MIPS_SLTU_REG(AT, AT, rs));
                   *r = AT, b = MIPS_BNE; break; // s >  imm unsigned
    case DCOND_LT: EMIT(MIPS_SLT_IMM(AT, rs, imm));
                   *r = AT, b = MIPS_BNE; break; // s <  imm
    case DCOND_GE: EMIT(MIPS_SLT_IMM(AT, rs, imm));
                   *r = AT, b = MIPS_BEQ; break; // s >= imm
    case DCOND_LE: emith_move_r_imm(AT, imm);
                   EMIT(MIPS_SLT_REG(AT, AT, rs));
                   *r = AT, b = MIPS_BEQ; break; // s <= imm
    case DCOND_GT: emith_move_r_imm(AT, imm);
                   EMIT(MIPS_SLT_REG(AT, AT, rs));
                   *r = AT, b = MIPS_BNE; break; // s >  imm
    }
    return b;
}
static int emith_cond_check(int cond, int *r)
{
    int b = 0;

    if (emith_cmp_rs >= 0) {
        if (emith_cmp_rt != -1)
            b = emith_cmpr_check(emith_cmp_rs, emith_cmp_rt,  cond, r);
        else
            b = emith_cmpi_check(emith_cmp_rs, emith_cmp_imm, cond, r);
    }

    // shortcut for V known to be 0
    if (!b && emith_flg_noV) switch (cond) {
    case DCOND_VS: *r = Z0;  b = MIPS_BNE; break;  // never
    case DCOND_VC: *r = Z0;  b = MIPS_BEQ; break;  // always
    case DCOND_LT: *r = FNZ, b = MIPS_BLT; break;  // N
    case DCOND_GE: *r = FNZ, b = MIPS_BGE; break;  // !N
    case DCOND_LE: *r = FNZ, b = MIPS_BLE; break;  // N || Z
    case DCOND_GT: *r = FNZ, b = MIPS_BGT; break;  // !N && !Z
    }

    // the full monty if no shortcut
    if (!b) switch (cond) {
    // conditions using NZ
    case DCOND_EQ: *r = FNZ; b = MIPS_BEQ; break;  // Z
    case DCOND_NE: *r = FNZ; b = MIPS_BNE; break;  // !Z
    case DCOND_MI: *r = FNZ; b = MIPS_BLT; break;  // N
    case DCOND_PL: *r = FNZ; b = MIPS_BGE; break;  // !N
    // conditions using C
    case DCOND_LO: *r = FC; b = MIPS_BNE; break;   // C
    case DCOND_HS: *r = FC; b = MIPS_BEQ; break;   // !C
    // conditions using CZ
    case DCOND_LS:                                 // C || Z
    case DCOND_HI:                                 // !C && !Z
        EMIT(MIPS_ADD_IMM(AT, FC, -1));            // !C && !Z
        EMIT(MIPS_AND_REG(AT, FNZ, AT));
        *r = AT, b = (cond == DCOND_HI ? MIPS_BNE : MIPS_BEQ);
        break;
    // conditions using V
    case DCOND_VS:                                 // V
    case DCOND_VC:                                 // !V
        EMIT(MIPS_XOR_REG(AT, FV, FNZ));           // V = Nt^Ns^Nd^C
        EMIT(MIPS_LSR_IMM(AT, AT, 31));
        EMIT(MIPS_XOR_REG(AT, AT, FC));
        *r = AT, b = (cond == DCOND_VS ? MIPS_BNE : MIPS_BEQ);
        break;
    // conditions using VNZ
    case DCOND_LT:                                 // N^V
    case DCOND_GE:                                 // !(N^V)
        EMIT(MIPS_LSR_IMM(AT, FV, 31));            // Nd^V = Nt^Ns^C
        EMIT(MIPS_XOR_REG(AT, FC, AT));
        *r = AT, b = (cond == DCOND_LT ? MIPS_BNE : MIPS_BEQ);
        break;
    case DCOND_LE:                                 // (N^V) || Z
    case DCOND_GT:                                 // !(N^V) && !Z
        EMIT(MIPS_LSR_IMM(AT, FV, 31));            // Nd^V = Nt^Ns^C
        EMIT(MIPS_XOR_REG(AT, FC, AT));
        EMIT(MIPS_ADD_IMM(AT, AT, -1));            // !(Nd^V) && !Z
        EMIT(MIPS_AND_REG(AT, FNZ, AT));
        *r = AT, b = (cond == DCOND_GT ? MIPS_BNE : MIPS_BEQ);
        break;
    }

    return b;
}
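// Illustrative only: the emulated flags live in host registers (FNZ holds
// the last result for N/Z, FC the carry as 0/1, FV operand sign info), so
// e.g. the DCOND_VS/VC path above boils down to this computation in AT:
#if 0
u32 v = ((FV_val ^ FNZ_val) >> 31) ^ FC_val;  // V = Nt^Ns^Nd^C, 0 or 1
// branch if v != 0 (VS) resp. v == 0 (VC); FV_val etc. are hypothetical
// C stand-ins for the register contents
#endif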
// NB: assumes all targets are in the same 256MB segment
#define emith_jump(target) \
    emith_branch(MIPS_J((uintptr_t)target & 0x0fffffff))
#define emith_jump_patchable(target) \
    emith_jump(target)

// NB: MIPS conditional branches have only +/- 128KB range
#define emith_jump_cond(cond, target) do { \
    int r_, mcond_ = emith_cond_check(cond, &r_); \
    u32 disp_ = (u8 *)target - (u8 *)tcache_ptr - 4; \
    emith_branch(MIPS_BCONDZ(mcond_, r_, disp_ & 0x0003ffff)); \
} while (0)
#define emith_jump_cond_patchable(cond, target) \
    emith_jump_cond(cond, target)

#define emith_jump_cond_inrange(target) \
    ((u8 *)target - (u8 *)tcache_ptr - 4 <  0x20000 && \
     (u8 *)target - (u8 *)tcache_ptr - 4 >= -0x20000+0x10) // mind cond_check
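// Illustrative note: a MIPS bcond has a signed 16 bit word offset, hence the
// +/-0x20000 byte window; the extra 0x10 on the negative side leaves room
// for the up to 4 insns (16 bytes) emith_cond_check may emit before the
// branch itself. A typical guarded use:
#if 0
if (emith_jump_cond_inrange(target))
    emith_jump_cond(cond, target);  // short bcond reaches the target
// else: invert the condition and branch around a region-wide emith_jump()
#endif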
// NB: returns position of patch for cache maintenance
#define emith_jump_patch(ptr, target, pos) do { \
    u32 *ptr_ = (u32 *)ptr-1; /* must skip condition check code */ \
    u32 disp_, mask_; \
    while (!emith_is_j(*ptr_) && !emith_is_b(*ptr_)) ptr_++; \
    if (emith_is_b(*ptr_)) \
        mask_ = 0xffff0000, disp_ = (u8 *)target - (u8 *)ptr_ - 4; \
    else \
        mask_ = 0xfc000000, disp_ = (uintptr_t)target; \
    EMIT_PTR(ptr_, (*ptr_ & mask_) | ((disp_ >> 2) & ~mask_)); \
    if ((void *)(pos) != NULL) *(u8 **)(pos) = (u8 *)(ptr_-1); \
} while (0)
#define emith_jump_patch_inrange(ptr, target) \
    ((u8 *)target - (u8 *)ptr - 4 <  0x20000 && \
     (u8 *)target - (u8 *)ptr - 4 >= -0x20000+0x10) // mind cond_check
#define emith_jump_patch_size() 4
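// Illustrative only: retargeting an already emitted jump. The loop scans
// forward from just before ptr to the first b/j insn (skipping condition
// check code), then rewrites its offset/target field in place; the returned
// position can be handed to the cache maintenance routine, e.g.:
#if 0
u8 *loc;
emith_jump_patch(jump_site, new_target, &loc);
host_instructions_updated(loc, loc + emith_jump_patch_size(), 1);
#endif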
#define emith_jump_at(ptr, target) do { \
    u32 *ptr_ = (u32 *)ptr; \
    EMIT_PTR(ptr_, MIPS_J((uintptr_t)target & 0x0fffffff)); \
    EMIT_PTR(ptr_, MIPS_NOP); \
} while (0)
#define emith_jump_at_size() 8

#define emith_jump_reg(r) \
    emith_branch(MIPS_JR(r))
#define emith_jump_reg_c(cond, r) \
    emith_jump_reg(r)

#define emith_jump_ctx(offs) do { \
    emith_ctx_read_ptr(CR, offs); \
    emith_jump_reg(CR); \
} while (0)
#define emith_jump_ctx_c(cond, offs) \
    emith_jump_ctx(offs)

#define emith_call(target) \
    emith_branch(MIPS_JAL((uintptr_t)target & 0x0fffffff))
#define emith_call_cond(cond, target) \
    emith_call(target)

#define emith_call_reg(r) \
    emith_branch(MIPS_JALR(LR, r))

#define emith_call_ctx(offs) do { \
    emith_ctx_read_ptr(CR, offs); \
    emith_call_reg(CR); \
} while (0)

#define emith_abijump_reg(r) do { \
    if ((r) != CR) emith_move_r_r(CR, r); \
    emith_branch(MIPS_JR(CR)); \
} while (0)
#define emith_abijump_reg_c(cond, r) \
    emith_abijump_reg(r)
#define emith_abicall(target) do { \
    emith_move_r_imm(CR, target); \
    emith_branch(MIPS_JALR(LR, CR)); \
} while (0)
#define emith_abicall_cond(cond, target) \
    emith_abicall(target)
#define emith_abicall_reg(r) do { \
    if ((r) != CR) emith_move_r_r(CR, r); \
    emith_branch(MIPS_JALR(LR, CR)); \
} while (0)

#define emith_call_cleanup() /**/

#define emith_ret() \
    emith_branch(MIPS_JR(LR))
#define emith_ret_c(cond) \
    emith_ret()

#define emith_ret_to_ctx(offs) \
    emith_ctx_write_ptr(LR, offs)

#define emith_add_r_ret(r) \
    emith_add_r_r_ptr(r, LR)

// NB: ABI SP alignment is 8 for 64 bit, O32 has a 16 byte arg save area
#define emith_push_ret(r) do { \
    int offs_ = 8+16 - 2*PTR_SIZE; \
    emith_add_r_r_ptr_imm(SP, SP, -8-16); \
    emith_write_r_r_offs_ptr(LR, SP, offs_ + PTR_SIZE); \
    if ((r) > 0) emith_write_r_r_offs(r, SP, offs_); \
} while (0)

#define emith_pop_and_ret(r) do { \
    int offs_ = 8+16 - 2*PTR_SIZE; \
    if ((r) > 0) emith_read_r_r_offs(r, SP, offs_); \
    emith_read_r_r_offs_ptr(LR, SP, offs_ + PTR_SIZE); \
    emith_add_r_r_ptr_imm(SP, SP, 8+16); \
    emith_ret(); \
} while (0)
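// Illustrative frame layout (assuming O32, PTR_SIZE == 4): 24 bytes are
// reserved; the low 16 form the ABI argument save area, r is stored at
// sp+16 and the return address at sp+20, keeping sp 8-byte aligned:
#if 0
// offs_ = 8+16 - 2*4 = 16
// sp+20: LR   sp+16: r   sp+0..15: arg save area
#endif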
// emitter ABI stuff
#define emith_pool_check()      /**/
#define emith_pool_commit(j)    /**/
#define emith_update_cache()    /**/
#define emith_rw_offs_max()     0x7fff
#define emith_uext_ptr(r)       /**/

#if __mips_isa_rev >= 2 && defined(MIPS_USE_SYNCI) && defined(__GNUC__)
// this should normally be in libc clear_cache; however, it sometimes isn't.
// core function taken from SYNCI description, MIPS32 instruction set manual
static NOINLINE void host_instructions_updated(void *base, void *end, int force)
{
    int step, tmp;
    asm volatile(
    "   rdhwr   %2, $1;"
    "   bal     0f;"                // needed to allow for jr.hb
    "0: addiu   $ra, $ra, 3f-0b;"   // set ra to insn after jr.hb
    "   beqz    %2, 3f;"

    "1: synci   0(%0);"
    "   sltu    %3, %0, %1;"
    "   addu    %0, %0, %2;"
    "   bnez    %3, 1b;"

    "   sync;"
    "2: jr.hb   $ra;"
    "3: " : "+r"(base), "+r"(end), "=r"(step), "=r"(tmp) :: "$31");
}
#else
#define host_instructions_updated(base, end, force) \
    __builtin___clear_cache(base, end)
#endif
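// Illustrative only: freshly emitted or patched code must be synced to the
// instruction fetch side before it may be executed, e.g. (handler is a
// hypothetical jump target):
#if 0
u8 *veneer = tcache_ptr;
emith_jump_at(veneer, handler);     // writes J + NOP at veneer
host_instructions_updated(veneer, veneer + emith_jump_at_size(), 1);
#endif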
// SH2 drc specific
#define emith_sh2_drc_entry() do { \
    int _c, _z = PTR_SIZE; u32 _m = 0xd0ff0000; \
    if (__builtin_parity(_m) == 1) _m |= 0x1; /* ABI align for SP is 8 */ \
    int _s = count_bits(_m) * _z + 16, _o = _s; /* 16 O32 arg save area */ \
    if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
    for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
        if (_m & (1 << _c)) \
            { _o -= _z; if (_c) emith_write_r_r_offs_ptr(_c, SP, _o); } \
} while (0)
#define emith_sh2_drc_exit() do { \
    int _c, _z = PTR_SIZE; u32 _m = 0xd0ff0000; \
    if (__builtin_parity(_m) == 1) _m |= 0x1; \
    int _s = count_bits(_m) * _z + 16, _o = 16; \
    for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
        if (_m & (1 << _c)) \
            { if (_c) emith_read_r_r_offs_ptr(_c, SP, _o); _o += _z; } \
    if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
    emith_ret(); \
} while (0)
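// Illustrative note: 0xd0ff0000 selects the callee-saved regs s0-s7 (16-23),
// gp (28), s8/fp (30) and ra (31). count_bits() is 11, i.e. odd, so bit 0
// ($zero) is added as pure padding to keep the frame 8-byte aligned with
// 32 bit pointers; the 'if (_c)' guard then skips the store/load for reg 0.
#if 0
// PTR_SIZE == 4: frame size = 12*4 + 16 = 64 bytes (16 = O32 arg save area)
#endif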
// NB: assumes a is in arg0, tab, func and mask are temp
#define emith_sh2_rcall(a, tab, func, mask) do { \
    emith_lsr(mask, a, SH2_READ_SHIFT); \
    emith_add_r_r_r_lsl_ptr(tab, tab, mask, PTR_SCALE+1); \
    emith_read_r_r_offs_ptr(func, tab, 0); \
    emith_read_r_r_offs(mask, tab, (1 << PTR_SCALE)); \
    emith_addf_r_r_r_ptr(func, func, func); \
} while (0)

// NB: assumes a, val are in arg0 and arg1, tab and func are temp
#define emith_sh2_wcall(a, val, tab, func) do { \
    emith_lsr(func, a, SH2_WRITE_SHIFT); \
    emith_lsl(func, func, PTR_SCALE); \
    emith_read_r_r_r_ptr(func, tab, func); \
    emith_move_r_r_ptr(6, CONTEXT_REG); /* arg2 */ \
    emith_abijump_reg(func); \
} while (0)
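// Illustrative only: handlers are dispatched through a table indexed by the
// upper address bits. For a write, address and value are already in the ABI
// argument regs and the handler is tail-called with the context as arg2:
#if 0
emith_sh2_wcall(4, 5, tab, func);  // a in $a0 (4), val in $a1 (5)
// -> func = tab[a >> SH2_WRITE_SHIFT]; $a2 = CONTEXT_REG; jr func
#endif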
#define emith_sh2_delay_loop(cycles, reg) do { \
    int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL); \
    int t1 = rcache_get_tmp(); \
    int t2 = rcache_get_tmp(); \
    int t3 = rcache_get_tmp(); \
    /* if (sr < 0) return */ \
    emith_cmp_r_imm(sr, 0); \
    EMITH_JMP_START(DCOND_LE); \
    /* turns = sr.cycles / cycles */ \
    emith_asr(t2, sr, 12); \
    emith_move_r_imm(t3, (u32)((1ULL<<32) / (cycles)) + 1); \
    emith_mul_u64(t1, t2, t2, t3); /* multiply by 1/x */ \
    rcache_free_tmp(t3); \
    if (reg >= 0) { \
        /* if (reg <= turns) turns = reg-1 */ \
        t3 = rcache_get_reg(reg, RC_GR_RMW, NULL); \
        emith_cmp_r_r(t3, t2); \
        EMITH_SJMP_START(DCOND_HI); \
        emith_sub_r_r_imm_c(DCOND_LS, t2, t3, 1); \
        EMITH_SJMP_END(DCOND_HI); \
        /* if (reg <= 1) turns = 0 */ \
        emith_cmp_r_imm(t3, 1); \
        EMITH_SJMP_START(DCOND_HI); \
        emith_move_r_imm_c(DCOND_LS, t2, 0); \
        EMITH_SJMP_END(DCOND_HI); \
        /* reg -= turns */ \
        emith_sub_r_r(t3, t2); \
    } \
    /* sr.cycles -= turns * cycles; */ \
    emith_move_r_imm(t1, cycles); \
    emith_mul(t1, t2, t1); \
    emith_sub_r_r_r_lsl(sr, sr, t1, 12); \
    EMITH_JMP_END(DCOND_LE); \
    rcache_free_tmp(t1); \
    rcache_free_tmp(t2); \
} while (0)
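// Illustrative note: the turns = cycles_left / cycles division is done by a
// fixed-point reciprocal multiply, t2 = (t2 * ceil(2^32/cycles)) >> 32,
// since emith_mul_u64 keeps only the high word. E.g. with cycles == 3:
#if 0
u32 recip = (u32)((1ULL << 32) / 3) + 1;      // 0x55555556
u32 turns = (u32)(((u64)100 * recip) >> 32);  // 100 cycles left -> 33 turns
#endif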
/*
 * T = !carry(Rn = (Rn << 1) | T)
 * if Q
 *   C = carry(Rn += Rm)
 * else
 *   C = carry(Rn -= Rm)
 * T ^= C
 */
#define emith_sh2_div1_step(rn, rm, sr) do { \
    int t_ = rcache_get_tmp(); \
    emith_and_r_r_imm(AT, sr, T); \
    emith_lsr(FC, rn, 31); /* Rn = (Rn<<1)+T */ \
    emith_lsl(t_, rn, 1); \
    emith_or_r_r(t_, AT); \
    emith_or_r_imm(sr, T); /* T = !carry */ \
    emith_eor_r_r(sr, FC); \
    emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
    EMITH_JMP3_START(DCOND_EQ); \
    emith_add_r_r_r(rn, t_, rm); \
    EMIT(MIPS_SLTU_REG(FC, rn, t_)); \
    EMITH_JMP3_MID(DCOND_EQ); \
    emith_sub_r_r_r(rn, t_, rm); \
    EMIT(MIPS_SLTU_REG(FC, t_, rn)); \
    EMITH_JMP3_END(); \
    emith_eor_r_r(sr, FC); /* T ^= carry */ \
    rcache_free_tmp(t_); \
} while (0)
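// Illustrative only: MIPS has no flags register, so the carry/borrow above
// is recovered from the unsigned wraparound with SLTU:
#if 0
u32 sum = a + b; int carry  = sum < a;  // add wrapped iff sum below addend
u32 dif = a - b; int borrow = a < dif;  // sub wrapped iff result above a
#endif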
/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macl(ml, mh, rn, rm, sr) do { \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* MACH top 16 bits unused if saturated. sign ext for overfl detect */ \
    emith_sext(mh, mh, 16); \
    EMITH_SJMP_END(DCOND_EQ); \
    emith_mula_s64(ml, mh, rn, rm); \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* overflow if top 17 bits of MACH aren't all 1 or 0 */ \
    /* to check: add MACH >> 31 to MACH >> 15. this is 0 if no overflow */ \
    emith_asr(rn, mh, 15); \
    emith_add_r_r_r_lsr(rn, rn, mh, 31); /* sum = (MACH>>31)+(MACH>>15) */ \
    emith_teq_r_r(rn, Z0); /* (need only N and Z flags) */ \
    EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> ov */ \
    emith_move_r_imm_c(DCOND_NE, ml, 0x0000); /* -overflow */ \
    emith_move_r_imm_c(DCOND_NE, mh, 0x8000); \
    EMITH_SJMP_START(DCOND_PL); /* sum > 0 -> +ovl */ \
    emith_sub_r_imm_c(DCOND_MI, ml, 1); /* 0xffffffff */ \
    emith_sub_r_imm_c(DCOND_MI, mh, 1); /* 0x00007fff */ \
    EMITH_SJMP_END(DCOND_PL); \
    EMITH_SJMP_END(DCOND_EQ); \
    EMITH_SJMP_END(DCOND_EQ); \
} while (0)
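// Illustrative note: with S set the 64 bit MAC is saturated to 48 bits. The
// code first assumes negative overflow and corrects for positive overflow
// by decrementing both words (MACH top 16 bits are unused when saturated):
#if 0
// negative: mh = 0x00008000, ml = 0x00000000
// positive: mh = 0x00007fff, ml = 0xffffffff  (after the sub_r_imm pair)
#endif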
/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macw(ml, mh, rn, rm, sr) do { \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* XXX: MACH should be untouched when S is set? */ \
    emith_asr(mh, ml, 31); /* sign ext MACL to MACH for ovrfl check */ \
    EMITH_SJMP_END(DCOND_EQ); \
    emith_mula_s64(ml, mh, rn, rm); \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* overflow if top 33 bits of MACH:MACL aren't all 1 or 0 */ \
    /* to check: add MACL[31] to MACH. this is 0 if no overflow */ \
    emith_lsr(rn, ml, 31); \
    emith_add_r_r(rn, mh); /* sum = MACH + ((MACL>>31)&1) */ \
    emith_teq_r_r(rn, Z0); /* (need only N and Z flags) */ \
    EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> overflow */ \
    /* XXX: LSB signalling only in SH1, or in SH2 too? */ \
    emith_move_r_imm_c(DCOND_NE, mh, 0x00000001); /* LSB of MACH */ \
    emith_move_r_imm_c(DCOND_NE, ml, 0x80000000); /* negative ovrfl */ \
    EMITH_SJMP_START(DCOND_PL); /* sum > 0 -> positive ovrfl */ \
    emith_sub_r_imm_c(DCOND_MI, ml, 1); /* 0x7fffffff */ \
    EMITH_SJMP_END(DCOND_PL); \
    EMITH_SJMP_END(DCOND_EQ); \
    EMITH_SJMP_END(DCOND_EQ); \
} while (0)

#define emith_write_sr(sr, srcr) do { \
    if (__mips_isa_rev < 2) { \
        emith_lsr(sr, sr,   10); emith_lsl(sr, sr, 10); \
        emith_lsl(AT, srcr, 22); emith_lsr(AT, AT, 22); \
        emith_or_r_r(sr, AT); \
    } else \
        EMIT(MIPS_INS_IMM(sr, srcr, 0, 10)); \
} while (0)
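// Illustrative only: both branches of emith_write_sr implement a 10 bit
// field insert; r2+ cores have INS for it, older ones synthesize the masks
// with shift pairs to avoid loading constants:
#if 0
u32 new_sr = (sr_val & ~0x3ffu) | (srcr_val & 0x3ffu);  // hypothetical names
#endif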
#define emith_carry_to_t(sr, is_sub) do { \
    if (__mips_isa_rev < 2) { \
        emith_and_r_imm(sr, 0xfffffffe); \
        emith_or_r_r(sr, FC); \
    } else \
        EMIT(MIPS_INS_IMM(sr, FC, 0, 1)); \
} while (0)

#define emith_t_to_carry(sr, is_sub) do { \
    emith_and_r_r_imm(FC, sr, 1); \
} while (0)

#define emith_tpop_carry(sr, is_sub) do { \
    emith_and_r_r_imm(FC, sr, 1); \
    emith_eor_r_r(sr, FC); \
} while (0)

#define emith_tpush_carry(sr, is_sub) \
    emith_or_r_r(sr, FC)

#ifdef T
// T bit handling
#define emith_invert_cond(cond) \
    ((cond) ^ 1)

static void emith_clr_t_cond(int sr)
{
    emith_bic_r_imm(sr, T);
}

static void emith_set_t_cond(int sr, int cond)
{
    int b, r;
    u8 *ptr;
    u32 val = 0, inv = 0;

    // try to avoid jumping around if possible
    if (emith_cmp_rs >= 0) {
        if (emith_cmp_rt >= 0)
            b = emith_cmpr_check(emith_cmp_rs, emith_cmp_rt,  cond, &r);
        else
            b = emith_cmpi_check(emith_cmp_rs, emith_cmp_imm, cond, &r);

        // XXX this relies on the inner workings of cmp_check...
        if (r == AT)
            // result of slt check which returns either 0 or 1 in AT
            val++, inv = (b == MIPS_BEQ);
    } else {
        b = emith_cond_check(cond, &r);
        if (r == Z0) {
            if (b == MIPS_BEQ || b == MIPS_BLE || b == MIPS_BGE)
                emith_or_r_imm(sr, T);
            return;
        } else if (r == FC)
            val++, inv = (b == MIPS_BEQ);
    }

    if (!val) switch (b) { // cases: b..z r, aka cmp r,Z0 or cmp r,#0
    case MIPS_BEQ:  EMIT(MIPS_SLTU_IMM(AT, r, 1));  r = AT; val++; break;
    case MIPS_BNE:  EMIT(MIPS_SLTU_REG(AT, Z0, r)); r = AT; val++; break;
    case MIPS_BLT:  EMIT(MIPS_SLT_REG(AT, r, Z0));  r = AT; val++; break;
    case MIPS_BGE:  EMIT(MIPS_SLT_REG(AT, r, Z0));  r = AT; val++; inv++; break;
    case MIPS_BLE:  EMIT(MIPS_SLT_REG(AT, Z0, r));  r = AT; val++; inv++; break;
    case MIPS_BGT:  EMIT(MIPS_SLT_REG(AT, Z0, r));  r = AT; val++; break;
    default:        // cases: beq/bne r,s, aka cmp r,s
        if ((b >> 5) == OP_BEQ) {
            EMIT(MIPS_XOR_REG(AT, r, b & 0x1f));
            EMIT(MIPS_SLTU_IMM(AT, AT, 1));  r = AT; val++; break;
        } else if ((b >> 5) == OP_BNE) {
            EMIT(MIPS_XOR_REG(AT, r, b & 0x1f));
            EMIT(MIPS_SLTU_REG(AT, Z0, AT)); r = AT; val++; break;
        }
    }
    if (val) {
        emith_or_r_r(sr, r);
        if (inv)
            emith_eor_r_imm(sr, T);
        return;
    }

    // can't obtain result directly, use presumably slower jump !cond + or sr,T
    b = emith_invert_branch(b);
    ptr = emith_branch(MIPS_BCONDZ(b, r, 0));
    emith_or_r_imm(sr, T);
    emith_flush(); // prohibit delay slot switching across jump targets
    val = (u8 *)tcache_ptr - (u8 *)(ptr) - 4;
    EMIT_PTR(ptr, MIPS_BCONDZ(b, r, val & 0x0003ffff));
}
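// Illustrative only: for e.g. DCOND_HS after a register compare, the slt
// result in AT is already 0/1, so T (assumed to be bit 0, cleared beforehand
// by emith_clr_t_cond) is set without any branch:
#if 0
//   SLTU AT, rs, rt   (from emith_cmpr_check)
//   OR   sr, sr, AT
//   XORI sr, sr, T    (the inv case, since HS maps to MIPS_BEQ)
#endif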
#define emith_get_t_cond()      -1

#define emith_sync_t(sr)        ((void)sr)

#define emith_invalidate_t()

static void emith_set_t(int sr, int val)
{
    if (val)
        emith_or_r_imm(sr, T);
    else
        emith_bic_r_imm(sr, T);
}

static int emith_tst_t(int sr, int tf)
{
    emith_tst_r_imm(sr, T);
    return tf ? DCOND_NE : DCOND_EQ;
}
#endif