  1. /*
  2. * Basic macros to emit PowerISA 2.03 64 bit instructions and some utils
  3. * Copyright (C) 2020 kub
  4. *
  5. * This work is licensed under the terms of MAME license.
  6. * See COPYING file in the top-level directory.
  7. */
  8. // NB bit numbers are reversed in PPC (MSB is bit 0). The emith_* functions and
  9. // macros must take this into account.
  10. // NB PPC was a 64 bit architecture from the outset, so basically all operations
  11. // are operating on 64 bits. 32 bit arch was only added later on, and there are
  12. // very few 32 bit operations (cmp*, shift/rotate, extract/insert, load/store).
  13. // For most operations the upper bits don't spill into the lower word, for the
  14. // others there is an appropriate 32 bit operation available.
  15. // NB PowerPC isn't a clean RISC design. Several insns use microcode, which is
  16. // AFAIK notably slower than using some 2-3 non-microcode insns. So, using
  17. // such insns should be avoided if possible. Listed in Cell handbook, App. A:
  18. // - shift/rotate having the amount in a register
  19. // - arithmetic/logical having the RC flag set (except cmp*)
  20. // - load/store algebraic (l?a*), multiple (lmw/stmw), string (ls*/sts*)
  21. // - mtcrf (and some more SPR related, not used here)
  22. // moreover, misaligned loads/stores crossing a cache line boundary are microcoded.
  23. // Note also that load/store string isn't available in little endian mode.
  24. // NB flag handling in PPC differs grossly from the ARM/X86 model. There are 8
  25. // fields in the condition register, each having 4 condition bits. However, only
  26. // the EQ bit is similar to the Z flag. The CA and OV bits in the XER register
  27. // are similar to the C and V bits, but shifts don't use CA, and cmp* doesn't
  28. // use CA and OV.
  29. // Moreover, there's no easy way to obtain CA and OV for 32 bit arithmetic,
  30. // since all arithmetic/logical insns operate on 64 bits.
  31. // For now, use the "no flags" code from the RISC-V backend.
  32. #define HOST_REGS 32
  33. // PPC64: params: r3-r10, return: r3, temp: r0,r11-r12, saved: r14-r31
  34. // reserved: r0(zero), r1(stack), r2(TOC), r13(TID)
  35. // additionally reserved on OSX: r31(PIC), r30(frame), r11(parentframe)
  36. // for OSX PIC code, on function calls r12 must contain the called address
  37. #define RET_REG 3
  38. #define PARAM_REGS { 3, 4, 5, 6, 7, 8, 9, 10 }
  39. #define PRESERVED_REGS { 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29 }
  40. #define TEMPORARY_REGS { 12 }
  41. #define CONTEXT_REG 29
  42. #define STATIC_SH2_REGS { SHR_SR,28 , SHR_R(0),27 , SHR_R(1),26 }
  43. // if RA is 0 in non-update memory insns, ADDI/ADDIS and ISEL, it reads as the constant zero, not r0
  44. #define Z0 0 // zero register
  45. #define SP 1 // stack pointer
  46. #define CR 12 // call register
  47. // SPR registers
  48. #define XER -1 // exception register
  49. #define LR -8 // link register
  50. #define CTR -9 // counter register
  51. // internally used by code emitter:
  52. #define AT 0 // emitter temporary (can't be fully used anyway)
  53. #define FNZ 14 // emulated processor flags: N (bit 31) ,Z (all bits)
  54. #define FC 15 // emulated processor flags: C (bit 0), others 0
  55. #define FV 16 // emulated processor flags: Nt^Ns (bit 31), others don't care
  56. // unified conditions; virtual, not corresponding to anything real on PPC
  57. #define DCOND_EQ 0x0
  58. #define DCOND_NE 0x1
  59. #define DCOND_HS 0x2
  60. #define DCOND_LO 0x3
  61. #define DCOND_MI 0x4
  62. #define DCOND_PL 0x5
  63. #define DCOND_VS 0x6
  64. #define DCOND_VC 0x7
  65. #define DCOND_HI 0x8
  66. #define DCOND_LS 0x9
  67. #define DCOND_GE 0xa
  68. #define DCOND_LT 0xb
  69. #define DCOND_GT 0xc
  70. #define DCOND_LE 0xd
  71. #define DCOND_CS DCOND_LO
  72. #define DCOND_CC DCOND_HS
  73. // unified insn; use right-aligned bit offsets for the bitfields
  74. #define PPC_INSN(op, b10, b15, b20, b31) \
  75. (((op)<<26)|((b10)<<21)|((b15)<<16)|((b20)<<11)|((b31)<<0))
  76. #define _ 0 // marker for "field unused"
  77. #define __(n) o##n // enum marker for "undefined"
  78. #define _CB(v,l,s,d) ((((v)>>(s))&((1<<(l))-1))<<(d)) // copy l bits
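// e.g. _CB(x,5,0,11) places the low 5 bits of x at bit position 11; the split
// SH/MB fields of the MD-form rotates below are assembled from such pieces.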
  79. // NB everything privileged or unneeded at first sight is left out
  80. // opcode field (encoded in OPCD, bits 0-5)
  81. enum { OP__LMA=004, OP_MULLI=007,
  82. OP_SUBFIC, __(11), OP_CMPLI, OP_CMPI, OP_ADDIC, OP_ADDICF, OP_ADDI, OP_ADDIS,
  83. OP_BC, __(21), OP_B, OP__CR, OP_RLWIMI, OP_RLWINM, __(26), OP_RLWNM,
  84. OP_ORI, OP_ORIS, OP_XORI, OP_XORIS, OP_ANDI, OP_ANDIS, OP__RLD, OP__EXT,
  85. OP_LWZ, OP_LWZU, OP_LBZ, OP_LBZU, OP_STW, OP_STWU, OP_STB, OP_STBU,
  86. OP_LHZ, OP_LHZU, OP_LHA, OP_LHAU, OP_STH, OP_STHU, OP_LMW, OP_STMW,
  87. /*OP_LQ=070,*/ OP__LD=072, OP__ST=076 };
  88. // CR subops (encoded in bits 21-31)
  89. enum { OPC_MCRF=0, OPC_BCLR=32, OPC_BCCTR=1056 };
  90. // RLD subops (encoded in XO bits 27-31)
  91. enum { OPR_RLDICL=0, OPR_RLDICR=4, OPR_RLDIC=8, OPR_RLDIMI=12, OPR_RLDCL=16, OPR_RLDCR=18 };
  92. // EXT subops (encoded in XO bits 21-31)
  93. enum {
  94. // arith/logical
  95. OPE_CMP=0, OPE_SUBFC=16, OPE_ADDC=20, OPE_AND=56,
  96. OPE_CMPL=64, OPE_SUBF=80, OPE_ANDC=120, OPE_NEG=208, OPE_NOR=248,
  97. OPE_SUBFE=272, OPE_ADDE=276, OPE_SUBFZE=400, OPE_ADDZE=404, OPE_SUBFME=464, OPE_ADDME=468,
  98. OPE_ADD=532, OPE_EQV=568, OPE_XOR=632, OPE_ORC=824, OPE_OR=888, OPE_NAND=952,
  99. // shift
  100. OPE_SLW=48, OPE_SLD=54, OPE_SRW=1072, OPE_SRD=1078, OPE_SRAW=1584, OPE_SRAD=1588, OPE_SRAWI=1648, OPE_SRADI=1652,
  101. // extend, bitcount
  102. OPE_CNTLZW=52, OPE_CNTLZD=116, OPE_EXTSH=1844, OPE_EXTSB=1908, OPE_EXTSW=1972,
  103. // mult/div
  104. OPE_MULHDU=18, OPE_MULHWU=22, OPE_MULHD=146, OPE_MULHW=150, OPE_MULLD=466, OPE_MULLW=470,
  105. OPE_DIVDU=914, OPE_DIVWU=918, OPE_DIVD=978, OPE_DIVW=982,
  106. // load/store indexed
  107. OPE_LDX=42, OPE_LDUX=106, OPE_STDX=298, OPE_STDUX=362,
  108. OPE_LWZX=46, OPE_LWZUX=110, OPE_LWAX=682, OPE_LWAUX=746, OPE_STWX=302, OPE_STWUX=366,
  109. OPE_LBZX=174, OPE_LBZUX=238, /* no LBAX/LBAUX... */ OPE_STBX=430, OPE_STBUX=494,
  110. OPE_LHZX=558, OPE_LHZUX=622, OPE_LHAX=686, OPE_LHAUX=750, OPE_STHX=814, OPE_STHUX=878,
  111. // SPR, CR related
  112. OPE_ISEL=15, OPE_MFCR=38, OPE_MTCRF=288, OPE_MFSPR=678, OPE_MTSPR=934, OPE_MCRXR=1024,
  113. };
  114. // LD subops (encoded in XO bits 30-31)
  115. enum { OPL_LD, OPL_LDU, OPL_LWA };
  116. // ST subops (encoded in XO bits 30-31)
  117. enum { OPS_STD, OPS_STDU /*,OPS_STQ*/ };
  118. // X*,M*-forms insns often have overflow detect in b21 and CR0 update in b31
  119. #define XOE (1<<10) // (31-21)
  120. #define XRC (1<<0) // (31-31)
  121. #define XF (XOE|XRC)
  122. // mask/shift fields: MB and ME in M-forms (32 bit rotates), MB and SH in MD-forms (64 bit)
  123. #define MM(b,e) (((b)<<6)|((e)<<1))
  124. #define MD(b,s) (_CB(b,5,0,6)|_CB(b,1,5,5)|_CB(s,5,0,11)|_CB(s,1,5,1))
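// illustration: MM(0,31) is the full 32 bit mask for rlwinm/rlwimi/rlwnm, and
// MM(0,31-n) clears the low n bits of the rotated value (used for a left shift
// by n below); MD(32,0) gives mb=32,sh=0 for rldicl, i.e. "clear the upper
// 32 bits", which is what PPC_EXTUW_REG uses for zero extension.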
  125. // AA and LK in I,B-forms branches
  126. #define BAA (1<<1)
  127. #define BLK (1<<0)
  128. // BO and BI condition codes in B-form, BO0-BO4:BI2-BI4 since we only need CR0
  129. #define BLT 0x60
  130. #define BGE 0x20
  131. #define BGT 0x61
  132. #define BLE 0x21
  133. #define BEQ 0x62
  134. #define BNE 0x22
  135. #define BXX 0xa0 // unconditional, aka always
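// e.g. BEQ = 0x62: BO=0b01100 (branch if CR bit set), BI=2 (CR0[EQ]);
// BNE = BEQ^0x40 flips the BO "branch if true/false" bit, which is what
// emith_invert_branch() below relies on.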
  136. #define PPC_NOP \
  137. PPC_INSN(OP_ORI, 0, 0, _, 0) // ori r0, r0, 0
  138. // arithmetic/logical
  139. #define PPC_OP_REG(op, xop, rt, ra, rb) /* X*,M*-form */ \
  140. PPC_INSN((unsigned)op, rt, ra, rb, xop)
  141. #define PPC_OP_IMM(op, rt, ra, imm) /* D,B,I-form */ \
  142. PPC_INSN((unsigned)op, rt, ra, _, imm)
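// worked example: PPC_ADD_REG(3,4,5) = PPC_INSN(31,3,4,5,OPE_ADD) = 0x7c642a14,
// i.e. "add r3,r4,r5"; the OPE_* values are the architecture's XO codes shifted
// left by 1, leaving bit 0 free for the Rc flag (XRC).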
  143. // rt = ra OP rb
  144. #define PPC_ADD_REG(rt, ra, rb) \
  145. PPC_OP_REG(OP__EXT,OPE_ADD,rt,ra,rb)
  146. #define PPC_ADDC_REG(rt, ra, rb) \
  147. PPC_OP_REG(OP__EXT,OPE_ADDC,rt,ra,rb)
  148. #define PPC_SUB_REG(rt, rb, ra) /* NB reversed args (rb-ra) */ \
  149. PPC_OP_REG(OP__EXT,OPE_SUBF,rt,ra,rb)
  150. #define PPC_SUBC_REG(rt, rb, ra) \
  151. PPC_OP_REG(OP__EXT,OPE_SUBFC,rt,ra,rb)
  152. #define PPC_NEG_REG(rt, ra) \
  153. PPC_OP_REG(OP__EXT,OPE_NEG,rt,ra,_)
  154. #define PPC_CMP_REG(ra, rb) \
  155. PPC_OP_REG(OP__EXT,OPE_CMP,1,ra,rb)
  156. #define PPC_CMPL_REG(ra, rb) \
  157. PPC_OP_REG(OP__EXT,OPE_CMPL,1,ra,rb)
  158. #define PPC_CMPW_REG(ra, rb) \
  159. PPC_OP_REG(OP__EXT,OPE_CMP,0,ra,rb)
  160. #define PPC_CMPLW_REG(ra, rb) \
  161. PPC_OP_REG(OP__EXT,OPE_CMPL,0,ra,rb)
  162. #define PPC_XOR_REG(ra, rt, rb) \
  163. PPC_OP_REG(OP__EXT,OPE_XOR,rt,ra,rb)
  164. #define PPC_OR_REG(ra, rt, rb) \
  165. PPC_OP_REG(OP__EXT,OPE_OR,rt,ra,rb)
  166. #define PPC_ORN_REG(ra, rt, rb) \
  167. PPC_OP_REG(OP__EXT,OPE_ORC,rt,ra,rb)
  168. #define PPC_NOR_REG(ra, rt, rb) \
  169. PPC_OP_REG(OP__EXT,OPE_NOR,rt,ra,rb)
  170. #define PPC_AND_REG(ra, rt, rb) \
  171. PPC_OP_REG(OP__EXT,OPE_AND,rt,ra,rb)
  172. #define PPC_BIC_REG(ra, rt, rb) \
  173. PPC_OP_REG(OP__EXT,OPE_ANDC,rt,ra,rb)
  174. #define PPC_MOV_REG(rt, ra) \
  175. PPC_OR_REG(rt, ra, ra)
  176. #define PPC_MVN_REG(rt, ra) \
  177. PPC_NOR_REG(rt, ra, ra)
  178. // rt = ra OP rb OP carry
  179. #define PPC_ADC_REG(rt, ra, rb) \
  180. PPC_OP_REG(OP__EXT,OPE_ADDE,rt,ra,rb)
  181. #define PPC_SBC_REG(rt, rb, ra) \
  182. PPC_OP_REG(OP__EXT,OPE_SUBFE,rt,ra,rb)
  183. #define PPC_NGC_REG(rt, ra) \
  184. PPC_OP_REG(OP__EXT,OPE_SUBFZE,rt,ra,_)
  185. // rt = ra SHIFT rb
  186. #define PPC_LSL_REG(ra, rt, rb) \
  187. PPC_OP_REG(OP__EXT,OPE_SLD,rt,ra,rb)
  188. #define PPC_LSR_REG(ra, rt, rb) \
  189. PPC_OP_REG(OP__EXT,OPE_SRD,rt,ra,rb)
  190. #define PPC_ASR_REG(ra, rt, rb) \
  191. PPC_OP_REG(OP__EXT,OPE_SRAD,rt,ra,rb)
  192. #define PPC_ROL_REG(ra, rt, rb) \
  193. PPC_OP_REG(OP__RLD,OPR_RLDCL,rt,ra,rb)
  194. #define PPC_LSLW_REG(ra, rt, rb) \
  195. PPC_OP_REG(OP__EXT,OPE_SLW,rt,ra,rb)
  196. #define PPC_LSRW_REG(ra, rt, rb) \
  197. PPC_OP_REG(OP__EXT,OPE_SRW,rt,ra,rb)
  198. #define PPC_ASRW_REG(ra, rt, rb) \
  199. PPC_OP_REG(OP__EXT,OPE_SRAW,rt,ra,rb)
  200. #define PPC_ROLW_REG(ra, rt, rb) \
  201. PPC_OP_REG(OP_RLWNM,MM(0,31),rt,ra,rb)
  202. // rt = ra OP (imm16 << (0|16))
  203. #define PPC_ADD_IMM(rt, ra, imm16) \
  204. PPC_OP_IMM(OP_ADDI, rt, ra, imm16)
  205. #define PPC_ADDT_IMM(rt, ra, imm16) \
  206. PPC_OP_IMM(OP_ADDIS, rt, ra, imm16)
  207. #define PPC_XOR_IMM(ra, rt, imm16) \
  208. PPC_OP_IMM(OP_XORI, rt, ra, imm16)
  209. #define PPC_XORT_IMM(ra, rt, imm16) \
  210. PPC_OP_IMM(OP_XORIS, rt, ra, imm16)
  211. #define PPC_OR_IMM(ra, rt, imm16) \
  212. PPC_OP_IMM(OP_ORI, rt, ra, imm16)
  213. #define PPC_ORT_IMM(ra, rt, imm16) \
  214. PPC_OP_IMM(OP_ORIS, rt, ra, imm16)
  215. #define PPC_ANDS_IMM(rt, ra, imm16) \
  216. PPC_OP_IMM(OP_ANDI, rt, ra, imm16)
  217. #define PPC_ANDTS_IMM(rt, ra, imm16) \
  218. PPC_OP_IMM(OP_ANDIS, rt, ra, imm16)
  219. #define PPC_CMP_IMM(ra, imm16) \
  220. PPC_OP_IMM(OP_CMPI, 1, ra, imm16)
  221. #define PPC_CMPL_IMM(ra, imm16) \
  222. PPC_OP_IMM(OP_CMPLI, 1, ra, imm16)
  223. #define PPC_CMPW_IMM(ra, imm16) \
  224. PPC_OP_IMM(OP_CMPI, 0, ra, imm16)
  225. #define PPC_CMPLW_IMM(ra, imm16) \
  226. PPC_OP_IMM(OP_CMPLI, 0, ra, imm16)
  227. #define PPC_TST_IMM(rt, imm16) \
  228. PPC_ANDS_IMM(rt,Z0,imm16)
  229. #define PPC_MOV_IMM(rt, ra, imm16) \
  230. PPC_ADD_IMM(rt,ra,imm16)
  231. #define PPC_MOVT_IMM(rt, ra, imm16) \
  232. PPC_ADDT_IMM(rt,ra,imm16)
  233. // rt = EXTEND ra
  234. #define PPC_EXTSW_REG(ra, rt) \
  235. PPC_OP_REG(OP__EXT,OPE_EXTSW,rt,ra,_)
  236. #define PPC_EXTSH_REG(ra, rt) \
  237. PPC_OP_REG(OP__EXT,OPE_EXTSH,rt,ra,_)
  238. #define PPC_EXTSB_REG(ra, rt) \
  239. PPC_OP_REG(OP__EXT,OPE_EXTSB,rt,ra,_)
  240. #define PPC_EXTUW_REG(ra, rt) \
  241. PPC_OP_REG(OP__RLD,OPR_RLDICL|MD(32,0),rt,ra,_)
  242. #define PPC_EXTUH_REG(ra, rt) \
  243. PPC_OP_REG(OP__RLD,OPR_RLDICL|MD(48,0),rt,ra,_)
  244. #define PPC_EXTUB_REG(ra, rt) \
  245. PPC_OP_REG(OP__RLD,OPR_RLDICL|MD(56,0),rt,ra,_)
  246. // rt = ra SHIFT imm5/imm6
  247. #define PPC_LSL_IMM(ra, rt, bits) \
  248. PPC_OP_REG(OP__RLD,OPR_RLDICR|MD(63-(bits),bits),rt,ra,_)
  249. #define PPC_LSR_IMM(ra, rt, bits) \
  250. PPC_OP_REG(OP__RLD,OPR_RLDICL|MD(bits,64-(bits)),rt,ra,_)
  251. #define PPC_ASR_IMM(ra, rt, bits) \
  252. PPC_OP_REG(OP__EXT,OPE_SRADI|MD(_,bits),rt,ra,_)
  253. #define PPC_ROL_IMM(ra, rt, bits) \
  254. PPC_OP_REG(OP__RLD,OPR_RLDICL|MD(0,bits),rt,ra,_)
  255. #define PPC_LSLW_IMM(ra, rt, bits) \
  256. PPC_OP_REG(OP_RLWINM,MM(0,31-(bits)),rt,ra,bits)
  257. #define PPC_LSRW_IMM(ra, rt, bits) \
  258. PPC_OP_REG(OP_RLWINM,MM(bits,31),rt,ra,32-(bits))
  259. #define PPC_ASRW_IMM(ra, rt, bits) \
  260. PPC_OP_REG(OP__EXT,OPE_SRAWI,rt,ra,bits)
  261. #define PPC_ROLW_IMM(ra, rt, bits) \
  262. PPC_OP_REG(OP_RLWINM,MM(0,31),rt,ra,bits)
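// the 32 bit shifts are the usual rlwinm idioms, e.g. PPC_LSRW_IMM(d,s,n) is
// "rlwinm d,s,32-n,n,31" (rotate left by 32-n, keep the low 32-n bits) and
// PPC_LSLW_IMM(d,s,n) is "rlwinm d,s,n,0,31-n".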
  263. // rt = EXTRACT/INSERT ra
  264. #define PPC_BFX_IMM(ra, rt, lsb, bits) \
  265. PPC_OP_REG(OP__RLD,OPR_RLDICL|MD(64-(bits),63&(lsb+bits)),rt,ra,_)
  266. #define PPC_BFXD_IMM(ra, rt, lsb, bits) /* extract to high bits, 64 bit */ \
  267. PPC_OP_REG(OP__RLD,OPR_RLDICR|MD(bits-1,lsb),rt,ra,_)
  268. #define PPC_BFI_IMM(ra, rt, lsb, bits) \
  269. PPC_OP_REG(OP__RLD,OPR_RLDIMI|MD(lsb,64-(lsb+bits)),rt,ra,_)
  270. #define PPC_BFXW_IMM(ra, rt, lsb, bits) \
  271. PPC_OP_REG(OP_RLWINM,MM(32-(bits),31),rt,ra,31&(lsb+bits))
  272. #define PPC_BFXT_IMM(ra, rt, lsb, bits) /* extract to high bits, 32 bit */ \
  273. PPC_OP_REG(OP_RLWINM,MM(0,bits-1),rt,ra,lsb)
  274. #define PPC_BFIW_IMM(ra, rt, lsb, bits) \
  275. PPC_OP_REG(OP_RLWIMI,MM(lsb,lsb+bits-1),rt,ra,32-(lsb+bits))
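// NB in the PPC_BF* helpers the lsb argument counts from the MSB end (PPC bit
// numbering), not from bit 0: e.g. PPC_BFXW_IMM(d,s,0,8) extracts the top byte
// of s into the low bits of d, and PPC_BFI_IMM(dlo,dhi,0,32) (see the 64 bit
// MAC helper below) inserts dhi into the upper half of dlo.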
  276. // multiplication; NB in 32 bit results the topmost 32 bits are undefined
  277. #define PPC_MULL(rt, ra, rb) /* 64 bit */ \
  278. PPC_OP_REG(OP__EXT,OPE_MULLD,rt,ra,rb)
  279. #define PPC_MUL(rt, ra, rb) /* low 32 bit */ \
  280. PPC_OP_REG(OP__EXT,OPE_MULLW,rt,ra,rb)
  281. #define PPC_MULHS(rt, ra, rb) /* high 32 bit, signed */ \
  282. PPC_OP_REG(OP__EXT,OPE_MULHW,rt,ra,rb)
  283. #define PPC_MULHU(rt, ra, rb) /* high 32 bit, unsigned */ \
  284. PPC_OP_REG(OP__EXT,OPE_MULHWU,rt,ra,rb)
  285. // XXX use MAC* insns from the LMA group?
  286. // branching (only PC-relative)
  287. #define PPC_B(offs26) \
  288. PPC_OP_IMM(OP_B,_,_,(offs26)&~3)
  289. #define PPC_BL(offs26) \
  290. PPC_OP_IMM(OP_B,_,_,((offs26)&~3)|BLK)
  291. #define PPC_RET() \
  292. PPC_OP_REG(OP__CR,OPC_BCLR,BXX>>3,_,_)
  293. #define PPC_RETCOND(cond) \
  294. PPC_OP_REG(OP__CR,OPC_BCLR,(cond)>>3,(cond)&0x7,_)
  295. #define PPC_BCTRCOND(cond) \
  296. PPC_OP_REG(OP__CR,OPC_BCCTR,(cond)>>3,(cond)&0x7,_)
  297. #define PPC_BLCTRCOND(cond) \
  298. PPC_OP_REG(OP__CR,OPC_BCCTR|BLK,(cond)>>3,(cond)&0x7,_)
  299. #define PPC_BCOND(cond, offs19) \
  300. PPC_OP_IMM(OP_BC,(cond)>>3,(cond)&0x7,(offs19)&~3)
  301. // load/store, offset
  302. #define PPC_LDX_IMM(rt, ra, offs16) \
  303. PPC_OP_IMM(OP__LD,rt,ra,((u16)(offs16)&~3)|OPL_LD)
  304. #define PPC_LDW_IMM(rt, ra, offs16) \
  305. PPC_OP_IMM(OP_LWZ,rt,ra,(u16)(offs16))
  306. #define PPC_LDH_IMM(rt, ra, offs16) \
  307. PPC_OP_IMM(OP_LHZ,rt,ra,(u16)(offs16))
  308. #define PPC_LDB_IMM(rt, ra, offs16) \
  309. PPC_OP_IMM(OP_LBZ,rt,ra,(u16)(offs16))
  310. #define PPC_LDSH_IMM(rt, ra, offs16) \
  311. PPC_OP_IMM(OP_LHA,rt,ra,(u16)(offs16))
  312. #define PPC_STX_IMM(rt, ra, offs16) \
  313. PPC_OP_IMM(OP__ST,rt,ra,((u16)(offs16)&~3)|OPS_STD)
  314. #define PPC_STW_IMM(rt, ra, offs16) \
  315. PPC_OP_IMM(OP_STW,rt,ra,(u16)(offs16))
  316. #define PPC_STH_IMM(rt, ra, offs16) \
  317. PPC_OP_IMM(OP_STH,rt,ra,(u16)(offs16))
  318. #define PPC_STB_IMM(rt, ra, offs16) \
  319. PPC_OP_IMM(OP_STB,rt,ra,(u16)(offs16))
  320. #define PPC_STXU_IMM(rt, ra, offs16) \
  321. PPC_OP_IMM(OP__ST,rt,ra,((u16)(offs16)&~3)|OPS_STDU)
  322. #define PPC_STWU_IMM(rt, ra, offs16) \
  323. PPC_OP_IMM(OP_STWU,rt,ra,(u16)(offs16))
  324. // load/store, indexed
  325. #define PPC_LDX_REG(rt, ra, rb) \
  326. PPC_OP_REG(OP__EXT,OPE_LDX,rt,ra,rb)
  327. #define PPC_LDW_REG(rt, ra, rb) \
  328. PPC_OP_REG(OP__EXT,OPE_LWZX,rt,ra,rb)
  329. #define PPC_LDH_REG(rt, ra, rb) \
  330. PPC_OP_REG(OP__EXT,OPE_LHZX,rt,ra,rb)
  331. #define PPC_LDB_REG(rt, ra, rb) \
  332. PPC_OP_REG(OP__EXT,OPE_LBZX,rt,ra,rb)
  333. #define PPC_LDSH_REG(rt, ra, rb) \
  334. PPC_OP_REG(OP__EXT,OPE_LHAX,rt,ra,rb)
  335. #define PPC_STX_REG(rt, ra, rb) \
  336. PPC_OP_REG(OP__EXT,OPE_STDX,rt,ra,rb)
  337. #define PPC_STW_REG(rt, ra, rb) \
  338. PPC_OP_REG(OP__EXT,OPE_STWX,rt,ra,rb)
  339. #define PPC_STH_REG(rt, ra, rb) \
  340. PPC_OP_REG(OP__EXT,OPE_STHX,rt,ra,rb)
  341. #define PPC_STB_REG(rt, ra, rb) \
  342. PPC_OP_REG(OP__EXT,OPE_STBX,rt,ra,rb)
  343. // special regs: LR, CTR, XER, CR
  344. #define PPC_MFSP_REG(rt, spr) \
  345. PPC_OP_REG(OP__EXT,OPE_MFSPR,rt,_,_CB(-(spr),5,0,5)|_CB(-(spr),5,5,0))
  346. #define PPC_MTSP_REG(rs, spr) \
  347. PPC_OP_REG(OP__EXT,OPE_MTSPR,rs,_,_CB(-(spr),5,0,5)|_CB(-(spr),5,5,0))
  348. #define PPC_MFCR_REG(rt) \
  349. PPC_OP_REG(OP__EXT,OPE_MFCR,rt,_,_)
  350. #define PPC_MTCRF_REG(rs, fm) \
  351. PPC_OP_REG(OP__EXT,OPE_MTCRF,rs,_,(fm)<<1)
  352. #define PPC_MCRXR_REG(crt) \
  353. PPC_OP_REG(OP__EXT,OPE_MCRXR,(crt)<<2,_,_)
  354. #define PPC_MCRCR_REG(crt, crf) \
  355. PPC_OP_REG(OP__CR,OPC_MCRF,(crt)<<2,(crf)<<1,_)
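// mfspr/mtspr take the 10 bit SPR number with its two 5 bit halves swapped;
// the SPR constants above are stored negated, so _CB(-(spr),...) recovers the
// real number (XER=1, LR=8, CTR=9). e.g. PPC_MFSP_REG(r, LR) assembles "mflr r".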
  356. #ifdef __powerpc64__
  357. #define PTR_SCALE 3
  358. #define PPC_LDP_IMM PPC_LDX_IMM
  359. #define PPC_LDP_REG PPC_LDX_REG
  360. #define PPC_STP_IMM PPC_STX_IMM
  361. #define PPC_STP_REG PPC_STX_REG
  362. #define PPC_STPU_IMM PPC_STXU_IMM
  363. #define PPC_BFXP_IMM PPC_BFX_IMM
  364. #define emith_uext_ptr(r) EMIT(PPC_EXTUW_REG(r, r))
  365. // "long" multiplication, 32x32 bit = 64 bit
  366. #define EMIT_PPC_MULLU_REG(dlo, dhi, s1, s2) do { \
  367. EMIT(PPC_EXTUW_REG(s1, s1)); \
  368. EMIT(PPC_EXTUW_REG(s2, s2)); \
  369. EMIT(PPC_MULL(dlo, s1, s2)); \
  370. EMIT(PPC_ASR_IMM(dhi, dlo, 32)); \
  371. } while (0)
  372. #define EMIT_PPC_MULLS_REG(dlo, dhi, s1, s2) do { \
  373. EMIT(PPC_EXTSW_REG(s1, s1)); \
  374. EMIT(PPC_EXTSW_REG(s2, s2)); \
  375. EMIT(PPC_MULL(dlo, s1, s2)); \
  376. EMIT(PPC_ASR_IMM(dhi, dlo, 32)); \
  377. } while (0)
  378. #define EMIT_PPC_MACLS_REG(dlo, dhi, s1, s2) do { \
  379. EMIT(PPC_EXTSW_REG(s1, s1)); \
  380. EMIT(PPC_EXTSW_REG(s2, s2)); \
  381. EMIT(PPC_MULL(AT, s1, s2)); \
  382. EMIT(PPC_BFI_IMM(dlo, dhi, 0, 32)); \
  383. emith_add_r_r(dlo, AT); \
  384. EMIT(PPC_ASR_IMM(dhi, dlo, 32)); \
  385. } while (0)
  386. #else
  387. #define PTR_SCALE 2
  388. #define PPC_LDP_IMM PPC_LDW_IMM
  389. #define PPC_LDP_REG PPC_LDW_REG
  390. #define PPC_STP_IMM PPC_STW_IMM
  391. #define PPC_STP_REG PPC_STW_REG
  392. #define PPC_STPU_IMM PPC_STWU_IMM
  393. #define PPC_BFXP_IMM PPC_BFXW_IMM
  394. #define emith_uext_ptr(r) /**/
  395. // "long" multiplication, 32x32 bit = 64 bit
  396. #define EMIT_PPC_MULLU_REG(dlo, dhi, s1, s2) do { \
  397. int at = (dlo == s1 || dlo == s2 ? AT : dlo); \
  398. EMIT(PPC_MUL(at, s1, s2)); \
  399. EMIT(PPC_MULHU(dhi, s1, s2)); \
  400. if (at != dlo) emith_move_r_r(dlo, at); \
  401. } while (0)
  402. #define EMIT_PPC_MULLS_REG(dlo, dhi, s1, s2) do { \
  403. int at = (dlo == s1 || dlo == s2 ? AT : dlo); \
  404. EMIT(PPC_MUL(at, s1, s2)); \
  405. EMIT(PPC_MULHS(dhi, s1, s2)); \
  406. if (at != dlo) emith_move_r_r(dlo, at); \
  407. } while (0)
  408. #define EMIT_PPC_MACLS_REG(dlo, dhi, s1, s2) do { \
  409. int t_ = rcache_get_tmp(); \
  410. EMIT_PPC_MULLS_REG(t_, AT, s1, s2); \
  411. EMIT(PPC_ADDC_REG(dlo, dlo, t_)); \
  412. EMIT(PPC_ADC_REG(dhi, dhi, AT)); \
  413. rcache_free_tmp(t_); \
  414. } while (0)
  415. #endif
  416. #define PTR_SIZE (1<<PTR_SCALE)
  417. // "emulated" RISC-V SLTU insn for the flag handling stuff XXX cumbersome
  418. #define EMIT_PPC_SLTWU_REG(rt, ra, rb) do { \
  419. EMIT(PPC_CMPLW_REG(ra, rb)); \
  420. EMIT(PPC_MFCR_REG(rt)); \
  421. EMIT(PPC_BFXW_IMM(rt, rt, 0, 1)); \
  422. } while (0)
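// i.e. rt = (u32)ra < (u32)rb: cmplw sets CR0, mfcr copies CR into rt (CR0[LT]
// then sits in bit 31), and the BFXW extracts that single bit down to bit 0.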
  423. // XXX: tcache_ptr type for SVP and SH2 compilers differs..
  424. #define EMIT_PTR(ptr, x) \
  425. do { \
  426. *(u32 *)(ptr) = x; \
  427. ptr = (void *)((u8 *)(ptr) + sizeof(u32)); \
  428. } while (0)
  429. #define EMIT(op) \
  430. do { \
  431. EMIT_PTR(tcache_ptr, op); \
  432. COUNT_OP; \
  433. } while (0)
  434. // if-then-else conditional execution helpers
  435. #define JMP_POS(ptr) { \
  436. ptr = tcache_ptr; \
  437. EMIT(PPC_BCOND(cond_m, 0)); \
  438. }
  439. #define JMP_EMIT(cond, ptr) { \
  440. u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr); \
  441. EMIT_PTR(ptr, PPC_BCOND(cond_m, val_ & 0x0000fffc)); \
  442. }
  443. #define JMP_EMIT_NC(ptr) { \
  444. u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr); \
  445. EMIT_PTR(ptr, PPC_B(val_ & 0x03fffffc)); \
  446. }
  447. #define EMITH_JMP_START(cond) { \
  448. int cond_m = emith_cond_check(cond); \
  449. u8 *cond_ptr; \
  450. JMP_POS(cond_ptr)
  451. #define EMITH_JMP_END(cond) \
  452. JMP_EMIT(cond, cond_ptr); \
  453. }
  454. #define EMITH_JMP3_START(cond) { \
  455. int cond_m = emith_cond_check(cond); \
  456. u8 *cond_ptr, *else_ptr; \
  457. JMP_POS(cond_ptr)
  458. #define EMITH_JMP3_MID(cond) \
  459. JMP_POS(else_ptr); \
  460. JMP_EMIT(cond, cond_ptr);
  461. #define EMITH_JMP3_END() \
  462. JMP_EMIT_NC(else_ptr); \
  463. }
  464. // "simple" jump (no more than a few insns)
  465. // ARM32 will use conditional instructions here
  466. #define EMITH_SJMP_START EMITH_JMP_START
  467. #define EMITH_SJMP_END EMITH_JMP_END
  468. #define EMITH_SJMP3_START EMITH_JMP3_START
  469. #define EMITH_SJMP3_MID EMITH_JMP3_MID
  470. #define EMITH_SJMP3_END EMITH_JMP3_END
  471. #define EMITH_SJMP2_START(cond) \
  472. EMITH_SJMP3_START(cond)
  473. #define EMITH_SJMP2_MID(cond) \
  474. EMITH_SJMP3_MID(cond)
  475. #define EMITH_SJMP2_END(cond) \
  476. EMITH_SJMP3_END()
  477. // flag register emulation. this is modelled after arm/x86.
  478. // the FNZ register stores the result of the last flag setting operation for
  479. // N and Z flag, used for EQ,NE,MI,PL branches.
  480. // the FC register stores the C flag (used for HI,HS,LO,LS,CC,CS).
  481. // the FV register stores information for V flag calculation (used for
  482. // GT,GE,LT,LE,VC,VS). V flag is costly and only fully calculated when needed.
  483. // the core registers may be temp registers, since the condition after calls
  484. // is undefined anyway.
  485. // flag emulation creates 2 (i.e. cmp #0/beq) up to 9 (i.e. adcf/ble) extra insns.
  486. // flag handling shortcuts may reduce this by 1-4 insns, see emith_cond_check()
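// example of the resulting code (no C/V hint): emith_addf_r_r(d,s) emits
// "add FNZ,d,s; mr d,FNZ", and a following DCOND_EQ check in emith_cond_check()
// emits just "cmpwi cr0,FNZ,0" plus the conditional branch.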
  487. static int emith_cmp_ra, emith_cmp_rb; // registers used in cmp_r_r/cmp_r_imm
  488. static s32 emith_cmp_imm; // immediate value used in cmp_r_imm
  489. enum { _FHC=1, _FHV=2 } emith_flg_hint; // C/V flag usage hinted by compiler
  490. static int emith_flg_noV; // V flag known not to be set
  491. #define EMITH_HINT_COND(cond) do { \
  492. /* only need to check cond>>1 since the lowest bit inverts the cond */ \
  493. unsigned _mv = BITMASK3(DCOND_VS>>1,DCOND_GE>>1,DCOND_GT>>1); \
  494. unsigned _mc = _mv | BITMASK2(DCOND_HS>>1,DCOND_HI>>1); \
  495. emith_flg_hint = (_mv & BITMASK1(cond >> 1) ? _FHV : 0); \
  496. emith_flg_hint |= (_mc & BITMASK1(cond >> 1) ? _FHC : 0); \
  497. } while (0)
  498. // store minimal cc information: rt, rb^ra, carry
  499. // NB: the result *must* first go to FNZ, in case rt == ra or rt == rb.
  500. // NB: for adcf and sbcf, carry-in must be dealt with separately (see there)
  501. static void emith_set_arith_flags(int rt, int ra, int rb, s32 imm, int sub)
  502. {
  503. if (emith_flg_hint & _FHC) {
  504. if (sub) // C = sub:rb<rt, add:rt<rb
  505. EMIT_PPC_SLTWU_REG(FC, ra, FNZ);
  506. else EMIT_PPC_SLTWU_REG(FC, FNZ, ra);// C in FC, bit 0
  507. }
  508. if (emith_flg_hint & _FHV) {
  509. emith_flg_noV = 0;
  510. if (rb >= 0) // Nt^Ns in FV, bit 31
  511. EMIT(PPC_XOR_REG(FV, ra, rb));
  512. else if (imm == 0)
  513. emith_flg_noV = 1; // imm #0 can't overflow
  514. else if ((imm < 0) == !sub)
  515. EMIT(PPC_MVN_REG(FV, ra));
  516. else if ((imm > 0) == !sub)
  517. EMIT(PPC_MOV_REG(FV, ra));
  518. }
  519. // full V = Nd^Nt^Ns^C calculation is deferred until really needed
  520. if (rt && rt != FNZ)
  521. EMIT(PPC_MOV_REG(rt, FNZ)); // N,Z via result value in FNZ
  522. emith_cmp_ra = emith_cmp_rb = -1;
  523. }
  524. // handle cmp separately by storing the involved regs for later use.
  525. // this works for all conditions except VC/VS, which fortunately are never used after a cmp.
  526. static void emith_set_compare_flags(int ra, int rb, s32 imm)
  527. {
  528. emith_cmp_rb = rb;
  529. emith_cmp_ra = ra;
  530. emith_cmp_imm = imm;
  531. }
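// example: emith_cmp_r_imm(r, 10) followed by a DCOND_GE branch emits nothing
// here; emith_cond_check() later turns it into "cmpwi cr0,r,10" plus bge, so a
// plain compare-and-branch costs no extra insns.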
  532. // data processing, register
  533. #define emith_move_r_r_ptr(d, s) \
  534. EMIT(PPC_MOV_REG(d, s))
  535. #define emith_move_r_r_ptr_c(cond, d, s) \
  536. emith_move_r_r_ptr(d, s)
  537. #define emith_move_r_r(d, s) \
  538. emith_move_r_r_ptr(d, s)
  539. #define emith_move_r_r_c(cond, d, s) \
  540. emith_move_r_r(d, s)
  541. #define emith_mvn_r_r(d, s) \
  542. EMIT(PPC_MVN_REG(d, s))
  543. #define emith_add_r_r_r_lsl_ptr(d, s1, s2, simm) do { \
  544. if (simm) { \
  545. EMIT(PPC_LSLW_IMM(AT, s2, simm)); \
  546. EMIT(PPC_ADD_REG(d, s1, AT)); \
  547. } else EMIT(PPC_ADD_REG(d, s1, s2)); \
  548. } while (0)
  549. #define emith_add_r_r_r_lsl(d, s1, s2, simm) \
  550. emith_add_r_r_r_lsl_ptr(d, s1, s2, simm)
  551. #define emith_add_r_r_r_lsr(d, s1, s2, simm) do { \
  552. if (simm) { \
  553. EMIT(PPC_LSRW_IMM(AT, s2, simm)); \
  554. EMIT(PPC_ADD_REG(d, s1, AT)); \
  555. } else EMIT(PPC_ADD_REG(d, s1, s2)); \
  556. } while (0)
  557. #define emith_addf_r_r_r_lsl_ptr(d, s1, s2, simm) do { \
  558. if (simm) { \
  559. EMIT(PPC_LSLW_IMM(AT, s2, simm)); \
  560. EMIT(PPC_ADD_REG(FNZ, s1, AT)); \
  561. emith_set_arith_flags(d, s1, AT, 0, 0); \
  562. } else { \
  563. EMIT(PPC_ADD_REG(FNZ, s1, s2)); \
  564. emith_set_arith_flags(d, s1, s2, 0, 0); \
  565. } \
  566. } while (0)
  567. #define emith_addf_r_r_r_lsl(d, s1, s2, simm) do { \
  568. if (simm) { \
  569. EMIT(PPC_LSLW_IMM(AT, s2, simm)); \
  570. EMIT(PPC_ADD_REG(FNZ, s1, AT)); \
  571. emith_set_arith_flags(d, s1, AT, 0, 0); \
  572. } else { \
  573. EMIT(PPC_ADD_REG(FNZ, s1, s2)); \
  574. emith_set_arith_flags(d, s1, s2, 0, 0); \
  575. } \
  576. } while (0)
  577. #define emith_addf_r_r_r_lsr(d, s1, s2, simm) do { \
  578. if (simm) { \
  579. EMIT(PPC_LSRW_IMM(AT, s2, simm)); \
  580. EMIT(PPC_ADD_REG(FNZ, s1, AT)); \
  581. emith_set_arith_flags(d, s1, AT, 0, 0); \
  582. } else { \
  583. EMIT(PPC_ADD_REG(FNZ, s1, s2)); \
  584. emith_set_arith_flags(d, s1, s2, 0, 0); \
  585. } \
  586. } while (0)
  587. #define emith_sub_r_r_r_lsl(d, s1, s2, simm) do { \
  588. if (simm) { \
  589. EMIT(PPC_LSLW_IMM(AT, s2, simm)); \
  590. EMIT(PPC_SUB_REG(d, s1, AT)); \
  591. } else EMIT(PPC_SUB_REG(d, s1, s2)); \
  592. } while (0)
  593. #define emith_subf_r_r_r_lsl(d, s1, s2, simm) do { \
  594. if (simm) { \
  595. EMIT(PPC_LSLW_IMM(AT, s2, simm)); \
  596. EMIT(PPC_SUB_REG(FNZ, s1, AT)); \
  597. emith_set_arith_flags(d, s1, AT, 0, 1); \
  598. } else { \
  599. EMIT(PPC_SUB_REG(FNZ, s1, s2)); \
  600. emith_set_arith_flags(d, s1, s2, 0, 1); \
  601. } \
  602. } while (0)
  603. #define emith_or_r_r_r_lsl(d, s1, s2, simm) do { \
  604. if (simm) { \
  605. EMIT(PPC_LSLW_IMM(AT, s2, simm)); \
  606. EMIT(PPC_OR_REG(d, s1, AT)); \
  607. } else EMIT(PPC_OR_REG(d, s1, s2)); \
  608. } while (0)
  609. #define emith_or_r_r_r_lsr(d, s1, s2, simm) do { \
  610. if (simm) { \
  611. EMIT(PPC_LSRW_IMM(AT, s2, simm)); \
  612. EMIT(PPC_OR_REG(d, s1, AT)); \
  613. } else EMIT(PPC_OR_REG(d, s1, s2)); \
  614. } while (0)
  615. #define emith_eor_r_r_r_lsl(d, s1, s2, simm) do { \
  616. if (simm) { \
  617. EMIT(PPC_LSLW_IMM(AT, s2, simm)); \
  618. EMIT(PPC_XOR_REG(d, s1, AT)); \
  619. } else EMIT(PPC_XOR_REG(d, s1, s2)); \
  620. } while (0)
  621. #define emith_eor_r_r_r_lsr(d, s1, s2, simm) do { \
  622. if (simm) { \
  623. EMIT(PPC_LSRW_IMM(AT, s2, simm)); \
  624. EMIT(PPC_XOR_REG(d, s1, AT)); \
  625. } else EMIT(PPC_XOR_REG(d, s1, s2)); \
  626. } while (0)
  627. #define emith_and_r_r_r_lsl(d, s1, s2, simm) do { \
  628. if (simm) { \
  629. EMIT(PPC_LSLW_IMM(AT, s2, simm)); \
  630. EMIT(PPC_AND_REG(d, s1, AT)); \
  631. } else EMIT(PPC_AND_REG(d, s1, s2)); \
  632. } while (0)
  633. #define emith_or_r_r_lsl(d, s, lslimm) \
  634. emith_or_r_r_r_lsl(d, d, s, lslimm)
  635. #define emith_or_r_r_lsr(d, s, lsrimm) \
  636. emith_or_r_r_r_lsr(d, d, s, lsrimm)
  637. #define emith_eor_r_r_lsl(d, s, lslimm) \
  638. emith_eor_r_r_r_lsl(d, d, s, lslimm)
  639. #define emith_eor_r_r_lsr(d, s, lsrimm) \
  640. emith_eor_r_r_r_lsr(d, d, s, lsrimm)
  641. #define emith_add_r_r_r(d, s1, s2) \
  642. emith_add_r_r_r_lsl(d, s1, s2, 0)
  643. #define emith_addf_r_r_r_ptr(d, s1, s2) \
  644. emith_addf_r_r_r_lsl_ptr(d, s1, s2, 0)
  645. #define emith_addf_r_r_r(d, s1, s2) \
  646. emith_addf_r_r_r_lsl(d, s1, s2, 0)
  647. #define emith_sub_r_r_r(d, s1, s2) \
  648. emith_sub_r_r_r_lsl(d, s1, s2, 0)
  649. #define emith_subf_r_r_r(d, s1, s2) \
  650. emith_subf_r_r_r_lsl(d, s1, s2, 0)
  651. #define emith_or_r_r_r(d, s1, s2) \
  652. emith_or_r_r_r_lsl(d, s1, s2, 0)
  653. #define emith_eor_r_r_r(d, s1, s2) \
  654. emith_eor_r_r_r_lsl(d, s1, s2, 0)
  655. #define emith_and_r_r_r(d, s1, s2) \
  656. emith_and_r_r_r_lsl(d, s1, s2, 0)
  657. #define emith_add_r_r_ptr(d, s) \
  658. emith_add_r_r_r_lsl_ptr(d, d, s, 0)
  659. #define emith_add_r_r(d, s) \
  660. emith_add_r_r_r(d, d, s)
  661. #define emith_sub_r_r(d, s) \
  662. emith_sub_r_r_r(d, d, s)
  663. #define emith_neg_r_r(d, s) \
  664. EMIT(PPC_NEG_REG(d, s))
  665. #define emith_adc_r_r_r(d, s1, s2) do { \
  666. emith_add_r_r_r(AT, s2, FC); \
  667. emith_add_r_r_r(d, s1, AT); \
  668. } while (0)
  669. #define emith_sbc_r_r_r(d, s1, s2) do { \
  670. emith_add_r_r_r(AT, s2, FC); \
  671. emith_sub_r_r_r(d, s1, AT); \
  672. } while (0)
  673. #define emith_adc_r_r(d, s) \
  674. emith_adc_r_r_r(d, d, s)
  675. #define emith_negc_r_r(d, s) do { \
  676. emith_neg_r_r(d, s); \
  677. emith_sub_r_r(d, FC); \
  678. } while (0)
  679. // NB: the incoming carry Cin can cause Cout if s2+Cin=0 (or s1+Cin=0 FWIW)
  680. // moreover, if s2+Cin=0 caused Cout, s1+s2+Cin=s1+0 can't cause another Cout
  681. #define emith_adcf_r_r_r(d, s1, s2) do { \
  682. emith_add_r_r_r(FNZ, s2, FC); \
  683. EMIT_PPC_SLTWU_REG(AT, FNZ, FC); \
  684. emith_add_r_r_r(FNZ, s1, FNZ); \
  685. emith_set_arith_flags(d, s1, s2, 0, 0); \
  686. emith_or_r_r(FC, AT); \
  687. } while (0)
  688. #define emith_sbcf_r_r_r(d, s1, s2) do { \
  689. emith_add_r_r_r(FNZ, s2, FC); \
  690. EMIT_PPC_SLTWU_REG(AT, FNZ, FC); \
  691. emith_sub_r_r_r(FNZ, s1, FNZ); \
  692. emith_set_arith_flags(d, s1, s2, 0, 1); \
  693. emith_or_r_r(FC, AT); \
  694. } while (0)
  695. #define emith_and_r_r(d, s) \
  696. emith_and_r_r_r(d, d, s)
  697. #define emith_and_r_r_c(cond, d, s) \
  698. emith_and_r_r(d, s)
  699. #define emith_or_r_r(d, s) \
  700. emith_or_r_r_r(d, d, s)
  701. #define emith_eor_r_r(d, s) \
  702. emith_eor_r_r_r(d, d, s)
  703. #define emith_tst_r_r_ptr(d, s) do { \
  704. if (d != s) { \
  705. emith_and_r_r_r(FNZ, d, s); \
  706. emith_cmp_ra = emith_cmp_rb = -1; \
  707. } else emith_cmp_ra = s, emith_cmp_rb = -1, emith_cmp_imm = 0; \
  708. } while (0)
  709. #define emith_tst_r_r(d, s) \
  710. emith_tst_r_r_ptr(d, s)
  711. #define emith_teq_r_r(d, s) do { \
  712. emith_eor_r_r_r(FNZ, d, s); \
  713. emith_cmp_ra = emith_cmp_rb = -1; \
  714. } while (0)
  715. #define emith_cmp_r_r(d, s) \
  716. emith_set_compare_flags(d, s, 0)
  717. // emith_subf_r_r_r(FNZ, d, s)
  718. #define emith_addf_r_r(d, s) \
  719. emith_addf_r_r_r(d, d, s)
  720. #define emith_subf_r_r(d, s) \
  721. emith_subf_r_r_r(d, d, s)
  722. #define emith_adcf_r_r(d, s) \
  723. emith_adcf_r_r_r(d, d, s)
  724. #define emith_sbcf_r_r(d, s) \
  725. emith_sbcf_r_r_r(d, d, s)
  726. #define emith_negcf_r_r(d, s) do { \
  727. emith_add_r_r_r(FNZ, s, FC); \
  728. EMIT_PPC_SLTWU_REG(AT, FNZ, FC); \
  729. emith_neg_r_r(FNZ, FNZ); \
  730. emith_set_arith_flags(d, Z0, s, 0, 1); \
  731. emith_or_r_r(FC, AT); \
  732. } while (0)
  733. // move immediate
  734. static void emith_move_imm(int r, int ptr, uintptr_t imm)
  735. {
  736. #ifdef __powerpc64__
  737. if (ptr && (s32)imm != imm) {
  738. emith_move_imm(r, 0, imm >> 32);
  739. if (imm >> 32)
  740. EMIT(PPC_LSL_IMM(r, r, 32));
  741. if (imm & 0x0000ffff)
  742. EMIT(PPC_OR_IMM(r, r, imm & 0x0000ffff));
  743. if (imm & 0xffff0000)
  744. EMIT(PPC_ORT_IMM(r, r, (imm & 0xffff0000) >> 16));
  745. } else
  746. #endif
  747. if ((s16)imm != (s32)imm) {
  748. EMIT(PPC_ADDT_IMM(r, Z0, (u16)(imm>>16)));
  749. if ((s16)imm)
  750. EMIT(PPC_OR_IMM(r, r, (u16)(imm)));
  751. } else EMIT(PPC_ADD_IMM(r, Z0, (u16)imm));
  752. }
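// e.g. emith_move_r_imm(r, 0x12345678) emits "addis r,0,0x1234; ori r,r,0x5678";
// since ori zero extends its immediate, no sign correction is needed here,
// unlike in emith_add_imm() below.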
  753. #define emith_move_r_ptr_imm(r, imm) \
  754. emith_move_imm(r, 1, (uintptr_t)(imm))
  755. #define emith_move_r_imm(r, imm) \
  756. emith_move_imm(r, 0, (u32)(imm))
  757. #define emith_move_r_imm_c(cond, r, imm) \
  758. emith_move_r_imm(r, imm)
  759. #define emith_move_r_imm_s8_patchable(r, imm) \
  760. EMIT(PPC_ADD_IMM(r, Z0, (u16)(s8)(imm)))
  761. #define emith_move_r_imm_s8_patch(ptr, imm) do { \
  762. u32 *ptr_ = (u32 *)ptr; \
  763. EMIT_PTR(ptr_, (*ptr_ & 0xffff0000) | (u16)(s8)(imm)); \
  764. } while (0)
  765. // arithmetic, immediate - can only be ADDI, since SUBI doesn't exist
  766. static void emith_add_imm(int rt, int ra, u32 imm)
  767. {
  768. int s = ra;
  769. if ((u16)imm) {
  770. EMIT(PPC_ADD_IMM(rt, s, (u16)imm));
  771. s = rt;
  772. }
  773. // adjust for sign extension in ADDI
  774. imm = (imm >> 16) + ((s16)imm < 0);
  775. if ((u16)imm || rt != s)
  776. EMIT(PPC_ADDT_IMM(rt, s, (u16)imm));
  777. }
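// e.g. emith_add_imm(rt, ra, 0x18000): "addi rt,ra,-0x8000" (the low half is
// sign extended by addi), then "addis rt,rt,2" adds 0x20000 to compensate,
// for a net +0x18000.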
  778. #define emith_add_r_imm(r, imm) \
  779. emith_add_r_r_imm(r, r, imm)
  780. #define emith_add_r_imm_c(cond, r, imm) \
  781. emith_add_r_imm(r, imm)
  782. #define emith_addf_r_imm(r, imm) \
  783. emith_addf_r_r_imm(r, imm)
  784. #define emith_sub_r_imm(r, imm) \
  785. emith_sub_r_r_imm(r, r, imm)
  786. #define emith_sub_r_imm_c(cond, r, imm) \
  787. emith_sub_r_imm(r, imm)
  788. #define emith_subf_r_imm(r, imm) \
  789. emith_subf_r_r_imm(r, r, imm)
  790. #define emith_adc_r_imm(r, imm) \
  791. emith_adc_r_r_imm(r, r, imm)
  792. #define emith_adcf_r_imm(r, imm) \
  793. emith_adcf_r_r_imm(r, r, imm)
  794. #define emith_cmp_r_imm(r, imm) \
  795. emith_set_compare_flags(r, -1, imm)
  796. // emith_subf_r_r_imm(FNZ, r, (s16)imm)
  797. #define emith_add_r_r_ptr_imm(d, s, imm) \
  798. emith_add_imm(d, s, imm)
  799. #define emith_add_r_r_imm(d, s, imm) \
  800. emith_add_r_r_ptr_imm(d, s, imm)
  801. #define emith_addf_r_r_imm(d, s, imm) do { \
  802. emith_add_r_r_imm(FNZ, s, imm); \
  803. emith_set_arith_flags(d, s, -1, imm, 0); \
  804. } while (0)
  805. #define emith_adc_r_r_imm(d, s, imm) do { \
  806. emith_add_r_r_r(AT, s, FC); \
  807. emith_add_r_r_imm(d, AT, imm); \
  808. } while (0)
  809. #define emith_adcf_r_r_imm(d, s, imm) do { \
  810. if (imm == 0) { \
  811. emith_add_r_r_r(FNZ, s, FC); \
  812. emith_set_arith_flags(d, s, -1, 1, 0); \
  813. } else { \
  814. emith_add_r_r_r(FNZ, s, FC); \
  815. EMIT_PPC_SLTWU_REG(AT, FNZ, FC); \
  816. emith_add_r_r_imm(FNZ, FNZ, imm); \
  817. emith_set_arith_flags(d, s, -1, imm, 0); \
  818. emith_or_r_r(FC, AT); \
  819. } \
  820. } while (0)
  821. // NB: no SUBI, since ADDI takes a signed imm
  822. #define emith_sub_r_r_imm(d, s, imm) \
  823. emith_add_r_r_imm(d, s, -(imm))
  824. #define emith_sub_r_r_imm_c(cond, d, s, imm) \
  825. emith_sub_r_r_imm(d, s, imm)
  826. #define emith_subf_r_r_imm(d, s, imm) do { \
  827. emith_sub_r_r_imm(FNZ, s, imm); \
  828. emith_set_arith_flags(d, s, -1, imm, 1); \
  829. } while (0)
  830. // logical, immediate
  831. #define emith_log_imm2(opi, opr, rt, ra, imm) do { \
  832. if ((imm) >> 16 || opi == OP_ANDI) { /* too big, or microcoded ANDI */ \
  833. emith_move_r_imm(AT, imm); \
  834. EMIT(PPC_OP_REG(OP__EXT, opr, ra, rt, AT)); \
  835. } else if (/*opi == OP_ANDI ||*/ imm || rt != ra) \
  836. EMIT(PPC_OP_IMM(opi, ra, rt, imm)); \
  837. } while (0)
  838. #define emith_log_imm(op, rt, ra, imm) \
  839. emith_log_imm2(OP_##op##I, OPE_##op, rt, ra, imm)
  840. #define emith_and_r_imm(r, imm) \
  841. emith_log_imm(AND, r, r, imm)
  842. #define emith_or_r_imm(r, imm) \
  843. emith_log_imm(OR, r, r, imm)
  844. #define emith_or_r_imm_c(cond, r, imm) \
  845. emith_or_r_imm(r, imm)
  846. #define emith_eor_r_imm_ptr(r, imm) \
  847. emith_log_imm(XOR, r, r, imm)
  848. #define emith_eor_r_imm_ptr_c(cond, r, imm) \
  849. emith_eor_r_imm_ptr(r, imm)
  850. #define emith_eor_r_imm(r, imm) \
  851. emith_eor_r_imm_ptr(r, imm)
  852. #define emith_eor_r_imm_c(cond, r, imm) \
  853. emith_eor_r_imm(r, imm)
  854. /* NB: BIC #imm not available; use AND #~imm instead */
  855. #define emith_bic_r_imm(r, imm) \
  856. emith_log_imm(AND, r, r, ~(imm))
  857. #define emith_bic_r_imm_c(cond, r, imm) \
  858. emith_bic_r_imm(r, imm)
  859. #define emith_tst_r_imm(r, imm) do { \
  860. emith_log_imm(AND, FNZ, r, imm); \
  861. emith_cmp_ra = emith_cmp_rb = -1; \
  862. } while (0)
  863. #define emith_tst_r_imm_c(cond, r, imm) \
  864. emith_tst_r_imm(r, imm)
  865. #define emith_and_r_r_imm(d, s, imm) \
  866. emith_log_imm(AND, d, s, imm)
  867. #define emith_or_r_r_imm(d, s, imm) \
  868. emith_log_imm(OR, d, s, imm)
  869. #define emith_eor_r_r_imm(d, s, imm) \
  870. emith_log_imm(XOR, d, s, imm)
  871. // shift
  872. #define emith_lsl(d, s, cnt) \
  873. EMIT(PPC_LSLW_IMM(d, s, cnt))
  874. #define emith_lsr(d, s, cnt) \
  875. EMIT(PPC_LSRW_IMM(d, s, cnt))
  876. #define emith_asr(d, s, cnt) \
  877. EMIT(PPC_ASRW_IMM(d, s, cnt))
  878. #define emith_ror(d, s, cnt) \
  879. EMIT(PPC_ROLW_IMM(d, s, 32-(cnt)))
  880. #define emith_ror_c(cond, d, s, cnt) \
  881. emith_ror(d, s, cnt)
  882. #define emith_rol(d, s, cnt) \
  883. EMIT(PPC_ROLW_IMM(d, s, cnt))
  884. #define emith_rorc(d) do { \
  885. emith_lsr(d, d, 1); \
  886. emith_lsl(AT, FC, 31); \
  887. emith_or_r_r(d, AT); \
  888. } while (0)
  889. #define emith_rolc(d) do { \
  890. emith_lsl(d, d, 1); \
  891. emith_or_r_r(d, FC); \
  892. } while (0)
  893. // NB: all flag setting shifts make V undefined
  894. #define emith_lslf(d, s, cnt) do { \
  895. int _s = s; \
  896. if ((cnt) > 1) { \
  897. emith_lsl(d, s, cnt-1); \
  898. _s = d; \
  899. } \
  900. if ((cnt) > 0) { \
  901. emith_lsr(FC, _s, 31); \
  902. emith_lsl(d, _s, 1); \
  903. } \
  904. emith_move_r_r(FNZ, d); \
  905. emith_cmp_ra = emith_cmp_rb = -1; \
  906. } while (0)
  907. #define emith_lsrf(d, s, cnt) do { \
  908. int _s = s; \
  909. if ((cnt) > 1) { \
  910. emith_lsr(d, s, cnt-1); \
  911. _s = d; \
  912. } \
  913. if ((cnt) > 0) { \
  914. emith_and_r_r_imm(FC, _s, 1); \
  915. emith_lsr(d, _s, 1); \
  916. } \
  917. emith_move_r_r(FNZ, d); \
  918. emith_cmp_ra = emith_cmp_rb = -1; \
  919. } while (0)
  920. #define emith_asrf(d, s, cnt) do { \
  921. int _s = s; \
  922. if ((cnt) > 1) { \
  923. emith_asr(d, s, cnt-1); \
  924. _s = d; \
  925. } \
  926. if ((cnt) > 0) { \
  927. emith_and_r_r_imm(FC, _s, 1); \
  928. emith_asr(d, _s, 1); \
  929. } \
  930. emith_move_r_r(FNZ, d); \
  931. emith_cmp_ra = emith_cmp_rb = -1; \
  932. } while (0)
  933. #define emith_rolf(d, s, cnt) do { \
  934. emith_rol(d, s, cnt); \
  935. emith_and_r_r_imm(FC, d, 1); \
  936. emith_move_r_r(FNZ, d); \
  937. emith_cmp_ra = emith_cmp_rb = -1; \
  938. } while (0)
  939. #define emith_rorf(d, s, cnt) do { \
  940. emith_ror(d, s, cnt); \
  941. emith_lsr(FC, d, 31); \
  942. emith_move_r_r(FNZ, d); \
  943. emith_cmp_ra = emith_cmp_rb = -1; \
  944. } while (0)
  945. #define emith_rolcf(d) do { \
  946. emith_lsr(AT, d, 31); \
  947. emith_lsl(d, d, 1); \
  948. emith_or_r_r(d, FC); \
  949. emith_move_r_r(FC, AT); \
  950. emith_move_r_r(FNZ, d); \
  951. emith_cmp_ra = emith_cmp_rb = -1; \
  952. } while (0)
  953. #define emith_rorcf(d) do { \
  954. emith_and_r_r_imm(AT, d, 1); \
  955. emith_lsr(d, d, 1); \
  956. emith_lsl(FC, FC, 31); \
  957. emith_or_r_r(d, FC); \
  958. emith_move_r_r(FC, AT); \
  959. emith_move_r_r(FNZ, d); \
  960. emith_cmp_ra = emith_cmp_rb = -1; \
  961. } while (0)
  962. // signed/unsigned extend
  963. #define emith_clear_msb(d, s, count) /* bits to clear */ \
  964. EMIT(PPC_BFXW_IMM(d, s, count, 32-(count)))
  965. #define emith_clear_msb_c(cond, d, s, count) \
  966. emith_clear_msb(d, s, count)
  967. #define emith_sext(d, s, count) /* bits to keep */ do { \
  968. if (count == 8) \
  969. EMIT(PPC_EXTSB_REG(d, s)); \
  970. else if (count == 16) \
  971. EMIT(PPC_EXTSH_REG(d, s)); \
  972. else { \
  973. emith_lsl(d, s, 32-(count)); \
  974. emith_asr(d, d, 32-(count)); \
  975. } \
  976. } while (0)
  977. // multiply Rd = Rn*Rm (+ Ra)
  978. #define emith_mul(d, s1, s2) \
  979. EMIT(PPC_MUL(d, s1, s2))
  980. #define emith_mul_u64(dlo, dhi, s1, s2) \
  981. EMIT_PPC_MULLU_REG(dlo, dhi, s1, s2)
  982. #define emith_mul_s64(dlo, dhi, s1, s2) \
  983. EMIT_PPC_MULLS_REG(dlo, dhi, s1, s2)
  984. #define emith_mula_s64(dlo, dhi, s1, s2) \
  985. EMIT_PPC_MACLS_REG(dlo, dhi, s1, s2)
  986. #define emith_mula_s64_c(cond, dlo, dhi, s1, s2) \
  987. emith_mula_s64(dlo, dhi, s1, s2)
  988. // load/store. offs has 16 bits signed, which is currently sufficient
  989. #define emith_read_r_r_offs_ptr(r, ra, offs) \
  990. EMIT(PPC_LDP_IMM(r, ra, offs))
  991. #define emith_read_r_r_offs_ptr_c(cond, r, ra, offs) \
  992. emith_read_r_r_offs_ptr(r, ra, offs)
  993. #define emith_read_r_r_offs(r, ra, offs) \
  994. EMIT(PPC_LDW_IMM(r, ra, offs))
  995. #define emith_read_r_r_offs_c(cond, r, ra, offs) \
  996. emith_read_r_r_offs(r, ra, offs)
  997. #define emith_read_r_r_r_ptr(r, ra, rm) \
  998. EMIT(PPC_LDP_REG(r, ra, rm))
  999. #define emith_read_r_r_r(r, ra, rm) \
  1000. EMIT(PPC_LDW_REG(r, ra, rm))
  1001. #define emith_read_r_r_r_c(cond, r, ra, rm) \
  1002. emith_read_r_r_r(r, ra, rm)
  1003. #define emith_read8_r_r_offs(r, ra, offs) \
  1004. EMIT(PPC_LDB_IMM(r, ra, offs))
  1005. #define emith_read8_r_r_offs_c(cond, r, ra, offs) \
  1006. emith_read8_r_r_offs(r, ra, offs)
  1007. #define emith_read8_r_r_r(r, ra, rm) \
  1008. EMIT(PPC_LDB_REG(r, ra, rm))
  1009. #define emith_read8_r_r_r_c(cond, r, ra, rm) \
  1010. emith_read8_r_r_r(r, ra, rm)
  1011. #define emith_read16_r_r_offs(r, ra, offs) \
  1012. EMIT(PPC_LDH_IMM(r, ra, offs))
  1013. #define emith_read16_r_r_offs_c(cond, r, ra, offs) \
  1014. emith_read16_r_r_offs(r, ra, offs)
  1015. #define emith_read16_r_r_r(r, ra, rm) \
  1016. EMIT(PPC_LDH_REG(r, ra, rm))
  1017. #define emith_read16_r_r_r_c(cond, r, ra, rm) \
  1018. emith_read16_r_r_r(r, ra, rm)
  1019. #define emith_read8s_r_r_offs(r, ra, offs) do { \
  1020. EMIT(PPC_LDB_IMM(r, ra, offs)); \
  1021. EMIT(PPC_EXTSB_REG(r, r)); \
  1022. } while (0)
  1023. #define emith_read8s_r_r_offs_c(cond, r, ra, offs) \
  1024. emith_read8s_r_r_offs(r, ra, offs)
  1025. #define emith_read8s_r_r_r(r, ra, rm) do { \
  1026. EMIT(PPC_LDB_REG(r, ra, rm)); \
  1027. EMIT(PPC_EXTSB_REG(r, r)); \
  1028. } while (0)
  1029. #define emith_read8s_r_r_r_c(cond, r, ra, rm) \
  1030. emith_read8s_r_r_r(r, ra, rm)
  1031. #define emith_read16s_r_r_offs(r, ra, offs) do { \
  1032. EMIT(PPC_LDH_IMM(r, ra, offs)); \
  1033. EMIT(PPC_EXTSH_REG(r, r)); \
  1034. } while (0)
  1035. #define emith_read16s_r_r_offs_c(cond, r, ra, offs) \
  1036. emith_read16s_r_r_offs(r, ra, offs)
  1037. #define emith_read16s_r_r_r(r, ra, rm) do { \
  1038. EMIT(PPC_LDH_REG(r, ra, rm)); \
  1039. EMIT(PPC_EXTSH_REG(r, r)); \
  1040. } while (0)
  1041. #define emith_read16s_r_r_r_c(cond, r, ra, rm) \
  1042. emith_read16s_r_r_r(r, ra, rm)
  1043. #define emith_write_r_r_offs_ptr(r, ra, offs) \
  1044. EMIT(PPC_STP_IMM(r, ra, offs))
  1045. #define emith_write_r_r_offs_ptr_c(cond, r, ra, offs) \
  1046. emith_write_r_r_offs_ptr(r, ra, offs)
  1047. #define emith_write_r_r_r_ptr(r, ra, rm) \
  1048. EMIT(PPC_STP_REG(r, ra, rm))
  1049. #define emith_write_r_r_r_ptr_c(cond, r, ra, rm) \
  1050. emith_write_r_r_r_ptr(r, ra, rm)
  1051. #define emith_write_r_r_offs(r, ra, offs) \
  1052. EMIT(PPC_STW_IMM(r, ra, offs))
  1053. #define emith_write_r_r_offs_c(cond, r, ra, offs) \
  1054. emith_write_r_r_offs(r, ra, offs)
  1055. #define emith_write_r_r_r(r, ra, rm) \
  1056. EMIT(PPC_STW_REG(r, ra, rm))
  1057. #define emith_write_r_r_r_c(cond, r, ra, rm) \
  1058. emith_write_r_r_r(r, ra, rm)
  1059. #define emith_ctx_read_ptr(r, offs) \
  1060. emith_read_r_r_offs_ptr(r, CONTEXT_REG, offs)
  1061. #define emith_ctx_read(r, offs) \
  1062. emith_read_r_r_offs(r, CONTEXT_REG, offs)
  1063. #define emith_ctx_read_c(cond, r, offs) \
  1064. emith_ctx_read(r, offs)
  1065. #define emith_ctx_write_ptr(r, offs) \
  1066. emith_write_r_r_offs_ptr(r, CONTEXT_REG, offs)
  1067. #define emith_ctx_write(r, offs) \
  1068. emith_write_r_r_offs(r, CONTEXT_REG, offs)
  1069. #define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
  1070. int r_ = r, offs_ = offs, cnt_ = cnt; \
  1071. for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
  1072. emith_ctx_read(r_, offs_); \
  1073. } while (0)
  1074. #define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
  1075. int r_ = r, offs_ = offs, cnt_ = cnt; \
  1076. for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
  1077. emith_ctx_write(r_, offs_); \
  1078. } while (0)
  1079. // function call handling
  1080. #define emith_save_caller_regs(mask) do { \
  1081. int _c, _z = PTR_SIZE; u32 _m = mask & 0x1ff8; /* r3-r12 */ \
  1082. if (__builtin_parity(_m) == 1) _m |= 0x1; /* ABI align */ \
  1083. int _s = count_bits(_m) * _z, _o = _s; \
  1084. if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
  1085. for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
  1086. if (_m & (1 << _c)) \
  1087. { _o -= _z; if (_c) emith_write_r_r_offs_ptr(_c, SP, _o); } \
  1088. } while (0)
  1089. #define emith_restore_caller_regs(mask) do { \
  1090. int _c, _z = PTR_SIZE; u32 _m = mask & 0x1ff8; \
  1091. if (__builtin_parity(_m) == 1) _m |= 0x1; \
  1092. int _s = count_bits(_m) * _z, _o = 0; \
  1093. for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
  1094. if (_m & (1 << _c)) \
  1095. { if (_c) emith_read_r_r_offs_ptr(_c, SP, _o); _o += _z; } \
  1096. if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
  1097. } while (0)
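// the parity test rounds the number of saved regs up to an even count (the r0
// slot is only reserved, never stored), presumably to keep the stack pointer
// aligned to 2*PTR_SIZE as the ABIs require.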
  1098. #define host_arg2reg(rt, arg) \
  1099. rt = (arg+3)
  1100. #define emith_pass_arg_r(arg, reg) \
  1101. emith_move_r_r(arg, reg)
  1102. #define emith_pass_arg_imm(arg, imm) \
  1103. emith_move_r_imm(arg, imm)
  1104. // branching
  1105. #define emith_invert_branch(cond) /* inverted conditional branch */ \
  1106. ((cond) ^ 0x40)
  1107. // evaluate the emulated condition: set *op to the needed compare insn and return the branch type to use
  1108. static int emith_cmpr_check(int rs, int rt, int cond, u32 *op)
  1109. {
  1110. int b = -1;
  1111. // condition check for comparing 2 registers
  1112. switch (cond) {
  1113. case DCOND_EQ: *op = PPC_CMPW_REG(rs, rt); b = BEQ; break;
  1114. case DCOND_NE: *op = PPC_CMPW_REG(rs, rt); b = BNE; break;
  1115. case DCOND_LO: *op = PPC_CMPLW_REG(rs, rt); b = BLT; break;
  1116. case DCOND_HS: *op = PPC_CMPLW_REG(rs, rt); b = BGE; break;
  1117. case DCOND_LS: *op = PPC_CMPLW_REG(rs, rt); b = BLE; break;
  1118. case DCOND_HI: *op = PPC_CMPLW_REG(rs, rt); b = BGT; break;
  1119. case DCOND_LT: *op = PPC_CMPW_REG(rs, rt); b = BLT; break;
  1120. case DCOND_GE: *op = PPC_CMPW_REG(rs, rt); b = BGE; break;
  1121. case DCOND_LE: *op = PPC_CMPW_REG(rs, rt); b = BLE; break;
  1122. case DCOND_GT: *op = PPC_CMPW_REG(rs, rt); b = BGT; break;
  1123. }
  1124. return b;
  1125. }
  1126. static int emith_cmpi_check(int rs, s32 imm, int cond, u32 *op)
  1127. {
  1128. int b = -1;
  1129. // condition check for comparing register with immediate
  1130. switch (cond) {
  1131. case DCOND_EQ: *op = PPC_CMPW_IMM(rs, (u16)imm), b = BEQ; break;
  1132. case DCOND_NE: *op = PPC_CMPW_IMM(rs, (u16)imm), b = BNE; break;
  1133. case DCOND_LO: *op = PPC_CMPLW_IMM(rs, (u16)imm), b = BLT; break;
  1134. case DCOND_HS: *op = PPC_CMPLW_IMM(rs, (u16)imm), b = BGE; break;
  1135. case DCOND_LS: *op = PPC_CMPLW_IMM(rs, (u16)imm), b = BLE; break;
  1136. case DCOND_HI: *op = PPC_CMPLW_IMM(rs, (u16)imm), b = BGT; break;
  1137. case DCOND_LT: *op = PPC_CMPW_IMM(rs, (u16)imm), b = BLT; break;
  1138. case DCOND_GE: *op = PPC_CMPW_IMM(rs, (u16)imm), b = BGE; break;
  1139. case DCOND_LE: *op = PPC_CMPW_IMM(rs, (u16)imm), b = BLE; break;
  1140. case DCOND_GT: *op = PPC_CMPW_IMM(rs, (u16)imm), b = BGT; break;
  1141. }
  1142. return b;
  1143. }
  1144. static int emith_cond_check(int cond)
  1145. {
  1146. int b = -1;
  1147. u32 op = 0;
  1148. if (emith_cmp_ra >= 0) {
  1149. if (emith_cmp_rb != -1)
  1150. b = emith_cmpr_check(emith_cmp_ra,emith_cmp_rb, cond,&op);
  1151. else b = emith_cmpi_check(emith_cmp_ra,emith_cmp_imm,cond,&op);
  1152. }
  1153. // shortcut for V known to be 0
  1154. if (b < 0 && emith_flg_noV) switch (cond) {
  1155. case DCOND_VS: /* no branch */ break; // never
  1156. case DCOND_VC: b = BXX; break; // always
  1157. case DCOND_LT: op = PPC_CMPW_IMM(FNZ, 0); b = BLT; break; // N
  1158. case DCOND_GE: op = PPC_CMPW_IMM(FNZ, 0); b = BGE; break; // !N
  1159. case DCOND_LE: op = PPC_CMPW_IMM(FNZ, 0); b = BLE; break; // N || Z
  1160. case DCOND_GT: op = PPC_CMPW_IMM(FNZ, 0); b = BGT; break; // !N && !Z
  1161. }
  1162. // the full monty if no shortcut
  1163. if (b < 0) switch (cond) {
  1164. // conditions using NZ
  1165. case DCOND_EQ: op = PPC_CMPW_IMM(FNZ, 0); b = BEQ; break; // Z
  1166. case DCOND_NE: op = PPC_CMPW_IMM(FNZ, 0); b = BNE; break; // !Z
  1167. case DCOND_MI: op = PPC_CMPW_IMM(FNZ, 0); b = BLT; break; // N
  1168. case DCOND_PL: op = PPC_CMPW_IMM(FNZ, 0); b = BGE; break; // !N
  1169. // conditions using C
  1170. case DCOND_LO: op = PPC_CMPW_IMM(FC , 0); b = BNE; break; // C
  1171. case DCOND_HS: op = PPC_CMPW_IMM(FC , 0); b = BEQ; break; // !C
  1172. // conditions using CZ
  1173. case DCOND_LS: // C || Z
  1174. case DCOND_HI: // !C && !Z
  1175. EMIT(PPC_ADD_IMM(AT, FC, (u16)-1)); // !C && !Z
  1176. EMIT(PPC_AND_REG(AT, FNZ, AT));
  1177. op = PPC_CMPW_IMM(AT , 0); b = (cond == DCOND_HI ? BNE : BEQ);
  1178. break;
  1179. // conditions using V
  1180. case DCOND_VS: // V
  1181. case DCOND_VC: // !V
  1182. EMIT(PPC_XOR_REG(AT, FV, FNZ)); // V = Nt^Ns^Nd^C
  1183. EMIT(PPC_LSRW_IMM(AT, AT, 31));
  1184. EMIT(PPC_XOR_REG(AT, AT, FC));
  1185. op = PPC_CMPW_IMM(AT , 0); b = (cond == DCOND_VS ? BNE : BEQ);
  1186. break;
  1187. // conditions using VNZ
  1188. case DCOND_LT: // N^V
  1189. case DCOND_GE: // !(N^V)
  1190. EMIT(PPC_LSRW_IMM(AT, FV, 31)); // Nd^V = Nt^Ns^C
  1191. EMIT(PPC_XOR_REG(AT, FC, AT));
  1192. op = PPC_CMPW_IMM(AT , 0); b = (cond == DCOND_LT ? BNE : BEQ);
  1193. break;
  1194. case DCOND_LE: // (N^V) || Z
  1195. case DCOND_GT: // !(N^V) && !Z
  1196. EMIT(PPC_LSRW_IMM(AT, FV, 31)); // Nd^V = Nt^Ns^C
  1197. EMIT(PPC_XOR_REG(AT, FC, AT));
  1198. EMIT(PPC_ADD_IMM(AT, AT, -1)); // !(Nd^V) && !Z
  1199. EMIT(PPC_AND_REG(AT, FNZ, AT));
  1200. op = PPC_CMPW_IMM(AT , 0); b = (cond == DCOND_GT ? BNE : BEQ);
  1201. break;
  1202. }
  1203. if (op) EMIT(op);
  1204. return b;
  1205. }
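/* usage sketch (illustrative, mirrors emith_jump_cond below): the function
 * emits any compare it needs and returns the branch condition to test, e.g.
 *   int b = emith_cond_check(cond);        // may EMIT a cmpw/cmplw here
 *   if (b >= 0) EMIT(PPC_BCOND(b, disp));  // disp computed by the caller
 * b < 0 means the condition can never hold, b == BXX means it always does. */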
#define emith_jump(target) do { \
    u32 disp_ = (u8 *)target - (u8 *)tcache_ptr; \
    EMIT(PPC_B((uintptr_t)disp_ & 0x03ffffff)); \
} while (0)
#define emith_jump_patchable(target) \
    emith_jump(target)
// NB: PPC conditional branches have only a +/- 32KB range
#define emith_jump_cond(cond, target) do { \
    int mcond_ = emith_cond_check(cond); \
    u32 disp_ = (u8 *)target - (u8 *)tcache_ptr; \
    if (mcond_ >= 0) EMIT(PPC_BCOND(mcond_, disp_ & 0x0000ffff)); \
} while (0)
#define emith_jump_cond_patchable(cond, target) \
    emith_jump_cond(cond, target)
#define emith_jump_cond_inrange(target) \
    ((u8 *)target - (u8 *)tcache_ptr < 0x8000 && \
     (u8 *)target - (u8 *)tcache_ptr >= -0x8000+0x10) // mind cond_check
// NB: returns position of patch for cache maintenance
#define emith_jump_patch(ptr, target, pos) do { \
    u32 *ptr_ = (u32 *)ptr; /* must skip condition check code */ \
    u32 disp_, mask_; \
    while (*ptr_>>26 != OP_BC && *ptr_>>26 != OP_B) ptr_++; \
    disp_ = (u8 *)target - (u8 *)ptr_; \
    mask_ = (*ptr_>>26 == OP_BC ? 0xffff0003 : 0xfc000003); \
    EMIT_PTR(ptr_, (*ptr_ & mask_) | (disp_ & ~mask_)); \
    if ((void *)(pos) != NULL) *(u8 **)(pos) = (u8 *)(ptr_-1); \
} while (0)
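/* the masks above select the displacement field of the branch found at ptr_:
 * 0xfc000003 keeps opcode and AA/LK of an unconditional b (OP_B) so its
 * 26-bit byte displacement is rewritten, while 0xffff0003 also keeps BO/BI
 * of a conditional bc (OP_BC) so only its 16-bit displacement is replaced. */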
#define emith_jump_patch_inrange(ptr, target) \
    ((u8 *)target - (u8 *)ptr < 0x8000 && \
     (u8 *)target - (u8 *)ptr >= -0x8000+0x10) // mind cond_check
#define emith_jump_patch_size() 4
#define emith_jump_at(ptr, target) do { \
    u32 disp_ = (u8 *)target - (u8 *)ptr; \
    u32 *ptr_ = (u32 *)ptr; \
    EMIT_PTR(ptr_, PPC_B((uintptr_t)disp_ & 0x03ffffff)); \
} while (0)
#define emith_jump_at_size() 4
#define emith_jump_reg(r) do { \
    EMIT(PPC_MTSP_REG(r, CTR)); \
    EMIT(PPC_BCTRCOND(BXX)); \
} while (0)
#define emith_jump_reg_c(cond, r) \
    emith_jump_reg(r)
#define emith_jump_ctx(offs) do { \
    emith_ctx_read_ptr(CR, offs); \
    emith_jump_reg(CR); \
} while (0)
#define emith_jump_ctx_c(cond, offs) \
    emith_jump_ctx(offs)
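/* note: CR in these jump/call helpers is this emitter's GPR alias for the
 * call scratch register (defined earlier in this file), not the PPC
 * condition register. */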
#define emith_call(target) do { \
    u32 disp_ = (u8 *)target - (u8 *)tcache_ptr; \
    EMIT(PPC_BL((uintptr_t)disp_ & 0x03ffffff)); \
} while (0)
#define emith_call_cond(cond, target) \
    emith_call(target)
#define emith_call_reg(r) do { \
    EMIT(PPC_MTSP_REG(r, CTR)); \
    EMIT(PPC_BLCTRCOND(BXX)); \
} while (0)
#define emith_call_ctx(offs) do { \
    emith_ctx_read_ptr(CR, offs); \
    emith_call_reg(CR); \
} while (0)
#define emith_abijump_reg(r) \
    if ((r) != CR) emith_move_r_r(CR, r); \
    emith_jump_reg(CR)
#define emith_abijump_reg_c(cond, r) \
    emith_abijump_reg(r)
#define emith_abicall(target) \
    emith_move_r_ptr_imm(CR, target); \
    emith_call_reg(CR);
#define emith_abicall_cond(cond, target) \
    emith_abicall(target)
#define emith_abicall_reg(r) \
    if ((r) != CR) emith_move_r_r(CR, r); \
    emith_call_reg(CR)
#define emith_call_cleanup() /**/
#define emith_ret() \
    EMIT(PPC_RET())
#define emith_ret_c(cond) \
    emith_ret()
#define emith_ret_to_ctx(offs) do { \
    EMIT(PPC_MFSP_REG(AT, LR)); \
    emith_ctx_write_ptr(AT, offs); \
} while (0)
#define emith_add_r_ret(r) do { \
    EMIT(PPC_MFSP_REG(AT, LR)); \
    emith_add_r_r_ptr(r, AT); \
} while (0)
// NB: ABI SP alignment is 16 in 64 bit mode
#define emith_push_ret(r) do { \
    int offs_ = 16 - 2*PTR_SIZE; \
    emith_add_r_r_ptr_imm(SP, SP, -16); \
    EMIT(PPC_MFSP_REG(AT, LR)); \
    emith_write_r_r_offs_ptr(AT, SP, offs_ + PTR_SIZE); \
    if ((r) > 0) emith_write_r_r_offs(r, SP, offs_); \
} while (0)
#define emith_pop_and_ret(r) do { \
    int offs_ = 16 - 2*PTR_SIZE; \
    if ((r) > 0) emith_read_r_r_offs(r, SP, offs_); \
    emith_read_r_r_offs_ptr(AT, SP, offs_ + PTR_SIZE); \
    EMIT(PPC_MTSP_REG(AT, LR)); \
    emith_add_r_r_ptr_imm(SP, SP, 16); \
    emith_ret(); \
} while (0)
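/* layout note for emith_push_ret/emith_pop_and_ret: the 16-byte frame keeps
 * the ABI alignment; LR is saved at SP+offs_+PTR_SIZE and the optional
 * register at SP+offs_, i.e. both sit at the top of the new frame. */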
// this should normally be in libc clear_cache; however, it sometimes isn't.
static NOINLINE void host_instructions_updated(void *base, void *end, int force)
{
    int step = 32, lgstep = 5;
    char *_base = (char *)((uptr)base & ~(step-1));
    int count = (((char *)end - _base) >> lgstep) + 1;
    if (count <= 0) count = 1; // make sure count is positive
    base = _base;
    asm volatile(
        " mtctr %1;"
        "0: dcbst 0,%0;"
        " add %0, %0, %2;"
        " bdnz 0b;"
        " sync"
        : "+r"(_base) : "r"(count), "r"(step) : "ctr");
    asm volatile(
        " mtctr %1;"
        "0: icbi 0,%0;"
        " add %0, %0, %2;"
        " bdnz 0b;"
        " isync"
        : "+r"(base) : "r"(count), "r"(step) : "ctr");
}
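/* illustrative call (block_start is hypothetical): after emitting or patching
 * code in the translation cache, something like
 *   host_instructions_updated(block_start, tcache_ptr, 1);
 * writes the touched data cache lines back (dcbst ... sync) and invalidates
 * the matching instruction cache lines (icbi ... isync) before execution. */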
// emitter ABI stuff
#define emith_pool_check() /**/
#define emith_pool_commit(j) /**/
#define emith_insn_ptr() ((u8 *)tcache_ptr)
#define emith_flush() /**/
#define emith_update_cache() /**/
#define emith_rw_offs_max() 0x7fff
// SH2 drc specific
#define STACK_EXTRA ((8+6)*PTR_SIZE) // Param, ABI (LR,CR,FP etc) save areas
#define emith_sh2_drc_entry() do { \
    int _c, _z = PTR_SIZE; u32 _m = 0xffffc000; /* r14-r31 */ \
    if (__builtin_parity(_m) == 1) _m |= 0x1; /* ABI align for SP is 16 */ \
    int _s = count_bits(_m) * _z, _o = STACK_EXTRA; \
    EMIT(PPC_STPU_IMM(SP, SP, -_s-STACK_EXTRA)); \
    EMIT(PPC_MFSP_REG(AT, LR)); \
    for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
        if (_m & (1 << _c)) \
            { if (_c) emith_write_r_r_offs_ptr(_c, SP, _o); _o += _z; } \
    emith_write_r_r_offs_ptr(AT, SP, _o + _z); \
} while (0)
#define emith_sh2_drc_exit() do { \
    int _c, _z = PTR_SIZE; u32 _m = 0xffffc000; \
    if (__builtin_parity(_m) == 1) _m |= 0x1; \
    int _s = count_bits(_m) * _z, _o = STACK_EXTRA; \
    emith_read_r_r_offs_ptr(AT, SP, _o + _s + _z); \
    EMIT(PPC_MTSP_REG(AT, LR)); \
    for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
        if (_m & (1 << _c)) \
            { if (_c) emith_read_r_r_offs_ptr(_c, SP, _o); _o += _z; } \
    emith_add_r_r_ptr_imm(SP, SP, _s+STACK_EXTRA); \
    emith_ret(); \
} while (0)
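/* note on the frame built above: r14-r31 (mask 0xffffc000) are saved above
 * the STACK_EXTRA scratch area and LR is stored one pointer above the new
 * frame (old SP + PTR_SIZE); OR'ing in an extra bit for an odd register
 * count keeps the save area a multiple of 16 bytes in 64 bit mode. */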
// NB: assumes a is in arg0; tab, func and mask are temp
#define emith_sh2_rcall(a, tab, func, mask) do { \
    emith_lsr(mask, a, SH2_READ_SHIFT); \
    emith_add_r_r_r_lsl_ptr(tab, tab, mask, PTR_SCALE+1); \
    emith_read_r_r_offs_ptr(func, tab, 0); \
    emith_read_r_r_offs(mask, tab, PTR_SIZE); \
    EMIT(PPC_BFXP_IMM(FC, func, 0, 1)); \
    emith_add_r_r_ptr(func, func); \
    emith_cmp_ra = emith_cmp_rb = -1; \
} while (0)
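/* interpretation (not an upstream comment): the table indexed above holds
 * pointer/mask pairs, hence the PTR_SCALE+1 scaling and the PTR_SIZE offset
 * for the mask read; the top bit of the pointer entry is captured in FC and
 * then shifted out by the doubling add, which suggests it serves as a flag
 * the caller can test. */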
// NB: assumes a, val are in arg0 and arg1; tab and func are temp
#define emith_sh2_wcall(a, val, tab, func) do { \
    emith_lsr(func, a, SH2_WRITE_SHIFT); \
    emith_lsl(func, func, PTR_SCALE); \
    emith_read_r_r_r_ptr(func, tab, func); \
    emith_move_r_r_ptr(5, CONTEXT_REG); /* arg2 */ \
    emith_jump_reg(func); \
} while (0)
#define emith_sh2_delay_loop(cycles, reg) do { \
    int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL); \
    int t1 = rcache_get_tmp(); \
    int t2 = rcache_get_tmp(); \
    int t3 = rcache_get_tmp(); \
    /* if (sr < 0) return */ \
    emith_cmp_r_imm(sr, 0); \
    EMITH_JMP_START(DCOND_LE); \
    /* turns = sr.cycles / cycles */ \
    emith_asr(t2, sr, 12); \
    emith_move_r_imm(t3, (u32)((1ULL<<32) / (cycles)) + 1); \
    emith_mul_u64(t1, t2, t2, t3); /* multiply by 1/x */ \
    rcache_free_tmp(t3); \
    if (reg >= 0) { \
        /* if (reg <= turns) turns = reg-1 */ \
        t3 = rcache_get_reg(reg, RC_GR_RMW, NULL); \
        emith_cmp_r_r(t3, t2); \
        EMITH_SJMP_START(DCOND_HI); \
        emith_sub_r_r_imm_c(DCOND_LS, t2, t3, 1); \
        EMITH_SJMP_END(DCOND_HI); \
        /* if (reg <= 1) turns = 0 */ \
        emith_cmp_r_imm(t3, 1); \
        EMITH_SJMP_START(DCOND_HI); \
        emith_move_r_imm_c(DCOND_LS, t2, 0); \
        EMITH_SJMP_END(DCOND_HI); \
        /* reg -= turns */ \
        emith_sub_r_r(t3, t2); \
    } \
    /* sr.cycles -= turns * cycles; */ \
    emith_move_r_imm(t1, cycles); \
    emith_mul(t1, t2, t1); \
    emith_sub_r_r_r_lsl(sr, sr, t1, 12); \
    EMITH_JMP_END(DCOND_LE); \
    rcache_free_tmp(t1); \
    rcache_free_tmp(t2); \
} while (0)
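/* note on the "multiply by 1/x" step above: with t3 = (1ULL<<32)/cycles + 1,
 * the high 32 bits of (u64)(sr>>12) * t3 approximate sr.cycles / cycles, so
 * the generated code obtains the turn count without an integer division. */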
/*
 * T = !carry(Rn = (Rn << 1) | T)
 * if Q
 *   C = carry(Rn += Rm)
 * else
 *   C = carry(Rn -= Rm)
 * T ^= C
 */
#define emith_sh2_div1_step(rn, rm, sr) do { \
    int t_ = rcache_get_tmp(); \
    emith_and_r_r_imm(AT, sr, T); \
    emith_lsr(FC, rn, 31); /* Rn = (Rn<<1)+T */ \
    emith_lsl(t_, rn, 1); \
    emith_or_r_r(t_, AT); \
    emith_or_r_imm(sr, T); /* T = !carry */ \
    emith_eor_r_r(sr, FC); \
    emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
    EMITH_JMP3_START(DCOND_EQ); \
    emith_add_r_r_r(rn, t_, rm); \
    EMIT(PPC_CMPLW_REG(rn, t_)); \
    EMITH_JMP3_MID(DCOND_EQ); \
    emith_sub_r_r_r(rn, t_, rm); \
    EMIT(PPC_CMPLW_REG(t_, rn)); \
    EMITH_JMP3_END(); \
    EMIT(PPC_MFCR_REG(FC)); \
    EMIT(PPC_BFXW_IMM(FC, FC, 0, 1)); \
    emith_eor_r_r(sr, FC); /* T ^= carry */ \
    rcache_free_tmp(t_); \
} while (0)
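/* carry detection above: after the add, rn < t_ (unsigned) iff the addition
 * wrapped; after the sub, t_ < rn (unsigned) iff it borrowed. Either cmplw
 * leaves this in the LT bit of cr0, which mfcr plus the bitfield extract
 * turn into a 0/1 value in FC for the final T ^= carry. */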
/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macl(ml, mh, rn, rm, sr) do { \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* MACH top 16 bits unused if saturated. sign ext for overfl detect */ \
    emith_sext(mh, mh, 16); \
    EMITH_SJMP_END(DCOND_EQ); \
    emith_mula_s64(ml, mh, rn, rm); \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* overflow if top 17 bits of MACH aren't all 1 or 0 */ \
    /* to check: add MACH >> 31 to MACH >> 15. this is 0 if no overflow */ \
    emith_asr(rn, mh, 15); \
    emith_add_r_r_r_lsr(rn, rn, mh, 31); /* sum = (MACH>>31)+(MACH>>15) */ \
    emith_tst_r_r(rn, rn); /* (need only N and Z flags) */ \
    EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> ov */ \
    emith_move_r_imm_c(DCOND_NE, ml, 0x0000); /* -overflow */ \
    emith_move_r_imm_c(DCOND_NE, mh, 0x8000); \
    EMITH_SJMP_START(DCOND_PL); /* sum > 0 -> +ovl */ \
    emith_sub_r_imm_c(DCOND_MI, ml, 1); /* 0xffffffff */ \
    emith_sub_r_imm_c(DCOND_MI, mh, 1); /* 0x00007fff */ \
    EMITH_SJMP_END(DCOND_PL); \
    EMITH_SJMP_END(DCOND_EQ); \
    EMITH_SJMP_END(DCOND_EQ); \
} while (0)
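/* when S is set, the path above saturates MACH:MACL to 0x8000:00000000 on
 * negative overflow and 0x7fff:ffffffff on positive overflow (MACH's top 16
 * bits being unused in saturation mode, as noted above). */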
/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macw(ml, mh, rn, rm, sr) do { \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* XXX: MACH should be untouched when S is set? */ \
    emith_asr(mh, ml, 31); /* sign ext MACL to MACH for ovrfl check */ \
    EMITH_SJMP_END(DCOND_EQ); \
    emith_mula_s64(ml, mh, rn, rm); \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP_START(DCOND_EQ); \
    /* overflow if top 33 bits of MACH:MACL aren't all 1 or 0 */ \
    /* to check: add MACL[31] to MACH. this is 0 if no overflow */ \
    emith_lsr(rn, ml, 31); \
    emith_add_r_r(rn, mh); /* sum = MACH + ((MACL>>31)&1) */ \
    emith_tst_r_r(rn, rn); /* (need only N and Z flags) */ \
    EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> overflow */ \
    /* XXX: LSB signalling only in SH1, or in SH2 too? */ \
    emith_move_r_imm_c(DCOND_NE, mh, 0x00000001); /* LSB of MACH */ \
    emith_move_r_imm_c(DCOND_NE, ml, 0x80000000); /* negative ovrfl */ \
    EMITH_SJMP_START(DCOND_PL); /* sum > 0 -> positive ovrfl */ \
    emith_sub_r_imm_c(DCOND_MI, ml, 1); /* 0x7fffffff */ \
    EMITH_SJMP_END(DCOND_PL); \
    EMITH_SJMP_END(DCOND_EQ); \
    EMITH_SJMP_END(DCOND_EQ); \
} while (0)
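/* when S is set here, MACL saturates to 0x80000000 on negative overflow or
 * 0x7fffffff on positive overflow, with MACH's LSB set to flag the overflow,
 * per the comments above. */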
#define emith_write_sr(sr, srcr) \
    EMIT(PPC_BFIW_IMM(sr, srcr, 22, 10))
#define emith_carry_to_t(sr, is_sub) \
    EMIT(PPC_BFIW_IMM(sr, FC, 32-__builtin_ffs(T), 1))
#define emith_t_to_carry(sr, is_sub) \
    emith_and_r_r_imm(FC, sr, 1)
#define emith_tpop_carry(sr, is_sub) do { \
    emith_and_r_r_imm(FC, sr, 1); \
    emith_eor_r_r(sr, FC); \
} while (0)
#define emith_tpush_carry(sr, is_sub) \
    emith_or_r_r(sr, FC)
#ifdef T
#define emith_invert_cond(cond) \
    ((cond) ^ 1)
// T bit handling
static void emith_set_t_cond(int sr, int cond)
{
    int b;
    // catch never and always cases
    if ((b = emith_cond_check(cond)) < 0)
        return;
    else if (b == BXX) {
        emith_or_r_imm(sr, T);
        return;
    }
    // extract bit from CR and insert into T
    EMIT(PPC_MFCR_REG(AT));
    EMIT(PPC_BFXW_IMM(AT, AT, (b&7), 1));
    if (!(b & 0x40)) EMIT(PPC_XOR_IMM(AT, AT, 1));
    EMIT(PPC_BFIW_IMM(sr, AT, 32-__builtin_ffs(T), 1));
}
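/* note: this relies on the codes returned by emith_cond_check carrying the
 * cr0 bit number in their low bits (b & 7) and the branch-on-true sense in
 * bit 0x40, which is why the extracted bit is inverted when that bit is
 * clear. */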
#define emith_clr_t_cond(sr) ((void)sr)
#define emith_get_t_cond() -1
#define emith_sync_t(sr) ((void)sr)
#define emith_invalidate_t()
static void emith_set_t(int sr, int val)
{
    if (val)
        emith_or_r_imm(sr, T);
    else
        emith_bic_r_imm(sr, T);
}
static int emith_tst_t(int sr, int tf)
{
    emith_tst_r_imm(sr, T);
    return tf ? DCOND_NE : DCOND_EQ;
}
#endif