emit_arm64.c

  1. /*
  2. * Basic macros to emit ARM A64 instructions and some utils
  3. * Copyright (C) 2019 kub
  4. *
  5. * This work is licensed under the terms of MAME license.
  6. * See COPYING file in the top-level directory.
  7. */
  8. #define HOST_REGS 32
  9. #define CONTEXT_REG 19
  10. #define RET_REG 0
  11. // R31 doesn't exist, it aliases either with zero or SP
  12. #define SP 31 // stack pointer
  13. #define Z0 31 // zero register
  14. #define LR 30 // link register
  15. #define FP 29 // frame pointer
  16. #define PR 18 // platform register
  17. // All operations except ptr ops use only the lower 32 bits of the A64 registers.
  18. // The upper 32 bits are used by ptr ops only.
  19. #define A64_COND_EQ 0x0
  20. #define A64_COND_NE 0x1
  21. #define A64_COND_HS 0x2
  22. #define A64_COND_LO 0x3
  23. #define A64_COND_MI 0x4
  24. #define A64_COND_PL 0x5
  25. #define A64_COND_VS 0x6
  26. #define A64_COND_VC 0x7
  27. #define A64_COND_HI 0x8
  28. #define A64_COND_LS 0x9
  29. #define A64_COND_GE 0xa
  30. #define A64_COND_LT 0xb
  31. #define A64_COND_GT 0xc
  32. #define A64_COND_LE 0xd
  33. #define A64_COND_CS A64_COND_HS
  34. #define A64_COND_CC A64_COND_LO
  35. #define A64_COND_AL 0xe
  36. #define A64_COND_NV 0xf
  37. /* unified conditions */
  38. #define DCOND_EQ A64_COND_EQ
  39. #define DCOND_NE A64_COND_NE
  40. #define DCOND_MI A64_COND_MI
  41. #define DCOND_PL A64_COND_PL
  42. #define DCOND_HI A64_COND_HI
  43. #define DCOND_HS A64_COND_HS
  44. #define DCOND_LO A64_COND_LO
  45. #define DCOND_GE A64_COND_GE
  46. #define DCOND_GT A64_COND_GT
  47. #define DCOND_LT A64_COND_LT
  48. #define DCOND_LS A64_COND_LS
  49. #define DCOND_LE A64_COND_LE
  50. #define DCOND_VS A64_COND_VS
  51. #define DCOND_VC A64_COND_VC
  52. #define DCOND_CS A64_COND_HS
  53. #define DCOND_CC A64_COND_LO
  54. // unified insn
  55. #define A64_INSN(op, b29, b22, b21, b16, b12, b10, b5, b0) \
  56. (((op)<<25)|((b29)<<29)|((b22)<<22)|((b21)<<21)|((b16)<<16)|((b12)<<12)|((b10)<<10)|((b5)<<5)|((b0)<<0))
  57. #define _ 0 // marker for "field unused"
  58. #define A64_NOP \
  59. A64_INSN(0xa,0x6,0x4,_,0x3,0x2,_,0,0x1f) // 0xd503201f
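/*
 * Illustrative note (annotation, not from the original author): A64_INSN
 * assembles a raw 32-bit opcode from fields placed at bit positions 29,
 * 25 (op), 22, 21, 16, 12, 10, 5 and 0. A minimal check using the NOP
 * encoding quoted above:
 *
 *   u32 nop = A64_NOP;
 *   // (0xa<<25)|(0x6<<29)|(0x4<<22)|(0x3<<16)|(0x2<<12)|0x1f == 0xd503201f
 */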
  60. // arithmetic/logical
  61. enum { OP_AND, OP_OR, OP_EOR, OP_ANDS, OP_ADD, OP_ADDS, OP_SUB, OP_SUBS };
  62. enum { ST_LSL, ST_LSR, ST_ASR, ST_ROR };
  63. enum { XT_UXTW=0x4, XT_UXTX=0x6, XT_LSL=0x7, XT_SXTW=0xc, XT_SXTX=0xe };
  64. #define OP_SZ64 (1 << 31) // bit for 64 bit op selection
  65. #define OP_N64 (1 << 22) // N-bit for 64 bit logical immediate ops
  66. #define A64_OP_REG(op, n, rd, rn, rm, stype, simm) /* arith+logical, ST_ */ \
  67. A64_INSN(0x5,(op)&3,((op)&4)|stype,n,rm,_,simm,rn,rd)
  68. #define A64_OP_XREG(op, rd, rn, rm, xtopt, simm) /* arith, XT_ */ \
  69. A64_INSN(0x5,(op)&3,0x4,1,rm,xtopt,simm,rn,rd)
  70. #define A64_OP_IMM12(op, rd, rn, imm, lsl12) /* arith */ \
  71. A64_INSN(0x8,(op)&3,((op)&4)|lsl12,_,_,_,(imm)&0xfff,rn,rd)
  72. #define A64_OP_IMMBM(op, rd, rn, immr, imms) /* logical */ \
  73. A64_INSN(0x9,(op)&3,0x0,_,immr,_,(imms)&0x3f,rn,rd)
  74. // rd = rn OP (rm SHIFT simm)
  75. #define A64_ADD_REG(rd, rn, rm, stype, simm) \
  76. A64_OP_REG(OP_ADD,0,rd,rn,rm,stype,simm)
  77. #define A64_ADDS_REG(rd, rn, rm, stype, simm) \
  78. A64_OP_REG(OP_ADDS,0,rd,rn,rm,stype,simm)
  79. #define A64_SUB_REG(rd, rn, rm, stype, simm) \
  80. A64_OP_REG(OP_SUB,0,rd,rn,rm,stype,simm)
  81. #define A64_SUBS_REG(rd, rn, rm, stype, simm) \
  82. A64_OP_REG(OP_SUBS,0,rd,rn,rm,stype,simm)
  83. #define A64_NEG_REG(rd, rm, stype, simm) \
  84. A64_SUB_REG(rd,Z0,rm,stype,simm)
  85. #define A64_NEGS_REG(rd, rm, stype, simm) \
  86. A64_SUBS_REG(rd,Z0,rm,stype,simm)
  87. #define A64_NEGC_REG(rd, rm) \
  88. A64_SBC_REG(rd,Z0,rm)
  89. #define A64_NEGCS_REG(rd, rm) \
  90. A64_SBCS_REG(rd,Z0,rm)
  91. #define A64_CMP_REG(rn, rm, stype, simm) \
  92. A64_SUBS_REG(Z0, rn, rm, stype, simm)
  93. #define A64_CMN_REG(rn, rm, stype, simm) \
  94. A64_ADDS_REG(Z0, rn, rm, stype, simm)
  95. #define A64_EOR_REG(rd, rn, rm, stype, simm) \
  96. A64_OP_REG(OP_EOR,0,rd,rn,rm,stype,simm)
  97. #define A64_OR_REG(rd, rn, rm, stype, simm) \
  98. A64_OP_REG(OP_OR,0,rd,rn,rm,stype,simm)
  99. #define A64_ORN_REG(rd, rn, rm, stype, simm) \
  100. A64_OP_REG(OP_OR,1,rd,rn,rm,stype,simm)
  101. #define A64_AND_REG(rd, rn, rm, stype, simm) \
  102. A64_OP_REG(OP_AND,0,rd,rn,rm,stype,simm)
  103. #define A64_ANDS_REG(rd, rn, rm, stype, simm) \
  104. A64_OP_REG(OP_ANDS,0,rd,rn,rm,stype,simm)
  105. #define A64_BIC_REG(rd, rn, rm, stype, simm) \
  106. A64_OP_REG(OP_AND,1,rd,rn,rm,stype,simm)
  107. #define A64_BICS_REG(rd, rn, rm, stype, simm) \
  108. A64_OP_REG(OP_ANDS,1,rd,rn,rm,stype,simm)
  109. #define A64_TST_REG(rn, rm, stype, simm) \
  110. A64_ANDS_REG(Z0, rn, rm, stype, simm)
  111. #define A64_MOV_REG(rd, rm, stype, simm) \
  112. A64_OR_REG(rd, Z0, rm, stype, simm)
  113. #define A64_MVN_REG(rd, rm, stype, simm) \
  114. A64_ORN_REG(rd, Z0, rm, stype, simm)
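/*
 * Sketch (annotation): CMP/CMN/TST/MOV/MVN above are the usual A64 aliases
 * built on the zero register Z0 (WZR/XZR). For example,
 * A64_MOV_REG(0, 1, ST_LSL, 0) is ORR W0, WZR, W1 and should encode as
 * 0x2a0103e0, while A64_CMP_REG(rn, rm, ...) is SUBS WZR, Wrn, Wrm.
 */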
  115. // rd = rn OP (rm EXTEND simm)
  116. #define A64_ADD_XREG(rd, rn, rm, xtopt, simm) \
  117. A64_OP_XREG(OP_ADD,rd,rn,rm,xtopt,simm)
  118. #define A64_ADDS_XREG(rd, rn, rm, xtopt, simm) \
  119. A64_OP_XREG(OP_ADDS,rd,rn,rm,xtopt,simm)
  120. #define A64_SUB_XREG(rd, rn, rm, xtopt, simm) \
  121. A64_OP_XREG(OP_SUB,rd,rn,rm,xtopt,simm)
  122. #define A64_SUBS_XREG(rd, rn, rm, xtopt, simm) \
  123. A64_OP_XREG(OP_SUBS,rd,rn,rm,xtopt,simm)
  124. // rd = rn OP rm OP carry
  125. #define A64_ADC_REG(rd, rn, rm) \
  126. A64_INSN(0xd,OP_ADD &3,0x0,_,rm,_,_,rn,rd)
  127. #define A64_ADCS_REG(rd, rn, rm) \
  128. A64_INSN(0xd,OP_ADDS&3,0x0,_,rm,_,_,rn,rd)
  129. #define A64_SBC_REG(rd, rn, rm) \
  130. A64_INSN(0xd,OP_SUB &3,0x0,_,rm,_,_,rn,rd)
  131. #define A64_SBCS_REG(rd, rn, rm) \
  132. A64_INSN(0xd,OP_SUBS&3,0x0,_,rm,_,_,rn,rd)
  133. // rd = rn SHIFT rm
  134. #define A64_LSL_REG(rd, rn, rm) \
  135. A64_INSN(0xd,0x0,0x3,_,rm,_,0x8,rn,rd)
  136. #define A64_LSR_REG(rd, rn, rm) \
  137. A64_INSN(0xd,0x0,0x3,_,rm,_,0xa,rn,rd)
  138. #define A64_ASR_REG(rd, rn, rm) \
  139. A64_INSN(0xd,0x0,0x3,_,rm,_,0x9,rn,rd)
  140. #define A64_ROR_REG(rd, rn, rm) \
  141. A64_INSN(0xd,0x0,0x3,_,rm,_,0xb,rn,rd)
  142. // rd = REVERSE(n) rn
  143. #define A64_RBIT_REG(rd, rn) \
  144. A64_INSN(0xd,0x2,0x3,_,_,_,_,rn,rd)
  145. // rd = rn OP (imm12 << (0|12))
  146. #define A64_ADD_IMM(rd, rn, imm12, lsl12) \
  147. A64_OP_IMM12(OP_ADD, rd, rn, imm12, lsl12)
  148. #define A64_ADDS_IMM(rd, rn, imm12, lsl12) \
  149. A64_OP_IMM12(OP_ADDS, rd, rn, imm12, lsl12)
  150. #define A64_SUB_IMM(rd, rn, imm12, lsl12) \
  151. A64_OP_IMM12(OP_SUB, rd, rn, imm12, lsl12)
  152. #define A64_SUBS_IMM(rd, rn, imm12, lsl12) \
  153. A64_OP_IMM12(OP_SUBS, rd, rn, imm12, lsl12)
  154. #define A64_CMP_IMM(rn, imm12, lsl12) \
  155. A64_SUBS_IMM(Z0,rn,imm12,lsl12)
  156. #define A64_CMN_IMM(rn, imm12, lsl12) \
  157. A64_ADDS_IMM(Z0,rn,imm12,lsl12)
  158. // rd = rn OP immbm; immbm is a repeated special pattern of 2^n bits length
  159. #define A64_EOR_IMM(rd, rn, immr, imms) \
  160. A64_OP_IMMBM(OP_EOR,rd,rn,immr,imms)
  161. #define A64_OR_IMM(rd, rn, immr, imms) \
  162. A64_OP_IMMBM(OP_OR,rd,rn,immr,imms)
  163. #define A64_AND_IMM(rd, rn, immr, imms) \
  164. A64_OP_IMMBM(OP_AND,rd,rn,immr,imms)
  165. #define A64_ANDS_IMM(rd, rn, immr, imms) \
  166. A64_OP_IMMBM(OP_ANDS,rd,rn,immr,imms)
  167. #define A64_TST_IMM(rn, immr, imms) \
  168. A64_OP_IMMBM(OP_ANDS,Z0,rn,immr,imms)
  169. #define A64_MOV_IMM(rd, rn, immr, imms) \
  170. A64_OP_IMMBM(OP_OR,rd,Z0,immr,imms)
  171. // rd = (imm16 << (0|16|32|48))
  172. #define A64_MOVN_IMM(rd, imm16, lsl16) \
  173. A64_INSN(0x9,0x0,0x2,lsl16,_,_,_,(imm16)&0xffff,rd)
  174. #define A64_MOVZ_IMM(rd, imm16, lsl16) \
  175. A64_INSN(0x9,0x2,0x2,lsl16,_,_,_,(imm16)&0xffff,rd)
  176. #define A64_MOVK_IMM(rd, imm16, lsl16) \
  177. A64_INSN(0x9,0x3,0x2,lsl16,_,_,_,(imm16)&0xffff,rd)
  178. #define A64_MOVT_IMM(rd, imm16, lsl16) \
  179. A64_INSN(0x9,0x3,0x2,lsl16,_,_,_,(imm16)&0xffff,rd)
  180. // rd = rn SHIFT imm6
  181. #define A64_LSL_IMM(rd, rn, bits) /* UBFM */ \
  182. A64_INSN(0x9,0x2,0x4,_,32-(bits),_,31-(bits),rn,rd)
  183. #define A64_LSR_IMM(rd, rn, bits) /* UBFM */ \
  184. A64_INSN(0x9,0x2,0x4,_,bits,_,31,rn,rd)
  185. #define A64_ASR_IMM(rd, rn, bits) /* SBFM */ \
  186. A64_INSN(0x9,0x0,0x4,_,bits,_,31,rn,rd)
  187. #define A64_ROR_IMM(rd, rn, bits) /* EXTR */ \
  188. A64_INSN(0x9,0x0,0x6,_,rn,_,bits,rn,rd)
  189. #define A64_SXT_IMM(rd, rn, bits) \
  190. A64_INSN(0x9,0x0,0x4,0,0,_,bits-1,rn,rd)
  191. #define A64_UXT_IMM(rd, rn, bits) \
  192. A64_INSN(0x9,0x2,0x4,0,0,_,bits-1,rn,rd)
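/*
 * Worked example (annotation): the shift-immediate macros use the standard
 * UBFM/SBFM aliases. For a 32-bit shift by 8, LSL Wd, Wn, #8 is
 * UBFM Wd, Wn, #24, #23, which is what A64_LSL_IMM(d, n, 8) builds
 * (immr = 32-8, imms = 31-8); A64_LSR_IMM(d, n, 8) builds UBFM Wd, Wn, #8, #31.
 */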
  193. // multiplication
  194. #define A64_SMULL(rd, rn, rm) /* Xd = Wn*Wm (+ Xa) */ \
  195. A64_INSN(0xd,0x4,0x4,1,rm,_,Z0,rn,rd)
  196. #define A64_SMADDL(rd, rn, rm, ra) \
  197. A64_INSN(0xd,0x4,0x4,1,rm,_,ra,rn,rd)
  198. #define A64_UMULL(rd, rn, rm) \
  199. A64_INSN(0xd,0x4,0x6,1,rm,_,Z0,rn,rd)
  200. #define A64_UMADDL(rd, rn, rm, ra) \
  201. A64_INSN(0xd,0x4,0x6,1,rm,_,ra,rn,rd)
  202. #define A64_MUL(rd, rn, rm) /* Wd = Wn*Wm (+ Wa) */ \
  203. A64_INSN(0xd,0x0,0x4,0,rm,_,Z0,rn,rd)
  204. #define A64_MADD(rd, rn, rm, ra) \
  205. A64_INSN(0xd,0x0,0x4,0,rm,_,ra,rn,rd)
  206. // branching
  207. #define A64_B(offs26) \
  208. A64_INSN(0xa,0x0,_,_,_,_,_,_,(offs26) >> 2)
  209. #define A64_BL(offs26) \
  210. A64_INSN(0xa,0x4,_,_,_,_,_,_,(offs26) >> 2)
  211. #define A64_BR(rn) \
  212. A64_INSN(0xb,0x6,_,_,0x1f,_,_,rn,_)
  213. #define A64_BLR(rn) \
  214. A64_INSN(0xb,0x6,_,_,0x3f,_,_,rn,_)
  215. #define A64_RET(rn) /* same as BR, but hint for cpu */ \
  216. A64_INSN(0xb,0x6,_,_,0x5f,_,_,rn,_)
  217. #define A64_BCOND(cond, offs19) \
  218. A64_INSN(0xa,0x2,_,_,_,_,_,(offs19) >> 2,(cond))
  219. // load pc-relative
  220. #define A64_LDRLIT_IMM(rd, offs19) \
  221. A64_INSN(0xc,0x0,0x0,_,_,_,_,(offs19) >> 2,rd)
  222. #define A64_LDRXLIT_IMM(rd, offs19) \
  223. A64_INSN(0xc,0x2,0x0,_,_,_,_,(offs19) >> 2,rd)
  224. #define A64_ADRXLIT_IMM(rd, offs21) \
  225. A64_INSN(0x8,(offs21)&3,0x0,_,_,_,_,(offs21) >> 2,rd)
  226. // load/store indexed base. Only the signed unscaled variant is used here.
  227. enum { LT_ST, LT_LD, LT_LDSX, LT_LDS };
  228. enum { AM_B=0x1, AM_H=0x3, AM_W=0x5, AM_X=0x7 };
  229. enum { AM_IDX, AM_IDXPOST, AM_IDXREG, AM_IDXPRE };
  230. #define A64_LDST_AM(ir,rm,optimm) (((ir)<<9)|((rm)<<4)|((optimm)&0x1ff))
  231. #define A64_OP_LDST(sz, op, am, mode, rm, rd) \
  232. A64_INSN(0xc,sz,op,_,_,am,mode,rm,rd)
  233. #define A64_LDSTX_IMM(rd, rn, offs9, ld, mode) \
  234. A64_OP_LDST(AM_X,ld,A64_LDST_AM(0,_,offs9),mode,rn,rd)
  235. #define A64_LDST_IMM(rd, rn, offs9, ld, mode) \
  236. A64_OP_LDST(AM_W,ld,A64_LDST_AM(0,_,offs9),mode,rn,rd)
  237. #define A64_LDSTH_IMM(rd, rn, offs9, ld, mode) \
  238. A64_OP_LDST(AM_H,ld,A64_LDST_AM(0,_,offs9),mode,rn,rd)
  239. #define A64_LDSTB_IMM(rd, rn, offs9, ld, mode) \
  240. A64_OP_LDST(AM_B,ld,A64_LDST_AM(0,_,offs9),mode,rn,rd)
  241. // NB: pre/postindex isn't available with register offset
  242. #define A64_LDSTX_REG(rd, rn, rm, ld, opt) \
  243. A64_OP_LDST(AM_X,ld,A64_LDST_AM(1,rm,opt),AM_IDXREG,rn,rd)
  244. #define A64_LDST_REG(rd, rn, rm, ld, opt) \
  245. A64_OP_LDST(AM_W,ld,A64_LDST_AM(1,rm,opt),AM_IDXREG,rn,rd)
  246. #define A64_LDSTH_REG(rd, rn, rm, ld, opt) \
  247. A64_OP_LDST(AM_H,ld,A64_LDST_AM(1,rm,opt),AM_IDXREG,rn,rd)
  248. #define A64_LDSTB_REG(rd, rn, rm, ld, opt) \
  249. A64_OP_LDST(AM_B,ld,A64_LDST_AM(1,rm,opt),AM_IDXREG,rn,rd)
  250. #define A64_LDSTPX_IMM(rn, r1, r2, offs7, ld, mode) \
  251. A64_INSN(0x4,0x5,((mode)<<1)|(ld),_,_,(offs7)&0x3f8,r2,rn,r1)
  252. // 64 bit stuff for pointer handling
  253. #define A64_ADDX_XREG(rd, rn, rm, xtopt, simm) \
  254. OP_SZ64|A64_OP_XREG(OP_ADD,rd,rn,rm,xtopt,simm)
  255. #define A64_ADDX_REG(rd, rn, rm, stype, simm) \
  256. OP_SZ64|A64_ADD_REG(rd, rn, rm, stype, simm)
  257. #define A64_ADDXS_REG(rd, rn, rm, stype, simm) \
  258. OP_SZ64|A64_ADDS_REG(rd, rn, rm, stype, simm)
  259. #define A64_ORX_REG(rd, rn, rm, stype, simm) \
  260. OP_SZ64|A64_OR_REG(rd, rn, rm, stype, simm)
  261. #define A64_TSTX_REG(rn, rm, stype, simm) \
  262. OP_SZ64|A64_TST_REG(rn, rm, stype, simm)
  263. #define A64_MOVX_REG(rd, rm, stype, simm) \
  264. OP_SZ64|A64_MOV_REG(rd, rm, stype, simm)
  265. #define A64_ADDX_IMM(rd, rn, imm12) \
  266. OP_SZ64|A64_ADD_IMM(rd, rn, imm12, 0)
  267. #define A64_EORX_IMM(rd, rn, immr, imms) \
  268. OP_SZ64|OP_N64|A64_EOR_IMM(rd, rn, immr, imms)
  269. #define A64_UXTX_IMM(rd, rn, bits) \
  270. OP_SZ64|OP_N64|A64_UXT_IMM(rd, rn, bits)
  271. #define A64_LSRX_IMM(rd, rn, bits) \
  272. OP_SZ64|OP_N64|A64_LSR_IMM(rd, rn, bits)|(63<<10)
  273. // XXX: tcache_ptr type for SVP and SH2 compilers differs..
  274. #define EMIT_PTR(ptr, x) \
  275. do { \
  276. *(u32 *)(ptr) = x; \
  277. ptr = (void *)((u8 *)(ptr) + sizeof(u32)); \
  278. } while (0)
  279. #define EMIT(op) \
  280. do { \
  281. EMIT_PTR(tcache_ptr, op); \
  282. COUNT_OP; \
  283. } while (0)
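/*
 * Usage sketch (annotation, assuming the including compiler provides a
 * tcache_ptr that points into writable+executable memory and a COUNT_OP
 * statistics hook, as the SH2/SVP drivers do). Emitting "mov w0, #42; ret"
 * could look like:
 *
 *   EMIT(A64_MOVZ_IMM(RET_REG, 42, 0));
 *   EMIT(A64_RET(LR));
 *   host_instructions_updated(start, tcache_ptr);  // flush icache
 *
 * All names above exist in this file except "start", which stands for
 * whatever tcache_ptr was before emitting.
 */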
  284. // if-then-else conditional execution helpers
  285. #define JMP_POS(ptr) \
  286. ptr = tcache_ptr; \
  287. EMIT(A64_B(0));
  288. #define JMP_EMIT(cond, ptr) { \
  289. u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr); \
  290. EMIT_PTR(ptr, A64_BCOND(cond, val_ & 0x001fffff)); \
  291. }
  292. #define JMP_EMIT_NC(ptr) { \
  293. u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr); \
  294. EMIT_PTR(ptr, A64_B(val_ & 0x0fffffff)); \
  295. }
  296. #define EMITH_JMP_START(cond) { \
  297. u8 *cond_ptr; \
  298. JMP_POS(cond_ptr)
  299. #define EMITH_JMP_END(cond) \
  300. JMP_EMIT(cond, cond_ptr); \
  301. }
  302. #define EMITH_JMP3_START(cond) { \
  303. u8 *cond_ptr, *else_ptr; \
  304. JMP_POS(cond_ptr)
  305. #define EMITH_JMP3_MID(cond) \
  306. JMP_POS(else_ptr); \
  307. JMP_EMIT(cond, cond_ptr);
  308. #define EMITH_JMP3_END() \
  309. JMP_EMIT_NC(else_ptr); \
  310. }
  311. // "simple" jump (no more then a few insns)
  312. // ARM32 will use conditional instructions here
  313. #define EMITH_SJMP_START EMITH_JMP_START
  314. #define EMITH_SJMP_END EMITH_JMP_END
  315. #define EMITH_SJMP3_START EMITH_JMP3_START
  316. #define EMITH_SJMP3_MID EMITH_JMP3_MID
  317. #define EMITH_SJMP3_END EMITH_JMP3_END
  318. #define EMITH_SJMP2_START(cond) \
  319. EMITH_SJMP3_START(cond)
  320. #define EMITH_SJMP2_MID(cond) \
  321. EMITH_SJMP3_MID(cond)
  322. #define EMITH_SJMP2_END(cond) \
  323. EMITH_SJMP3_END()
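/*
 * Usage sketch (annotation): the JMP/SJMP helpers emit a forward branch over
 * a block and patch its displacement once the block end is known, e.g.
 *
 *   emith_cmp_r_imm(r, 0);
 *   EMITH_SJMP_START(DCOND_NE);   // branch taken if r != 0, skipping...
 *   emith_move_r_imm(r, 1);       // ...this block (runs only when r == 0)
 *   EMITH_SJMP_END(DCOND_NE);
 *
 * On ARM32 the SJMP variants use conditional execution instead; here they
 * are plain short jumps.
 */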
  324. // data processing, register
  325. #define emith_move_r_r_ptr(d, s) \
  326. EMIT(A64_MOVX_REG(d, s, ST_LSL, 0))
  327. #define emith_move_r_r_ptr_c(cond, d, s) \
  328. emith_move_r_r_ptr(d, s)
  329. #define emith_move_r_r(d, s) \
  330. EMIT(A64_MOV_REG(d, s, ST_LSL, 0))
  331. #define emith_move_r_r_c(cond, d, s) \
  332. emith_move_r_r(d, s)
  333. #define emith_mvn_r_r(d, s) \
  334. EMIT(A64_MVN_REG(d, s, ST_LSL, 0))
  335. #define emith_add_r_r_r_lsl_ptr(d, s1, s2, simm) do { \
  336. if (simm < 4) EMIT(A64_ADDX_XREG(d, s1, s2, XT_SXTW, simm)); \
  337. else EMIT(A64_ADDX_REG(d, s1, s2, ST_LSL, simm)); \
  338. } while (0)
  339. #define emith_add_r_r_r_lsl(d, s1, s2, simm) \
  340. EMIT(A64_ADD_REG(d, s1, s2, ST_LSL, simm))
  341. #define emith_addf_r_r_r_lsl(d, s1, s2, simm) \
  342. EMIT(A64_ADDS_REG(d, s1, s2, ST_LSL, simm))
  343. #define emith_addf_r_r_r_lsr(d, s1, s2, simm) \
  344. EMIT(A64_ADDS_REG(d, s1, s2, ST_LSR, simm))
  345. #define emith_sub_r_r_r_lsl(d, s1, s2, simm) \
  346. EMIT(A64_SUB_REG(d, s1, s2, ST_LSL, simm))
  347. #define emith_subf_r_r_r_lsl(d, s1, s2, simm) \
  348. EMIT(A64_SUBS_REG(d, s1, s2, ST_LSL, simm))
  349. #define emith_or_r_r_r_lsl(d, s1, s2, simm) \
  350. EMIT(A64_OR_REG(d, s1, s2, ST_LSL, simm))
  351. #define emith_eor_r_r_r_lsl(d, s1, s2, simm) \
  352. EMIT(A64_EOR_REG(d, s1, s2, ST_LSL, simm))
  353. #define emith_eor_r_r_r_lsr(d, s1, s2, simm) \
  354. EMIT(A64_EOR_REG(d, s1, s2, ST_LSR, simm))
  355. #define emith_and_r_r_r_lsl(d, s1, s2, simm) \
  356. EMIT(A64_AND_REG(d, s1, s2, ST_LSL, simm))
  357. #define emith_or_r_r_lsl(d, s, lslimm) \
  358. emith_or_r_r_r_lsl(d, d, s, lslimm)
  359. #define emith_eor_r_r_lsr(d, s, lsrimm) \
  360. emith_eor_r_r_r_lsr(d, d, s, lsrimm)
  361. #define emith_add_r_r_r(d, s1, s2) \
  362. emith_add_r_r_r_lsl(d, s1, s2, 0)
  363. #define emith_addf_r_r_r(d, s1, s2) \
  364. emith_addf_r_r_r_lsl(d, s1, s2, 0)
  365. #define emith_sub_r_r_r(d, s1, s2) \
  366. emith_sub_r_r_r_lsl(d, s1, s2, 0)
  367. #define emith_subf_r_r_r(d, s1, s2) \
  368. emith_subf_r_r_r_lsl(d, s1, s2, 0)
  369. #define emith_or_r_r_r(d, s1, s2) \
  370. emith_or_r_r_r_lsl(d, s1, s2, 0)
  371. #define emith_eor_r_r_r(d, s1, s2) \
  372. emith_eor_r_r_r_lsl(d, s1, s2, 0)
  373. #define emith_and_r_r_r(d, s1, s2) \
  374. emith_and_r_r_r_lsl(d, s1, s2, 0)
  375. #define emith_add_r_r_ptr(d, s) \
  376. emith_add_r_r_r_lsl_ptr(d, d, s, 0)
  377. #define emith_add_r_r(d, s) \
  378. emith_add_r_r_r(d, d, s)
  379. #define emith_sub_r_r(d, s) \
  380. emith_sub_r_r_r(d, d, s)
  381. #define emith_neg_r_r(d, s) \
  382. EMIT(A64_NEG_REG(d, s, ST_LSL, 0))
  383. #define emith_adc_r_r_r(d, s1, s2) \
  384. EMIT(A64_ADC_REG(d, s1, s2))
  385. #define emith_adc_r_r(d, s) \
  386. EMIT(A64_ADC_REG(d, d, s))
  387. #define emith_adcf_r_r_r(d, s1, s2) \
  388. EMIT(A64_ADCS_REG(d, s1, s2))
  389. #define emith_sbcf_r_r_r(d, s1, s2) \
  390. EMIT(A64_SBCS_REG(d, s1, s2))
  391. #define emith_and_r_r(d, s) \
  392. emith_and_r_r_r(d, d, s)
  393. #define emith_and_r_r_c(cond, d, s) \
  394. emith_and_r_r(d, s)
  395. #define emith_or_r_r(d, s) \
  396. emith_or_r_r_r(d, d, s)
  397. #define emith_eor_r_r(d, s) \
  398. emith_eor_r_r_r(d, d, s)
  399. #define emith_tst_r_r_ptr(d, s) \
  400. EMIT(A64_TSTX_REG(d, s, ST_LSL, 0))
  401. #define emith_tst_r_r(d, s) \
  402. EMIT(A64_TST_REG(d, s, ST_LSL, 0))
  403. #define emith_teq_r_r(d, s) do { \
  404. int _t = rcache_get_tmp(); \
  405. emith_eor_r_r_r(_t, d, s); \
  406. emith_cmp_r_imm(_t, 0); \
  407. rcache_free_tmp(_t); \
  408. } while (0)
  409. #define emith_cmp_r_r(d, s) \
  410. EMIT(A64_CMP_REG(d, s, ST_LSL, 0))
  411. #define emith_addf_r_r(d, s) \
  412. emith_addf_r_r_r(d, d, s)
  413. #define emith_subf_r_r(d, s) \
  414. emith_subf_r_r_r(d, d, s)
  415. #define emith_adcf_r_r(d, s) \
  416. emith_adcf_r_r_r(d, d, s)
  417. #define emith_sbcf_r_r(d, s) \
  418. emith_sbcf_r_r_r(d, d, s)
  419. #define emith_negcf_r_r(d, s) \
  420. emith_sbcf_r_r_r(d, Z0, s)
  421. // move immediate
  422. static void emith_move_imm64(int r, int wx, int64_t imm)
  423. {
  424. int sz64 = wx ? OP_SZ64:0;
  425. int c, s;
  426. if (!imm) {
  427. EMIT(sz64|A64_MOVZ_IMM(r, imm, 0));
  428. return;
  429. }
  430. if (imm && -imm == (u16)-imm) {
  431. EMIT(sz64|A64_MOVN_IMM(r, ~imm, 0));
  432. return;
  433. }
  434. for (c = s = 0; s < (wx ? 4:2) && imm; s++, imm >>= 16)
  435. if ((u16)(imm)) {
  436. if (c++) EMIT(sz64|A64_MOVK_IMM(r, imm, s));
  437. else EMIT(sz64|A64_MOVZ_IMM(r, imm, s));
  438. }
  439. }
  440. #define emith_move_r_ptr_imm(r, imm) \
  441. emith_move_imm64(r, 1, (intptr_t)(imm))
  442. #define emith_move_r_imm(r, imm) \
  443. emith_move_imm64(r, 0, (s32)(imm))
  444. #define emith_move_r_imm_c(cond, r, imm) \
  445. emith_move_r_imm(r, imm)
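/*
 * Example (annotation): emith_move_imm64 splits a constant into MOVZ/MOVK/MOVN
 * pieces, so emith_move_r_imm(r, 0x12345678) should emit
 *
 *   MOVZ Wr, #0x5678           ; first non-zero 16-bit chunk
 *   MOVK Wr, #0x1234, lsl #16  ; remaining chunks keep the rest intact
 *
 * while emith_move_r_imm(r, -1) collapses to a single MOVN.
 */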
  446. // arithmetic, immediate
  447. static void emith_arith_imm(int op, int wx, int rd, int rn, s32 imm)
  448. {
  449. u32 sz64 = wx ? OP_SZ64:0;
  450. if (imm < 0) {
  451. op ^= (OP_ADD ^ OP_SUB);
  452. imm = -imm;
  453. }
  454. if (imm == 0) {
  455. // value 0, must emit if op is *S or source isn't dest
  456. if ((op & 1) || rd != rn)
  457. EMIT(sz64|A64_OP_IMM12(op, rd, rn, 0, 0));
  458. } else if (imm >> 24) {
  459. // value too large
  460. int _t = rcache_get_tmp();
  461. emith_move_r_imm(_t, imm);
  462. EMIT(sz64|A64_OP_REG(op, 0, rd, rn, _t, ST_LSL, 0));
  463. rcache_free_tmp(_t);
  464. } else {
  465. int rs = rn;
  466. if ((imm) & 0x000fff) {
  467. EMIT(sz64|A64_OP_IMM12(op, rd, rs, imm, 0)); rs = rd;
  468. }
  469. if ((imm) & 0xfff000) {
  470. EMIT(sz64|A64_OP_IMM12(op, rd, rs, imm >>12, 1));
  471. }
  472. }
  473. }
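/*
 * Example (annotation): emith_arith_imm covers up to 24-bit magnitudes with
 * the two 12-bit immediate slots, so emith_add_r_imm(r, 0x12345) should emit
 *   ADD Wr, Wr, #0x345
 *   ADD Wr, Wr, #0x12, lsl #12
 * Negative values flip ADD<->SUB first, and anything with bits above 23 goes
 * through a temporary register instead.
 */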
  474. #define emith_add_r_imm(r, imm) \
  475. emith_arith_imm(OP_ADD, 0, r, r, imm)
  476. #define emith_add_r_imm_c(cond, r, imm) \
  477. emith_add_r_imm(r, imm)
  478. #define emith_addf_r_imm(r, imm) \
  479. emith_arith_imm(OP_ADDS, 0, r, r, imm)
  480. #define emith_sub_r_imm(r, imm) \
  481. emith_arith_imm(OP_SUB, 0, r, r, imm)
  482. #define emith_sub_r_imm_c(cond, r, imm) \
  483. emith_sub_r_imm(r, imm)
  484. #define emith_subf_r_imm(r, imm) \
  485. emith_arith_imm(OP_SUBS, 0, r, r, imm)
  486. #define emith_adc_r_imm(r, imm) do { \
  487. int _t = rcache_get_tmp(); \
  488. emith_move_r_imm(_t, imm); \
  489. emith_adc_r_r(r, _t); \
  490. rcache_free_tmp(_t); \
  491. } while (0)
  492. #define emith_adcf_r_imm(r, imm) do { \
  493. int _t = rcache_get_tmp(); \
  494. emith_move_r_imm(_t, imm); \
  495. emith_adcf_r_r(r, _t); \
  496. rcache_free_tmp(_t); \
  497. } while (0)
  498. #define emith_cmp_r_imm(r, imm) do { \
  499. u32 op_ = OP_SUBS, imm_ = (u8)imm; \
  500. if ((s8)imm_ < 0) { \
  501. imm_ = (u8)-imm_; \
  502. op_ = OP_ADDS; \
  503. } \
  504. EMIT(A64_OP_IMM12(op_, Z0, r, imm_, 0)); \
  505. } while (0)
  506. #define emith_add_r_r_ptr_imm(d, s, imm) \
  507. emith_arith_imm(OP_ADD, 1, d, s, imm)
  508. #define emith_add_r_r_imm(d, s, imm) \
  509. emith_arith_imm(OP_ADD, 0, d, s, imm)
  510. #define emith_sub_r_r_imm(d, s, imm) \
  511. emith_arith_imm(OP_SUB, 0, d, s, imm)
  512. #define emith_sub_r_r_imm_c(cond, d, s, imm) \
  513. emith_sub_r_r_imm(d, s, imm)
  514. #define emith_subf_r_r_imm(d, s, imm) \
  515. emith_arith_imm(OP_SUBS, 0, d, s, imm)
  516. // logical, immediate; the value describes a bitmask, see ARMv8 ArchRefMan
  517. // NB: deal only with simple masks 0{n}1{m}0{o} or 1{n}0{m}1{o}, 0<m<32 n+m+o=32
  518. static int emith_log_isbm(u32 imm, int *n, int *m, int *invert)
  519. {
  520. *invert = (s32)imm < 0; // topmost bit set?
  521. if (*invert)
  522. imm = ~imm;
  523. if (imm) {
  524. *n = __builtin_clz(imm); imm = ~(imm << *n); // insert 1's
  525. *m = __builtin_clz(imm); imm = ~ imm << *m; // insert 0's
  526. return !imm;
  527. } else {
  528. *n = *m = 0;
  529. return 0;
  530. }
  531. }
  532. static void emith_log_imm(int op, int wx, int rd, int rn, u32 imm)
  533. {
  534. int n, m, invert;
  535. u32 sz64 = wx ? OP_SZ64:0;
  536. if (emith_log_isbm(imm, &n, &m, &invert) && (!wx || !invert)) {
  537. n += (wx ? 32:0); // extend pattern if 64 bit regs are used
  538. if (invert) EMIT(sz64|A64_OP_IMMBM(op, rd, rn, n, 32-m-1));
  539. else EMIT(sz64|A64_OP_IMMBM(op, rd, rn, n+m, m-1));
  540. } else {
  541. // imm too complex
  542. int _t = rcache_get_tmp();
  543. if (count_bits(imm) > 16) {
  544. emith_move_r_imm(_t, ~imm);
  545. EMIT(sz64|A64_OP_REG(op, 1, rd, rn, _t, ST_LSL, 0));
  546. } else {
  547. emith_move_r_imm(_t, imm);
  548. EMIT(sz64|A64_OP_REG(op, 0, rd, rn, _t, ST_LSL, 0));
  549. }
  550. rcache_free_tmp(_t);
  551. }
  552. }
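/*
 * Example (annotation): emith_log_isbm only recognizes masks made of one
 * contiguous run of 1s (possibly inverted). For imm = 0x0000ff00 it finds
 * n = 16 leading zeroes and m = 8 ones, and emith_log_imm then emits the
 * bitmask-immediate form with immr = n+m = 24, imms = m-1 = 7, i.e.
 *   AND Wd, Wn, #0x0000ff00
 * Anything more complex (e.g. 0x00ff00ff) falls back to a temp register.
 */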
  553. #define emith_and_r_imm(r, imm) \
  554. emith_log_imm(OP_AND, 0, r, r, imm)
  555. #define emith_or_r_imm(r, imm) \
  556. emith_log_imm(OP_OR, 0, r, r, imm)
  557. #define emith_or_r_imm_c(cond, r, imm) \
  558. emith_or_r_imm(r, imm)
  559. #define emith_eor_r_imm_ptr(r, imm) \
  560. emith_log_imm(OP_EOR, 1, r, r, imm)
  561. #define emith_eor_r_imm_ptr_c(cond, r, imm) \
  562. emith_eor_r_imm_ptr(r, imm)
  563. #define emith_eor_r_imm(r, imm) \
  564. emith_log_imm(OP_EOR, 0, r, r, imm)
  565. #define emith_eor_r_imm_c(cond, r, imm) \
  566. emith_eor_r_imm(r, imm)
  567. /* NB: BIC #imm not available in A64; use AND #~imm instead */
  568. #define emith_bic_r_imm(r, imm) \
  569. emith_log_imm(OP_AND, 0, r, r, ~(imm))
  570. #define emith_bic_r_imm_c(cond, r, imm) \
  571. emith_bic_r_imm(r, imm)
  572. #define emith_tst_r_imm(r, imm) \
  573. emith_log_imm(OP_ANDS, 0, Z0, r, imm)
  574. #define emith_tst_r_imm_c(cond, r, imm) \
  575. emith_tst_r_imm(r, imm)
  576. #define emith_and_r_r_imm(d, s, imm) \
  577. emith_log_imm(OP_AND, 0, d, s, imm)
  578. #define emith_or_r_r_imm(d, s, imm) \
  579. emith_log_imm(OP_OR, 0, d, s, imm)
  580. #define emith_eor_r_r_imm(d, s, imm) \
  581. emith_log_imm(OP_EOR, 0, d, s, imm)
  582. // shift
  583. #define emith_lsl(d, s, cnt) \
  584. EMIT(A64_LSL_IMM(d, s, cnt))
  585. #define emith_lsr(d, s, cnt) \
  586. EMIT(A64_LSR_IMM(d, s, cnt))
  587. #define emith_asr(d, s, cnt) \
  588. EMIT(A64_ASR_IMM(d, s, cnt))
  589. #define emith_ror(d, s, cnt) \
  590. EMIT(A64_ROR_IMM(d, s, cnt))
  591. #define emith_ror_c(cond, d, s, cnt) \
  592. emith_ror(d, s, cnt)
  593. #define emith_rol(d, s, cnt) \
  594. EMIT(A64_ROR_IMM(d, s, 32-(cnt)))
  595. // NB: shift with carry not directly supported in A64 :-|.
  596. #define emith_lslf(d, s, cnt) do { \
  597. if ((cnt) > 1) { \
  598. emith_lsl(d, s, cnt-1); \
  599. emith_addf_r_r_r(d, d, d); \
  600. } else if ((cnt) > 0) \
  601. emith_addf_r_r_r(d, s, s); \
  602. } while (0)
  603. #define emith_lsrf(d, s, cnt) do { \
  604. EMIT(A64_RBIT_REG(d, s)); \
  605. emith_lslf(d, d, cnt); \
  606. EMIT(A64_RBIT_REG(d, d)); \
  607. } while (0)
  608. #define emith_asrf(d, s, cnt) do { \
  609. int _s = s; \
  610. if ((cnt) > 1) { \
  611. emith_asr(d, s, cnt-1); \
  612. _s = d; \
  613. } \
  614. if ((cnt) > 0) { \
  615. emith_addf_r_r_r(Z0, _s, _s); \
  616. EMIT(A64_RBIT_REG(d, _s)); \
  617. emith_adcf_r_r_r(d, d, d); \
  618. EMIT(A64_RBIT_REG(d, d)); \
  619. } \
  620. } while (0)
  621. #define emith_rolf(d, s, cnt) do { \
  622. int _s = s; \
  623. if ((cnt) > 1) { \
  624. emith_rol(d, s, cnt-1); \
  625. _s = d; \
  626. } \
  627. if ((cnt) > 0) { \
  628. emith_addf_r_r_r(d, _s, _s); \
  629. emith_adc_r_r_r(d, d, Z0); \
  630. } \
  631. } while (0)
  632. #define emith_rorf(d, s, cnt) do { \
  633. if ((cnt) > 0) { \
  634. emith_ror(d, s, cnt); \
  635. emith_addf_r_r_r(Z0, d, d); \
  636. } \
  637. } while (0)
  638. #define emith_rolcf(d) \
  639. emith_adcf_r_r(d, d)
  640. #define emith_rorcf(d) do { \
  641. EMIT(A64_RBIT_REG(d, d)); \
  642. emith_adcf_r_r(d, d); \
  643. EMIT(A64_RBIT_REG(d, d)); \
  644. } while (0)
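/*
 * Note (annotation): since A64 shifts don't set the carry flag, the *f shift
 * macros reconstruct it. E.g. emith_lslf(d, s, 3) becomes
 *   LSL  Wd, Ws, #2   ; shift all but the last bit
 *   ADDS Wd, Wd, Wd   ; final shift by 1; C = last bit shifted out
 * and the right-shifting variants bracket this trick with RBIT so the same
 * ADDS/ADCS sequence captures the bit shifted out to the right.
 */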
  645. // signed/unsigned extend
  646. #define emith_clear_msb(d, s, count) /* bits to clear */ \
  647. EMIT(A64_UXT_IMM(d, s, 32-(count)))
  648. #define emith_clear_msb_c(cond, d, s, count) \
  649. emith_clear_msb(d, s, count)
  650. #define emith_sext(d, s, count) /* bits to keep */ \
  651. EMIT(A64_SXT_IMM(d, s, count))
  652. // multiply Rd = Rn*Rm (+ Ra)
  653. #define emith_mul(d, s1, s2) \
  654. EMIT(A64_MUL(d, s1, s2))
  655. // NB: must combine/split Xd from/into 2 Wd's; play safe and clear upper bits
  656. #define emith_combine64(dlo, dhi) \
  657. EMIT(A64_UXTX_IMM(dlo, dlo, 32)); \
  658. EMIT(A64_ORX_REG(dlo, dlo, dhi, ST_LSL, 32));
  659. #define emith_split64(dlo, dhi) \
  660. EMIT(A64_LSRX_IMM(dhi, dlo, 32)); \
  661. EMIT(A64_UXTX_IMM(dlo, dlo, 32));
  662. #define emith_mul_u64(dlo, dhi, s1, s2) do { \
  663. EMIT(A64_UMULL(dlo, s1, s2)); \
  664. emith_split64(dlo, dhi); \
  665. } while (0)
  666. #define emith_mul_s64(dlo, dhi, s1, s2) do { \
  667. EMIT(A64_SMULL(dlo, s1, s2)); \
  668. emith_split64(dlo, dhi); \
  669. } while (0)
  670. #define emith_mula_s64(dlo, dhi, s1, s2) do { \
  671. emith_combine64(dlo, dhi); \
  672. EMIT(A64_SMADDL(dlo, s1, s2, dlo)); \
  673. emith_split64(dlo, dhi); \
  674. } while (0)
  675. #define emith_mula_s64_c(cond, dlo, dhi, s1, s2) \
  676. emith_mula_s64(dlo, dhi, s1, s2)
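/*
 * Example (annotation): the 64-bit multiply helpers keep the result in two
 * 32-bit host registers. emith_mul_u64(lo, hi, a, b) should expand to
 *   UMADDL Xlo, Wa, Wb, XZR   ; full 64-bit product in Xlo
 *   LSR    Xhi, Xlo, #32      ; emith_split64: high word...
 *   UBFX   Xlo, Xlo, #0, #32  ; ...and zero-extended low word
 * emith_mula_s64 first re-combines lo/hi into one X register before SMADDL.
 */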
  677. // load/store. offs has 9 bits signed, hence larger offs may use a temp
  678. static void emith_ldst_offs(int sz, int rd, int rn, int o9, int ld, int mode)
  679. {
  680. if (o9 >= -256 && o9 < 256) {
  681. EMIT(A64_OP_LDST(sz, ld, A64_LDST_AM(0,_,o9), mode, rn, rd));
  682. } else if (mode == AM_IDXPRE) {
  683. emith_add_r_r_ptr_imm(rn, rn, o9);
  684. EMIT(A64_OP_LDST(sz, ld, A64_LDST_AM(0,_,0), AM_IDX, rn, rd));
  685. } else if (mode == AM_IDXPOST) {
  686. EMIT(A64_OP_LDST(sz, ld, A64_LDST_AM(0,_,0), AM_IDX, rn, rd));
  687. emith_add_r_r_ptr_imm(rn, rn, o9);
  688. } else {
  689. int _t = rcache_get_tmp();
  690. emith_add_r_r_ptr_imm(_t, rn, o9);
  691. EMIT(A64_OP_LDST(sz, ld, A64_LDST_AM(0,_,0), AM_IDX, _t, rd));
  692. rcache_free_tmp(_t);
  693. }
  694. }
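/*
 * Note (annotation): the unscaled load/store immediate only covers -256..255,
 * so emith_read_r_r_offs(r, rs, 0x40) emits a single LDUR, while something
 * like emith_read_r_r_offs(r, rs, 0x400) borrows a temp:
 *   ADD  Xt, Xrs, #0x400
 *   LDUR Wr, [Xt]
 * Pre/post-increment modes with large offsets update rs directly instead.
 */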
  695. #define emith_read_r_r_offs_ptr(r, rs, offs) \
  696. emith_ldst_offs(AM_X, r, rs, offs, LT_LD, AM_IDX)
  697. #define emith_read_r_r_offs_ptr_c(cond, r, rs, offs) \
  698. emith_read_r_r_offs_ptr(r, rs, offs)
  699. #define emith_read_r_r_offs(r, rs, offs) \
  700. emith_ldst_offs(AM_W, r, rs, offs, LT_LD, AM_IDX)
  701. #define emith_read_r_r_offs_c(cond, r, rs, offs) \
  702. emith_read_r_r_offs(r, rs, offs)
  703. #define emith_read_r_r_r_ptr(r, rs, rm) \
  704. EMIT(A64_LDSTX_REG(r, rs, rm, LT_LD, XT_SXTW))
  705. #define emith_read_r_r_r(r, rs, rm) \
  706. EMIT(A64_LDST_REG(r, rs, rm, LT_LD, XT_SXTW))
  707. #define emith_read_r_r_r_c(cond, r, rs, rm) \
  708. emith_read_r_r_r(r, rs, rm)
  709. #define emith_read_r_r_r_ptr_wb(r, rs, rm) do { \
  710. emith_read_r_r_r_ptr(r, rs, rm); \
  711. emith_add_r_r_ptr(rs, rm); \
  712. } while (0)
  713. #define emith_read_r_r_r_wb(r, rs, rm) do { \
  714. emith_read_r_r_r(r, rs, rm); \
  715. emith_add_r_r_ptr(rs, rm); \
  716. } while (0)
  717. #define emith_read8_r_r_offs(r, rs, offs) \
  718. emith_ldst_offs(AM_B, r, rs, offs, LT_LD, AM_IDX)
  719. #define emith_read8_r_r_offs_c(cond, r, rs, offs) \
  720. emith_read8_r_r_offs(r, rs, offs)
  721. #define emith_read8_r_r_r(r, rs, rm) \
  722. EMIT(A64_LDSTB_REG(r, rs, rm, LT_LD, XT_SXTW))
  723. #define emith_read8_r_r_r_c(cond, r, rs, rm) \
  724. emith_read8_r_r_r(r, rs, rm)
  725. #define emith_read16_r_r_offs(r, rs, offs) \
  726. emith_ldst_offs(AM_H, r, rs, offs, LT_LD, AM_IDX)
  727. #define emith_read16_r_r_offs_c(cond, r, rs, offs) \
  728. emith_read16_r_r_offs(r, rs, offs)
  729. #define emith_read16_r_r_r(r, rs, rm) \
  730. EMIT(A64_LDSTH_REG(r, rs, rm, LT_LD, XT_SXTW))
  731. #define emith_read16_r_r_r_c(cond, r, rs, rm) \
  732. emith_read16_r_r_r(r, rs, rm)
  733. #define emith_read8s_r_r_offs(r, rs, offs) \
  734. emith_ldst_offs(AM_B, r, rs, offs, LT_LDS, AM_IDX)
  735. #define emith_read8s_r_r_offs_c(cond, r, rs, offs) \
  736. emith_read8s_r_r_offs(r, rs, offs)
  737. #define emith_read8s_r_r_r(r, rs, rm) \
  738. EMIT(A64_LDSTB_REG(r, rs, rm, LT_LDS, XT_SXTW))
  739. #define emith_read8s_r_r_r_c(cond, r, rs, rm) \
  740. emith_read8s_r_r_r(r, rs, rm)
  741. #define emith_read16s_r_r_offs(r, rs, offs) \
  742. emith_ldst_offs(AM_H, r, rs, offs, LT_LDS, AM_IDX)
  743. #define emith_read16s_r_r_offs_c(cond, r, rs, offs) \
  744. emith_read16s_r_r_offs(r, rs, offs)
  745. #define emith_read16s_r_r_r(r, rs, rm) \
  746. EMIT(A64_LDSTH_REG(r, rs, rm, LT_LDS, XT_SXTW))
  747. #define emith_read16s_r_r_r_c(cond, r, rs, rm) \
  748. emith_read16s_r_r_r(r, rs, rm)
  749. #define emith_write_r_r_offs_ptr(r, rs, offs) \
  750. emith_ldst_offs(AM_X, r, rs, offs, LT_ST, AM_IDX)
  751. #define emith_write_r_r_offs_ptr_c(cond, r, rs, offs) \
  752. emith_write_r_r_offs_ptr(r, rs, offs)
  753. #define emith_write_r_r_r_ptr(r, rs, rm) \
  754. EMIT(A64_LDSTX_REG(r, rs, rm, LT_ST, XT_SXTW))
  755. #define emith_write_r_r_r_ptr_c(cond, r, rs, rm) \
  756. emith_write_r_r_r_ptr(r, rs, rm)
  757. #define emith_write_r_r_offs(r, rs, offs) \
  758. emith_ldst_offs(AM_W, r, rs, offs, LT_ST, AM_IDX)
  759. #define emith_write_r_r_offs_c(cond, r, rs, offs) \
  760. emith_write_r_r_offs(r, rs, offs)
  761. #define emith_write_r_r_r(r, rs, rm) \
  762. EMIT(A64_LDST_REG(r, rs, rm, LT_ST, XT_SXTW))
  763. #define emith_write_r_r_r_c(cond, r, rs, rm) \
  764. emith_write_r_r_r(r, rs, rm)
  765. #define emith_write_r_r_r_ptr_wb(r, rs, rm) do { \
  766. emith_write_r_r_r_ptr(r, rs, rm); \
  767. emith_add_r_r_ptr(rs, rm); \
  768. } while (0)
  769. #define emith_write_r_r_r_wb(r, rs, rm) do { \
  770. emith_write_r_r_r(r, rs, rm); \
  771. emith_add_r_r_ptr(rs, rm); \
  772. } while (0)
  773. #define emith_ctx_read_ptr(r, offs) \
  774. emith_read_r_r_offs_ptr(r, CONTEXT_REG, offs)
  775. #define emith_ctx_read(r, offs) \
  776. emith_read_r_r_offs(r, CONTEXT_REG, offs)
  777. #define emith_ctx_read_c(cond, r, offs) \
  778. emith_ctx_read(r, offs)
  779. #define emith_ctx_write_ptr(r, offs) \
  780. emith_write_r_r_offs_ptr(r, CONTEXT_REG, offs)
  781. #define emith_ctx_write(r, offs) \
  782. emith_write_r_r_offs(r, CONTEXT_REG, offs)
  783. #define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
  784. int r_ = r, offs_ = offs, cnt_ = cnt; \
  785. for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
  786. emith_ctx_read(r_, offs_); \
  787. } while (0)
  788. #define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
  789. int r_ = r, offs_ = offs, cnt_ = cnt; \
  790. for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
  791. emith_ctx_write(r_, offs_); \
  792. } while (0)
  793. // push pairs; NB: SP must be 16 byte aligned (HW requirement!)
  794. #define emith_push2(r1, r2) \
  795. EMIT(A64_LDSTPX_IMM(SP, r1, r2, -2*8, LT_ST, AM_IDXPRE))
  796. #define emith_pop2(r1, r2) \
  797. EMIT(A64_LDSTPX_IMM(SP, r1, r2, 2*8, LT_LD, AM_IDXPOST))
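/*
 * Example (annotation): emith_push2(LR, FP) should encode as
 *   stp x30, x29, [sp, #-16]!
 * and emith_pop2(LR, FP) as the matching post-indexed ldp. This is why the
 * save/restore helpers below always pair registers (padding with x18 when
 * the register count is odd) to keep SP 16-byte aligned.
 */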
  798. // function call handling
  799. #define emith_save_caller_regs(mask) do { \
  800. int _c, _r1, _r2; u32 _m = mask & 0x3ffff; \
  801. if (__builtin_parity(_m) == 1) _m |= 0x40000; /* hardware align */ \
  802. for (_c = HOST_REGS, _r1 = -1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
  803. if (_m & (1 << _c)) { \
  804. _r2 = _r1, _r1 = _c; \
  805. if (_r2 != -1) { \
  806. emith_push2(_r1, _r2); \
  807. _r1 = -1; \
  808. } \
  809. } \
  810. } while (0)
  811. #define emith_restore_caller_regs(mask) do { \
  812. int _c, _r1, _r2; u32 _m = mask & 0x3ffff; \
  813. if (__builtin_parity(_m) == 1) _m |= 0x40000; /* hardware align */ \
  814. for (_c = 0, _r1 = -1; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
  815. if (_m & (1 << _c)) { \
  816. _r2 = _r1, _r1 = _c; \
  817. if (_r2 != -1) { \
  818. emith_pop2(_r2, _r1); \
  819. _r1 = -1; \
  820. } \
  821. } \
  822. } while (0)
  823. #define host_arg2reg(rd, arg) \
  824. rd = arg
  825. #define emith_pass_arg_r(arg, reg) \
  826. emith_move_r_r(arg, reg)
  827. #define emith_pass_arg_imm(arg, imm) \
  828. emith_move_r_imm(arg, imm)
  829. // branching; NB: A64 B.cond has only +/- 1MB range
  830. #define emith_bcond(ptr, patch, cond, target) do { \
  831. u32 disp_ = (u8 *)target - (u8 *)ptr; \
  832. if (disp_ >= 0xfff00000 || disp_ <= 0x000fffff) { /* can use near B.c */ \
  833. EMIT_PTR(ptr, A64_BCOND(cond, disp_ & 0x001fffff)); \
  834. if (patch) EMIT_PTR(ptr, A64_NOP); /* reserve space for far B */ \
  835. } else { /* far branch if near branch isn't possible */ \
  836. EMIT_PTR(ptr, A64_BCOND(emith_invert_cond(cond), 8)); \
  837. EMIT_PTR(ptr, A64_B((disp_ - 4) & 0x0fffffff)); \
  838. } \
  839. } while (0)
  840. #define emith_jump(target) do {\
  841. u32 disp_ = (u8 *)target - (u8 *)tcache_ptr; \
  842. EMIT(A64_B(disp_ & 0x0fffffff)); \
  843. } while (0)
  844. #define emith_jump_patchable(target) \
  845. emith_jump(target)
  846. #define emith_jump_cond(cond, target) \
  847. emith_bcond(tcache_ptr, 0, cond, target)
  848. #define emith_jump_cond_patchable(cond, target) \
  849. emith_bcond(tcache_ptr, 1, cond, target)
  850. #define emith_jump_patch(ptr, target) ({ \
  851. u32 *ptr_ = (u32 *)ptr; \
  852. u32 disp_ = (u8 *)(target) - (u8 *)(ptr_); \
  853. int cond_ = ptr_[0] & 0xf; \
  854. if ((ptr_[0] & 0xff000000) == 0x54000000) { /* B.cond */ \
  855. if (ptr_[1] != A64_NOP) cond_ = emith_invert_cond(cond_); \
  856. emith_bcond(ptr_, 1, cond_, target); \
  857. } else if (ptr_[0] & 0x80000000) \
  858. EMIT_PTR(ptr_, A64_BL((disp_) & 0x0fffffff)); \
  859. else EMIT_PTR(ptr_, A64_B((disp_) & 0x0fffffff)); \
  860. (u8 *)ptr; \
  861. })
  862. #define emith_jump_reg(r) \
  863. EMIT(A64_BR(r))
  864. #define emith_jump_reg_c(cond, r) \
  865. emith_jump_reg(r)
  866. #define emith_jump_ctx(offs) do { \
  867. int _t = rcache_get_tmp(); \
  868. emith_ctx_read_ptr(_t, offs); \
  869. emith_jump_reg(_t); \
  870. rcache_free_tmp(_t); \
  871. } while (0)
  872. #define emith_jump_ctx_c(cond, offs) \
  873. emith_jump_ctx(offs)
  874. #define emith_call(target) do { \
  875. u32 disp_ = (u8 *)target - (u8 *)tcache_ptr; \
  876. EMIT(A64_BL(disp_ & 0x0fffffff)); \
  877. } while (0)
  878. #define emith_call_cond(cond, target) \
  879. emith_call(target)
  880. #define emith_call_reg(r) \
  881. EMIT(A64_BLR(r))
  882. #define emith_call_ctx(offs) do { \
  883. int _t = rcache_get_tmp(); \
  884. emith_ctx_read_ptr(_t, offs); \
  885. emith_call_reg(_t); \
  886. rcache_free_tmp(_t); \
  887. } while (0)
  888. #define emith_call_link(r, target) do { \
  889. EMIT(A64_ADRXLIT_IMM(r, 8)); \
  890. emith_jump(target); \
  891. } while (0)
  892. #define emith_call_cleanup() /**/
  893. #define emith_ret() \
  894. EMIT(A64_RET(LR))
  895. #define emith_ret_c(cond) \
  896. emith_ret()
  897. #define emith_ret_to_ctx(offs) \
  898. emith_ctx_write_ptr(LR, offs)
  899. // NB: pushes r or r18 for SP hardware alignment
  900. #define emith_push_ret(r) do { \
  901. int r_ = (r >= 0 ? r : 18); \
  902. emith_push2(r_, LR); \
  903. } while (0)
  904. #define emith_pop_and_ret(r) do { \
  905. int r_ = (r >= 0 ? r : 18); \
  906. emith_pop2(r_, LR); \
  907. emith_ret(); \
  908. } while (0)
  909. // emitter ABI stuff
  910. #define emith_pool_check() /**/
  911. #define emith_pool_commit(j) /**/
  912. #define emith_insn_ptr() ((u8 *)tcache_ptr)
  913. #define emith_flush() /**/
  914. #define host_instructions_updated(base, end) __builtin___clear_cache(base, end)
  915. #define emith_jump_patch_size() 8
  916. #define emith_rw_offs_max() 0xff
  917. // SH2 drc specific
  918. #define emith_sh2_drc_entry() do { \
  919. emith_push2(LR, FP); \
  920. emith_push2(28, 27); \
  921. emith_push2(26, 25); \
  922. emith_push2(24, 23); \
  923. emith_push2(22, 21); \
  924. emith_push2(20, 19); \
  925. } while (0)
  926. #define emith_sh2_drc_exit() do { \
  927. emith_pop2(20, 19); \
  928. emith_pop2(22, 21); \
  929. emith_pop2(24, 23); \
  930. emith_pop2(26, 25); \
  931. emith_pop2(28, 27); \
  932. emith_pop2(LR, FP); \
  933. emith_ret(); \
  934. } while (0)
  935. // NB: assumes a is in arg0, tab, func and mask are temp
  936. #define emith_sh2_rcall(a, tab, func, mask) do { \
  937. emith_lsr(mask, a, SH2_READ_SHIFT); \
  938. EMIT(A64_ADDX_REG(tab, tab, mask, ST_LSL, 4)); \
  939. emith_read_r_r_offs_ptr(func, tab, 0); \
  940. emith_read_r_r_offs(mask, tab, 8); \
  941. EMIT(A64_ADDXS_REG(func, func, func, ST_LSL, 0)); \
  942. } while (0)
  943. // NB: assumes a, val are in arg0 and arg1, tab and func are temp
  944. #define emith_sh2_wcall(a, val, tab, func) do { \
  945. emith_lsr(func, a, SH2_WRITE_SHIFT); \
  946. emith_lsl(func, func, 3); \
  947. emith_read_r_r_r_ptr(func, tab, func); \
  948. emith_move_r_r_ptr(2, CONTEXT_REG); /* arg2 */ \
  949. emith_jump_reg(func); \
  950. } while (0)
  951. #define emith_sh2_delay_loop(cycles, reg) do { \
  952. int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL); \
  953. int t1 = rcache_get_tmp(); \
  954. int t2 = rcache_get_tmp(); \
  955. int t3 = rcache_get_tmp(); \
  956. /* if (sr < 0) return */ \
  957. emith_asrf(t2, sr, 12); \
  958. EMITH_JMP_START(DCOND_LE); \
  959. /* turns = sr.cycles / cycles */ \
  960. emith_move_r_imm(t3, (u32)((1ULL<<32) / (cycles)) + 1); \
  961. emith_mul_u64(t1, t2, t2, t3); /* multiply by 1/x */ \
  962. rcache_free_tmp(t3); \
  963. if (reg >= 0) { \
  964. /* if (reg <= turns) turns = reg-1 */ \
  965. t3 = rcache_get_reg(reg, RC_GR_RMW, NULL); \
  966. emith_cmp_r_r(t3, t2); \
  967. EMITH_SJMP_START(DCOND_HI); \
  968. emith_sub_r_r_imm_c(DCOND_LS, t2, t3, 1); \
  969. EMITH_SJMP_END(DCOND_HI); \
  970. /* if (reg <= 1) turns = 0 */ \
  971. emith_cmp_r_imm(t3, 1); \
  972. EMITH_SJMP_START(DCOND_HI); \
  973. emith_move_r_imm_c(DCOND_LS, t2, 0); \
  974. EMITH_SJMP_END(DCOND_HI); \
  975. /* reg -= turns */ \
  976. emith_sub_r_r(t3, t2); \
  977. } \
  978. /* sr.cycles -= turns * cycles; */ \
  979. emith_move_r_imm(t1, cycles); \
  980. emith_mul(t1, t2, t1); \
  981. emith_sub_r_r_r_lsl(sr, sr, t1, 12); \
  982. EMITH_JMP_END(DCOND_LE); \
  983. rcache_free_tmp(t1); \
  984. rcache_free_tmp(t2); \
  985. } while (0)
  986. /*
  987. * if Q
  988. * t = carry(Rn += Rm)
  989. * else
  990. * t = carry(Rn -= Rm)
  991. * T ^= t
  992. */
  993. #define emith_sh2_div1_step(rn, rm, sr) do { \
  994. int tmp_ = rcache_get_tmp(); \
  995. emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
  996. EMITH_SJMP3_START(DCOND_EQ); \
  997. emith_addf_r_r(rn, rm); \
  998. emith_adc_r_r_r(tmp_, Z0, Z0); \
  999. EMITH_SJMP3_MID(DCOND_EQ); \
  1000. emith_subf_r_r(rn, rm); \
  1001. emith_adc_r_r_r(tmp_, Z0, Z0); \
  1002. emith_eor_r_imm(tmp_, 1); \
  1003. EMITH_SJMP3_END(); \
  1004. emith_eor_r_r(sr, tmp_); \
  1005. rcache_free_tmp(tmp_); \
  1006. } while (0)
  1007. /* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
  1008. #define emith_sh2_macl(ml, mh, rn, rm, sr) do { \
  1009. emith_tst_r_imm(sr, S); \
  1010. EMITH_SJMP_START(DCOND_EQ); \
  1011. /* MACH top 16 bits unused if saturated. sign ext for overfl detect */ \
  1012. emith_sext(mh, mh, 16); \
  1013. EMITH_SJMP_END(DCOND_EQ); \
  1014. emith_mula_s64(ml, mh, rn, rm); \
  1015. emith_tst_r_imm(sr, S); \
  1016. EMITH_SJMP_START(DCOND_EQ); \
  1017. /* overflow if top 17 bits of MACH aren't all 1 or 0 */ \
  1018. /* to check: add MACH[15] to MACH[31:16]. this is 0 if no overflow */ \
  1019. emith_asrf(rn, mh, 16); /* sum = (MACH>>16) + ((MACH>>15)&1) */ \
  1020. emith_adcf_r_imm(rn, 0); /* (MACH>>15) is in carry after shift */ \
  1021. EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> ov */ \
  1022. emith_move_r_imm_c(DCOND_NE, ml, 0x0000); /* -overflow */ \
  1023. emith_move_r_imm_c(DCOND_NE, mh, 0x8000); \
  1024. EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> +ovl */ \
  1025. emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0xffffffff */ \
  1026. emith_sub_r_imm_c(DCOND_GT, mh, 1); /* 0x00007fff */ \
  1027. EMITH_SJMP_END(DCOND_LE); \
  1028. EMITH_SJMP_END(DCOND_EQ); \
  1029. EMITH_SJMP_END(DCOND_EQ); \
  1030. } while (0)
  1031. /* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
  1032. #define emith_sh2_macw(ml, mh, rn, rm, sr) do { \
  1033. emith_tst_r_imm(sr, S); \
  1034. EMITH_SJMP_START(DCOND_EQ); \
  1035. /* XXX: MACH should be untouched when S is set? */ \
  1036. emith_asr(mh, ml, 31); /* sign ext MACL to MACH for ovrfl check */ \
  1037. EMITH_SJMP_END(DCOND_EQ); \
  1038. emith_mula_s64(ml, mh, rn, rm); \
  1039. emith_tst_r_imm(sr, S); \
  1040. EMITH_SJMP_START(DCOND_EQ); \
  1041. /* overflow if top 33 bits of MACH:MACL aren't all 1 or 0 */ \
  1042. /* to check: add MACL[31] to MACH. this is 0 if no overflow */ \
  1043. emith_lsr(rn, ml, 31); \
  1044. emith_addf_r_r(rn, mh); /* sum = MACH + ((MACL>>31)&1) */ \
  1045. EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> overflow */ \
  1046. /* XXX: LSB signalling only in SH1, or in SH2 too? */ \
  1047. emith_move_r_imm_c(DCOND_NE, mh, 0x00000001); /* LSB of MACH */ \
  1048. emith_move_r_imm_c(DCOND_NE, ml, 0x80000000); /* negative ovrfl */ \
  1049. EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> positive ovrfl */ \
  1050. emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0x7fffffff */ \
  1051. EMITH_SJMP_END(DCOND_LE); \
  1052. EMITH_SJMP_END(DCOND_EQ); \
  1053. EMITH_SJMP_END(DCOND_EQ); \
  1054. } while (0)
  1055. #define emith_write_sr(sr, srcr) do { \
  1056. emith_lsr(sr, sr, 10); \
  1057. emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
  1058. emith_ror(sr, sr, 22); \
  1059. } while (0)
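/*
 * Note (annotation): emith_write_sr replaces only the low 10 bits of SR.
 * LSR #10 drops the old flag bits, the ORR with srcr shifted left by 22
 * parks the new flags in the top bits, and the final ROR #22 rotates both
 * parts back into place, leaving SR[31:10] untouched.
 */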
  1060. #define emith_carry_to_t(sr, is_sub) do { \
  1061. emith_lsr(sr, sr, 1); \
  1062. emith_adc_r_r(sr, sr); \
  1063. if (is_sub) /* SUB has inverted C on ARM */ \
  1064. emith_eor_r_imm(sr, 1); \
  1065. } while (0)
  1066. #define emith_tpop_carry(sr, is_sub) do { \
  1067. if (is_sub) \
  1068. emith_eor_r_imm(sr, 1); \
  1069. emith_lsrf(sr, sr, 1); \
  1070. } while (0)
  1071. #define emith_tpush_carry(sr, is_sub) do { \
  1072. emith_adc_r_r(sr, sr); \
  1073. if (is_sub) \
  1074. emith_eor_r_imm(sr, 1); \
  1075. } while (0)
  1076. #ifdef T
  1077. // T bit handling
  1078. #define emith_invert_cond(cond) \
  1079. ((cond) ^ 1)
  1080. static void emith_clr_t_cond(int sr)
  1081. {
  1082. emith_bic_r_imm(sr, T);
  1083. }
  1084. static void emith_set_t_cond(int sr, int cond)
  1085. {
  1086. EMITH_SJMP_START(emith_invert_cond(cond));
  1087. emith_or_r_imm_c(cond, sr, T);
  1088. EMITH_SJMP_END(emith_invert_cond(cond));
  1089. }
  1090. #define emith_get_t_cond() -1
  1091. #define emith_sync_t(sr) ((void)sr)
  1092. #define emith_invalidate_t()
  1093. static void emith_set_t(int sr, int val)
  1094. {
  1095. if (val)
  1096. emith_or_r_imm(sr, T);
  1097. else
  1098. emith_bic_r_imm(sr, T);
  1099. }
  1100. static int emith_tst_t(int sr, int tf)
  1101. {
  1102. emith_tst_r_imm(sr, T);
  1103. return tf ? DCOND_NE: DCOND_EQ;
  1104. }
  1105. #endif