emit_arm.c

  1. /*
  2. * Basic macros to emit ARM instructions and some utils
  3. * Copyright (C) 2008,2009,2010 notaz
  4. *
  5. * This work is licensed under the terms of MAME license.
  6. * See COPYING file in the top-level directory.
  7. */
  8. #define HOST_REGS 16
  9. #define CONTEXT_REG 11
  10. #define RET_REG 0
  11. // XXX: tcache_ptr type for SVP and SH2 compilers differs..
  12. #define EMIT_PTR(ptr, x) \
  13. do { \
  14. *(u32 *)ptr = x; \
  15. ptr = (void *)((u8 *)ptr + sizeof(u32)); \
  16. } while (0)
  17. // ARM special registers and peephole optimization flags
  18. #define SP 13 // stack pointer
  19. #define LR 14 // link (return address)
  20. #define PC 15 // program counter
  21. #define SR 16 // CPSR, status register
  22. #define MEM 17 // memory access (src=LDR, dst=STR)
  23. #define CYC1 20 // 1 cycle interlock (LDR, reg-cntrld shift)
  24. #define CYC2 21 // 2+ cycles interlock (LDR[BH], MUL/MLA etc)
  25. #define SWAP 31 // swapped
  26. #define NO 32 // token for "no register"
  27. // bitmask builders
  28. #define M1(x) (u32)(1ULL<<(x)) // u32 to have NO evaluate to 0
  29. #define M2(x,y) (M1(x)|M1(y))
  30. #define M3(x,y,z) (M2(x,y)|M1(z))
  31. #define M4(x,y,z,a) (M3(x,y,z)|M1(a))
  32. #define M5(x,y,z,a,b) (M4(x,y,z,a)|M1(b))
  33. #define M10(a,b,c,d,e,f,g,h,i,j) (M5(a,b,c,d,e)|M5(f,g,h,i,j))
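// e.g. M3(0,1,SR) = 0x00010003 marks r0, r1 and the status register; since
// M1() truncates to u32, M1(NO) = (u32)(1ULL<<32) = 0, so NO can be passed
// wherever "no register" is meant without disturbing the mask.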
  34. // peephole optimizer. ATM only tries to reduce interlock
  35. #define EMIT_CACHE_SIZE 3
  36. struct emit_op {
  37. u32 op;
  38. u32 src, dst;
  39. };
  40. // peephole cache, last committed insn + cache + next insn + empty insn = size+3
  41. static struct emit_op emit_cache[EMIT_CACHE_SIZE+3];
  42. static int emit_index;
  43. #define emith_insn_ptr() (u8 *)((u32 *)tcache_ptr-emit_index)
  44. static int emith_pool_index(int tcache_offs);
  45. static void emith_pool_adjust(int pool_index, int move_offs);
  46. static NOINLINE void EMIT(u32 op, u32 dst, u32 src)
  47. {
  48. void *emit_ptr = (u32 *)tcache_ptr - emit_index;
  49. int i;
  50. EMIT_PTR(tcache_ptr, op); // emit to keep tcache_ptr current
  51. COUNT_OP;
  52. // for conditional execution SR is always source
  53. if (op < 0xe0000000 /*A_COND_AL << 28*/)
  54. src |= M1(SR);
  55. // put insn on back of queue
  56. emit_cache[emit_index+1].op = op;
  57. emit_cache[emit_index+1].src = src & ~M1(NO); // mask away the NO token
  58. emit_cache[emit_index+1].dst = dst & ~M1(NO);
  59. // move insn down in the queue as long as permitted by dependencies
  60. for (i = emit_index-1; i > 0; i--) {
  61. struct emit_op *ptr = &emit_cache[i];
  62. int deps = 0;
  63. // never swap branch insns (changes semantics)
  64. if ((ptr[0].dst | ptr[1].dst) & M1(PC))
  65. continue;
  66. // insns 0 and 1 must not be swapped if deps exist between them, since any
  67. // dep other than [0].src & [1].src changes semantics when swapped.
  68. if ((ptr[0].dst & ptr[1].src) || (ptr[1].dst & ptr[0].src) ||
  69. (ptr[0].dst & ptr[1].dst))
  70. continue;
  71. #if 1
  72. // just move loads as far up as possible
  73. deps -= !!(ptr[1].src & M1(MEM));
  74. deps += !!(ptr[0].src & M1(MEM));
  75. #elif 0
  76. // treat all dest->src deps as a potential interlock
  77. #define DEP_INSN(x,y) !!(ptr[x].dst & ptr[y].src)
  78. // insn sequence: -1, 0, 1, 2
  79. deps -= DEP_INSN(1,2) + DEP_INSN(-1,0);
  80. deps -= !!(ptr[1].src & M1(MEM)); // favour moving LDR's down
  81. // insn sequence: -1, 1, 0, 2
  82. deps += DEP_INSN(0,2) + DEP_INSN(-1,1);
  83. deps += !!(ptr[0].src & M1(SWAP)); // penalise if swapped
  84. #else
  85. // calculate ARM920T interlock cycles
  86. #define DEP_CYC1(x,y) ((ptr[x].dst & ptr[y].src)&&(ptr[x].src & M1(CYC1)))
  87. #define DEP_CYC2(x,y) ((ptr[x].dst & ptr[y].src)&&(ptr[x].src & M1(CYC2)))
  88. #define DEP_INSN(x,y,z) DEP_CYC1(x,y)+DEP_CYC1(y,z)+2*DEP_CYC2(x,y)+DEP_CYC2(x,z)
  89. // insn sequence: -1, 0, 1, 2
  90. deps -= DEP_INSN(0,1,2) + DEP_INSN(-1,0,1);
  91. deps -= !!(ptr[1].src & M1(MEM)); // favour moving LDR's down
  92. // insn sequence: -1, 1, 0, 2
  93. deps += DEP_INSN(0,2,1) + DEP_INSN(-1,1,0);
  94. deps += !!(ptr[0].src & M1(SWAP)); // penalise multiple swaps
  95. #endif
  96. // swap if fewer dependencies
  97. if (deps < 0) {
  98. // swap insn reading PC only if uncommitted pool load
  99. struct emit_op tmp;
  100. int i0 = -1, i1 = -1;
  101. if ((!(ptr[0].src & M1(PC)) ||
  102. (i0 = emith_pool_index(emit_index+2 - i)) >= 0) &&
  103. (!(ptr[1].src & M1(PC)) ||
  104. (i1 = emith_pool_index(emit_index+1 - i)) >= 0)) {
  105. // not using PC, or pool load
  106. emith_pool_adjust(i0, 1);
  107. emith_pool_adjust(i1, -1);
  108. tmp = ptr[0], ptr[0] = ptr[1], ptr[1] = tmp;
  109. ptr[0].src |= M1(SWAP);
  110. }
  111. }
  112. }
  113. if (emit_index <= EMIT_CACHE_SIZE) {
  114. // queue not yet full
  115. emit_index++;
  116. } else {
  117. // commit oldest insn from cache
  118. EMIT_PTR(emit_ptr, emit_cache[1].op);
  119. for (i = 0; i <= emit_index; i++)
  120. emit_cache[i] = emit_cache[i+1];
  121. }
  122. }
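// Illustration of the default policy above ("move loads up"): if the code
// emits, in this order,
//   add r0, r0, #1
//   ldr r1, [r2]
//   and r3, r1, #7
// and there are no register dependencies between the add and the ldr, the
// queue swaps them so the load is issued one insn earlier and the and no
// longer pays the ARM9 load-use interlock. Branches (PC as dst) and insns
// with real dst/src overlaps are never reordered.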
  123. static void emith_flush(void)
  124. {
  125. int i;
  126. void *emit_ptr = tcache_ptr - emit_index*sizeof(u32);
  127. for (i = 1; i <= emit_index; i++)
  128. EMIT_PTR(emit_ptr, emit_cache[i].op);
  129. emit_index = 0;
  130. }
  131. #define A_COND_AL 0xe
  132. #define A_COND_EQ 0x0
  133. #define A_COND_NE 0x1
  134. #define A_COND_HS 0x2
  135. #define A_COND_LO 0x3
  136. #define A_COND_MI 0x4
  137. #define A_COND_PL 0x5
  138. #define A_COND_VS 0x6
  139. #define A_COND_VC 0x7
  140. #define A_COND_HI 0x8
  141. #define A_COND_LS 0x9
  142. #define A_COND_GE 0xa
  143. #define A_COND_LT 0xb
  144. #define A_COND_GT 0xc
  145. #define A_COND_LE 0xd
  146. #define A_COND_CS A_COND_HS
  147. #define A_COND_CC A_COND_LO
  148. #define A_COND_NV 0xf // Not Valid (aka NeVer :-) - ATTN: not a real condition!
  149. /* unified conditions */
  150. #define DCOND_EQ A_COND_EQ
  151. #define DCOND_NE A_COND_NE
  152. #define DCOND_MI A_COND_MI
  153. #define DCOND_PL A_COND_PL
  154. #define DCOND_HI A_COND_HI
  155. #define DCOND_HS A_COND_HS
  156. #define DCOND_LO A_COND_LO
  157. #define DCOND_GE A_COND_GE
  158. #define DCOND_GT A_COND_GT
  159. #define DCOND_LT A_COND_LT
  160. #define DCOND_LS A_COND_LS
  161. #define DCOND_LE A_COND_LE
  162. #define DCOND_VS A_COND_VS
  163. #define DCOND_VC A_COND_VC
  164. #define DCOND_CS A_COND_HS
  165. #define DCOND_CC A_COND_LO
  166. /* addressing mode 1 */
  167. #define A_AM1_LSL 0
  168. #define A_AM1_LSR 1
  169. #define A_AM1_ASR 2
  170. #define A_AM1_ROR 3
  171. #define A_AM1_IMM(ror2,imm8) (((ror2)<<8) | (imm8) | 0x02000000)
  172. #define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
  173. #define A_AM1_REG_XREG(rs,shift_op,rm) (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))
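// The immediate form encodes imm8 rotated right by 2*ror2, e.g.
// A_AM1_IMM(30/2,0x12) yields 0x12 ror 30 = 0x48; the register forms encode
// "rm, <shift_op> #shift_imm" and "rm, <shift_op> rs" respectively.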
  174. /* data processing op */
  175. #define A_OP_AND 0x0
  176. #define A_OP_EOR 0x1
  177. #define A_OP_SUB 0x2
  178. #define A_OP_RSB 0x3
  179. #define A_OP_ADD 0x4
  180. #define A_OP_ADC 0x5
  181. #define A_OP_SBC 0x6
  182. #define A_OP_RSC 0x7
  183. #define A_OP_TST 0x8
  184. #define A_OP_TEQ 0x9
  185. #define A_OP_CMP 0xa
  186. #define A_OP_CMN 0xb
  187. #define A_OP_ORR 0xc
  188. #define A_OP_MOV 0xd
  189. #define A_OP_BIC 0xe
  190. #define A_OP_MVN 0xf
  191. // operation specific register usage in DOP
  192. #define A_Rn(op,rn) (((op)&0xd)!=0xd ? rn:NO) // no rn for MOV,MVN
  193. #define A_Rd(op,rd) (((op)&0xc)!=0x8 ? rd:NO) // no rd for TST,TEQ,CMP,CMN
  194. // CPSR is dst if S set, CPSR is src if op is ADC/SBC/RSC or shift is RRX
  195. #define A_Sd(s) ((s) ? SR:NO)
  196. #define A_Sr(op,sop) (((op)>=0x5 && (op)<=0x7) || (sop)>>4==A_AM1_ROR<<1 ? SR:NO)
  197. #define EOP_C_DOP_X(cond,op,s,rn,rd,sop,rm,rs) \
  198. EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (sop), \
  199. M2(A_Rd(op,rd),A_Sd(s)), M5(A_Sr(op,sop),A_Rn(op,rn),rm,rs,rs==NO?NO:CYC1))
  200. #define EOP_C_DOP_IMM( cond,op,s,rn,rd,ror2,imm8) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8), NO, NO)
  201. #define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm), rm, NO)
  202. #define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs, shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs, shift_op,rm), rm, rs)
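// e.g. EOP_ADD_IMM(0,1,0,4) emits 0xe2810004 ("add r0, r1, #4") and tells the
// scheduler that r0 is written (dst mask) and r1 is read (src mask); for
// TST/TEQ/CMP/CMN only SR ends up in the dst mask, for MOV/MVN rn is dropped
// from the src mask.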
  203. #define EOP_MOV_IMM(rd, ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
  204. #define EOP_MVN_IMM(rd, ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
  205. #define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
  206. #define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
  207. #define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
  208. #define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
  209. #define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
  210. #define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
  211. #define EOP_TST_IMM( rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
  212. #define EOP_CMP_IMM( rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
  213. #define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)
  214. #define EOP_MOV_IMM_C(cond,rd, ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
  215. #define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
  216. #define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)
  217. #define EOP_MOV_REG(cond,s,rd, rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
  218. #define EOP_MVN_REG(cond,s,rd, rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
  219. #define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
  220. #define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
  221. #define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
  222. #define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
  223. #define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
  224. #define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
  225. #define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
  226. #define EOP_CMP_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
  227. #define EOP_TST_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
  228. #define EOP_TEQ_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)
  229. #define EOP_MOV_REG2(s,rd, rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
  230. #define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
  231. #define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)
  232. #define EOP_MOV_REG_SIMPLE(rd,rm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
  233. #define EOP_MOV_REG_LSL(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
  234. #define EOP_MOV_REG_LSR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
  235. #define EOP_MOV_REG_ASR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
  236. #define EOP_MOV_REG_ROR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)
  237. #define EOP_ORR_REG_SIMPLE(rd,rm) EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
  238. #define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
  239. #define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
  240. #define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
  241. #define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)
  242. #define EOP_ADD_REG_SIMPLE(rd,rm) EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
  243. #define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
  244. #define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
  245. #define EOP_TST_REG_SIMPLE(rn,rm) EOP_TST_REG(A_COND_AL, rn, 0,A_AM1_LSL,rm)
  246. #define EOP_MOV_REG2_LSL(rd, rm,rs) EOP_MOV_REG2(0,rd, rm,A_AM1_LSL,rs)
  247. #define EOP_MOV_REG2_ROR(rd, rm,rs) EOP_MOV_REG2(0,rd, rm,A_AM1_ROR,rs)
  248. #define EOP_ADD_REG2_LSL(rd,rn,rm,rs) EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
  249. #define EOP_SUB_REG2_LSL(rd,rn,rm,rs) EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
  250. /* addressing mode 2 */
  251. #define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
  252. EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
  253. ((offset_12) & 0xfff), M1(l?rd:MEM), M3(rn,l?MEM:rd,l?b?CYC2:CYC1:NO))
  254. #define EOP_C_AM2_REG(cond,u,b,l,rn,rd,shift_imm,shift_op,rm) \
  255. EMIT(((cond)<<28) | 0x07000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
  256. A_AM1_REG_XIMM(shift_imm, shift_op, rm), M1(l?rd:MEM), M4(rn,rm,l?MEM:rd,l?b?CYC2:CYC1:NO))
  257. /* addressing mode 3 */
  258. #define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
  259. EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
  260. ((s)<<6) | ((h)<<5) | (immed_reg), M1(l?rd:MEM), M4(rn,r?NO:immed_reg,l?MEM:rd,l?CYC2:NO))
  261. #define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))
  262. #define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm) EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)
  263. /* ldr and str */
  264. #define EOP_LDR_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,(offset_12) >= 0,0,1,rn,rd,abs(offset_12))
  265. #define EOP_LDRB_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,(offset_12) >= 0,1,1,rn,rd,abs(offset_12))
  266. #define EOP_STR_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,(offset_12) >= 0,0,0,rn,rd,abs(offset_12))
  267. #define EOP_LDR_IMM( rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,(offset_12) >= 0,0,1,rn,rd,abs(offset_12))
  268. #define EOP_LDR_SIMPLE(rd,rn) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
  269. #define EOP_STR_IMM( rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,(offset_12) >= 0,0,0,rn,rd,abs(offset_12))
  270. #define EOP_STR_SIMPLE(rd,rn) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)
  271. #define EOP_LDR_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,1,rn,rd,shift_imm,A_AM1_LSL,rm)
  272. #define EOP_LDR_REG_LSL_WB(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,3,rn,rd,shift_imm,A_AM1_LSL,rm)
  273. #define EOP_LDRB_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,1,1,rn,rd,shift_imm,A_AM1_LSL,rm);
  274. #define EOP_STR_REG_LSL_WB(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,2,rn,rd,shift_imm,A_AM1_LSL,rm)
  275. #define EOP_LDRH_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,(offset_8) >= 0,1,rn,rd,0,1,abs(offset_8))
  276. #define EOP_LDRH_REG2(cond,rd,rn,rm) EOP_C_AM3_REG(cond,1,1,rn,rd,0,1,rm)
  277. #define EOP_LDRH_IMM( rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,(offset_8) >= 0,1,rn,rd,0,1,abs(offset_8))
  278. #define EOP_LDRH_SIMPLE(rd,rn) EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
  279. #define EOP_LDRH_REG( rd,rn,rm) EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
  280. #define EOP_STRH_IMM( rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,(offset_8) >= 0,0,rn,rd,0,1,abs(offset_8))
  281. #define EOP_STRH_SIMPLE(rd,rn) EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
  282. #define EOP_STRH_REG( rd,rn,rm) EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)
  283. #define EOP_LDRSB_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,(offset_8) >= 0,1,rn,rd,1,0,abs(offset_8))
  284. #define EOP_LDRSB_REG2(cond,rd,rn,rm) EOP_C_AM3_REG(cond,1,1,rn,rd,1,0,rm)
  285. #define EOP_LDRSH_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,(offset_8) >= 0,1,rn,rd,1,1,abs(offset_8))
  286. #define EOP_LDRSH_REG2(cond,rd,rn,rm) EOP_C_AM3_REG(cond,1,1,rn,rd,1,1,rm)
  287. /* ldm and stm */
  288. #define EOP_XXM(cond,p,u,s,w,l,rn,list) \
  289. EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list), \
  290. M2(rn,l?NO:MEM)|(l?list:0), M3(rn,l?MEM:NO,l?CYC2:NO)|(l?0:list))
  291. #define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list)
  292. #define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list)
  293. #define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,SP,list)
  294. #define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,SP,list)
  295. /* branches */
  296. #define EOP_C_BX(cond,rm) \
  297. EMIT(((cond)<<28) | 0x012fff10 | (rm), M1(PC), M1(rm))
  298. #define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \
  299. EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))
  300. #define EOP_C_B(cond,l,signed_immed_24) \
  301. EMIT(((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24), M2(PC,l?LR:NO), M1(PC))
  302. #define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
  303. #define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)
  304. /* misc */
  305. #define EOP_C_MUL(cond,s,rd,rs,rm) \
  306. EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm), M2(rd,s?SR:NO), M3(rs,rm,CYC2))
  307. #define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
  308. EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm), M3(rdhi,rdlo,s?SR:NO), M3(rs,rm,CYC2))
  309. #define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
  310. EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm), M3(rdhi,rdlo,s?SR:NO), M3(rs,rm,CYC2))
  311. #define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \
  312. EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm), M3(rdhi,rdlo,s?SR:NO), M5(rs,rm,rdlo,rdhi,CYC2))
  313. #define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm
  314. #define EOP_C_MRS(cond,rd) \
  315. EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12), M1(rd), M1(SR))
  316. #define EOP_C_MSR_IMM(cond,ror2,imm) \
  317. EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm), M1(SR), 0) // cpsr_f
  318. #define EOP_C_MSR_REG(cond,rm) \
  319. EMIT(((cond)<<28) | 0x0128f000 | (rm), M1(SR), M1(rm)) // cpsr_f
  320. #define EOP_MRS(rd) EOP_C_MRS(A_COND_AL,rd)
  321. #define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
  322. #define EOP_MSR_REG(rm) EOP_C_MSR_REG(A_COND_AL,rm)
  323. #define EOP_MOVW(rd,imm) \
  324. EMIT(0xe3000000 | ((rd)<<12) | ((imm)&0xfff) | (((imm)<<4)&0xf0000), M1(rd), NO)
  325. #define EOP_MOVT(rd,imm) \
  326. EMIT(0xe3400000 | ((rd)<<12) | (((imm)>>16)&0xfff) | (((imm)>>12)&0xf0000), M1(rd), NO)
  327. // host literal pool; must be significantly smaller than 1024 (max LDR offset = 4096)
  328. #define MAX_HOST_LITERALS 128
  329. static u32 literal_pool[MAX_HOST_LITERALS];
  330. static u32 *literal_insn[MAX_HOST_LITERALS];
  331. static int literal_pindex, literal_iindex;
  332. static int emith_pool_literal(u32 imm, int *offs)
  333. {
  334. int idx = literal_pindex - 8; // max look behind in pool
  335. // see if one of the last literals was the same (or close enough)
  336. for (idx = (idx < 0 ? 0 : idx); idx < literal_pindex; idx++)
  337. if (abs((int)(imm - literal_pool[idx])) <= 0xff)
  338. break;
  339. if (idx == literal_pindex) // store new literal
  340. literal_pool[literal_pindex++] = imm;
  341. *offs = imm - literal_pool[idx];
  342. return idx;
  343. }
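// e.g. if 0x12345678 is already among the last 8 pool entries, a request for
// 0x12345680 reuses that slot and returns *offs = 8; the caller then fixes
// the loaded value up with a single add/sub (see emith_op_imm2 below).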
  344. // XXX: RSB, *S will break if 1 insn is not enough
  345. static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
  346. {
  347. int ror2;
  348. u32 v;
  349. int i;
  350. if (cond == A_COND_NV)
  351. return;
  352. switch (op) {
  353. case A_OP_MOV:
  354. rn = 0;
  355. // count set bits in imm and use MVN if there are more 1 bits than 0 bits
  356. if (count_bits(imm) > 16) {
  357. imm = ~imm;
  358. op = A_OP_MVN;
  359. }
  360. // count insns needed for mov/orr #imm
  361. for (v = imm, ror2 = 0; (v >> 24) && ror2 < 32/2; ror2++)
  362. v = (v << 2) | (v >> 30);
  363. #ifdef HAVE_ARMV7
  364. for (i = 2; i > 0; i--, v >>= 8)
  365. while (v > 0xff && !(v & 3))
  366. v >>= 2;
  367. if (v) { // 3+ insns needed...
  368. if (op == A_OP_MVN)
  369. imm = ~imm;
  370. // ...prefer movw/movt
  371. EOP_MOVW(rd, imm);
  372. if (imm & 0xffff0000)
  373. EOP_MOVT(rd, imm);
  374. return;
  375. }
  376. #else
  377. for (i = 3; i > 0; i--, v >>= 8)
  378. while (v > 0xff && !(v & 3))
  379. v >>= 2;
  380. if (v) { // 4 insns needed...
  381. if (op == A_OP_MVN)
  382. imm = ~imm;
  383. // ...emit literal load
  384. int idx, o;
  385. if (literal_iindex >= MAX_HOST_LITERALS) {
  386. elprintf(EL_STATUS|EL_SVP|EL_ANOMALY,
  387. "pool overflow");
  388. exit(1);
  389. }
  390. idx = emith_pool_literal(imm, &o);
  391. literal_insn[literal_iindex++] = (u32 *)tcache_ptr;
  392. EOP_LDR_IMM2(cond, rd, PC, idx * sizeof(u32));
  393. if (o > 0)
  394. EOP_C_DOP_IMM(cond, A_OP_ADD, 0, rd, rd, 0, o);
  395. else if (o < 0)
  396. EOP_C_DOP_IMM(cond, A_OP_SUB, 0, rd, rd, 0, -o);
  397. return;
  398. }
  399. #endif
  400. break;
  401. case A_OP_AND:
  402. // AND must fit into 1 insn. if not, use BIC
  403. for (v = imm, ror2 = 0; (v >> 8) && ror2 < 32/2; ror2++)
  404. v = (v << 2) | (v >> 30);
  405. if (v >> 8) {
  406. imm = ~imm;
  407. op = A_OP_BIC;
  408. }
  409. break;
  410. case A_OP_SUB:
  411. case A_OP_ADD:
  412. // count set bits in imm and swap ADD and SUB if there are more 1 bits than 0 bits
  413. if (s == 0 && count_bits(imm) > 16) {
  414. imm = -imm;
  415. op ^= (A_OP_ADD^A_OP_SUB);
  416. }
  417. case A_OP_EOR:
  418. case A_OP_ORR:
  419. case A_OP_BIC:
  420. if (s == 0 && imm == 0 && rd == rn)
  421. return;
  422. break;
  423. }
  424. // try to get the topmost byte empty to possibly save an insn
  425. for (v = imm, ror2 = 0; (v >> 24) && ror2 < 32/2; ror2++)
  426. v = (v << 2) | (v >> 30);
  427. do {
  428. // shift down to get 'best' ror2
  429. while (v > 0xff && !(v & 3))
  430. v >>= 2, ror2--;
  431. EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0xf, v & 0xff);
  432. switch (op) {
  433. case A_OP_MOV: op = A_OP_ORR; break;
  434. case A_OP_MVN: op = A_OP_BIC; break;
  435. case A_OP_ADC: op = A_OP_ADD; break;
  436. case A_OP_SBC: op = A_OP_SUB; break;
  437. }
  438. rn = rd;
  439. v >>= 8, ror2 -= 8/2;
  440. } while (v);
  441. }
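/* Illustrative sketch (example only) of what emith_op_imm2 produces for a few
 * constants, written with the emith_*_imm wrappers defined further below:
 * constants are split into rotated 8-bit chunks, with MVN/BIC or ADD<->SUB
 * flips when the complement/negation needs fewer insns (see the cases above). */
#if 0
static void emith_imm_examples(void)
{
	emith_move_r_imm(0, 0x000000ff);	// mov r0, #0xff
	emith_move_r_imm(0, 0x00ff00ff);	// mov r0, #0xff; orr r0, r0, #0xff0000
	emith_move_r_imm(0, 0xffffff00);	// mvn r0, #0xff (24 one bits -> MVN)
	emith_add_r_imm(1, 0x1100);		// add r1, r1, #0x1100 (single insn)
}
#endif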
  442. #define emith_op_imm(cond, s, op, r, imm) \
  443. emith_op_imm2(cond, s, op, r, r, imm)
  444. // test op
  445. #define emith_top_imm(cond, op, r, imm) do { \
  446. u32 ror2, v; \
  447. for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
  448. ror2--; \
  449. EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
  450. } while (0)
  451. #define is_offset_24(val) \
  452. ((val) >= (int)0xff000000 && (val) <= 0x00ffffff)
  453. static int emith_xbranch(int cond, void *target, int is_call)
  454. {
  455. int val = (u32 *)target - (u32 *)tcache_ptr - 2;
  456. int direct = is_offset_24(val);
  457. u32 *start_ptr = (u32 *)tcache_ptr;
  458. if (cond == A_COND_NV)
  459. return 0; // never taken
  460. if (direct)
  461. {
  462. EOP_C_B(cond,is_call,val & 0xffffff); // b, bl target
  463. }
  464. else
  465. {
  466. #ifdef __EPOC32__
  467. // elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
  468. if (is_call)
  469. EOP_ADD_IMM(LR,PC,0,8); // add lr,pc,#8
  470. EOP_C_AM2_IMM(cond,1,0,1,PC,PC,0); // ldrcc pc,[pc]
  471. EOP_MOV_REG_SIMPLE(PC,PC); // mov pc, pc
  472. EMIT((u32)target,M1(PC),0);
  473. #else
  474. // should never happen
  475. elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %8p->%8p", target, tcache_ptr);
  476. exit(1);
  477. #endif
  478. }
  479. return (u32 *)tcache_ptr - start_ptr;
  480. }
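// e.g. a direct branch to the insn right after the current one gets
// val = 1 - 2 = -1 (PC reads as insn+8 on ARM, hence the -2 words);
// is_offset_24() limits direct B/BL to roughly +/-32MB, anything farther
// goes through the indirect (EPOC) path or is treated as an error.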
  481. static void emith_pool_commit(int jumpover)
  482. {
  483. int i, sz = literal_pindex * sizeof(u32);
  484. u8 *pool = (u8 *)tcache_ptr;
  485. // nothing to commit if pool is empty
  486. if (sz == 0)
  487. return;
  488. // need branch over pool if not at block end
  489. if (jumpover) {
  490. pool += sizeof(u32);
  491. emith_xbranch(A_COND_AL, (u8 *)pool + sz, 0);
  492. }
  493. emith_flush();
  494. // safety check - pool must be after insns and reachable
  495. if ((u32)(pool - (u8 *)literal_insn[0] + 8) > 0xfff) {
  496. elprintf(EL_STATUS|EL_SVP|EL_ANOMALY,
  497. "pool offset out of range");
  498. exit(1);
  499. }
  500. // copy pool and adjust addresses in insns accessing the pool
  501. memcpy(pool, literal_pool, sz);
  502. for (i = 0; i < literal_iindex; i++) {
  503. *literal_insn[i] += (u8 *)pool - ((u8 *)literal_insn[i] + 8);
  504. }
  505. // count pool constants as insns for statistics
  506. for (i = 0; i < literal_pindex; i++)
  507. COUNT_OP;
  508. tcache_ptr = (void *)((u8 *)pool + sz);
  509. literal_pindex = literal_iindex = 0;
  510. }
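// The LDRs emitted by emith_op_imm2 initially hold just idx*4 as their
// offset; the fixup loop above adds the distance from each insn's PC (+8) to
// the pool start, e.g. an LDR of pool entry 1 sitting 0x100 bytes before the
// pool ends up as "ldr rd, [pc, #0xfc]" (0x100 - 8 + 1*4).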
  511. static inline void emith_pool_check(void)
  512. {
  513. // check if pool must be committed
  514. if (literal_iindex > MAX_HOST_LITERALS-4 ||
  515. (u8 *)tcache_ptr - (u8 *)literal_insn[0] > 0xe00)
  516. // pool full, or displacement is approaching the limit
  517. emith_pool_commit(1);
  518. }
  519. static inline int emith_pool_index(int tcache_offs)
  520. {
  521. u32 *ptr = (u32 *)tcache_ptr - tcache_offs;
  522. int i;
  523. for (i = literal_iindex-1; i >= 0 && literal_insn[i] >= ptr; i--)
  524. if (literal_insn[i] == ptr)
  525. return i;
  526. return -1;
  527. }
  528. static inline void emith_pool_adjust(int pool_index, int move_offs)
  529. {
  530. if (pool_index >= 0)
  531. literal_insn[pool_index] += move_offs;
  532. }
  533. #define JMP_POS(ptr) \
  534. ptr = tcache_ptr; \
  535. EMIT(0,M1(PC),0);
  536. #define JMP_EMIT(cond, ptr) { \
  537. u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
  538. emith_flush(); \
  539. EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \
  540. }
  541. #define EMITH_JMP_START(cond) { \
  542. void *cond_ptr; \
  543. JMP_POS(cond_ptr)
  544. #define EMITH_JMP_END(cond) \
  545. JMP_EMIT(cond, cond_ptr); \
  546. }
  547. // fake "simple" or "short" jump - using cond insns instead
  548. #define EMITH_NOTHING1(cond) \
  549. (void)(cond)
  550. #define EMITH_SJMP_START(cond) EMITH_NOTHING1(cond)
  551. #define EMITH_SJMP_END(cond) EMITH_NOTHING1(cond)
  552. #define EMITH_SJMP2_START(cond) EMITH_NOTHING1(cond)
  553. #define EMITH_SJMP2_MID(cond) EMITH_JMP_START((cond)^1) // inverse cond
  554. #define EMITH_SJMP2_END(cond) EMITH_JMP_END((cond)^1)
  555. #define EMITH_SJMP3_START(cond) EMITH_NOTHING1(cond)
  556. #define EMITH_SJMP3_MID(cond) EMITH_NOTHING1(cond)
  557. #define EMITH_SJMP3_END()
  558. #define emith_move_r_r_c(cond, d, s) \
  559. EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,0)
  560. #define emith_move_r_r(d, s) \
  561. emith_move_r_r_c(A_COND_AL, d, s)
  562. #define emith_move_r_r_ptr_c(cond, d, s) \
  563. emith_move_r_r_c(cond, d, s)
  564. #define emith_move_r_r_ptr(d, s) \
  565. emith_move_r_r(d, s)
  566. #define emith_mvn_r_r(d, s) \
  567. EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)
  568. #define emith_add_r_r_r_lsl(d, s1, s2, lslimm) \
  569. EOP_ADD_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
  570. #define emith_add_r_r_r_lsl_ptr(d, s1, s2, lslimm) \
  571. emith_add_r_r_r_lsl(d, s1, s2, lslimm)
  572. #define emith_addf_r_r_r_lsl(d, s1, s2, lslimm) \
  573. EOP_ADD_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)
  574. #define emith_addf_r_r_r_lsr(d, s1, s2, lsrimm) \
  575. EOP_ADD_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSR,lsrimm)
  576. #define emith_adcf_r_r_r_lsl(d, s1, s2, lslimm) \
  577. EOP_ADC_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)
  578. #define emith_sub_r_r_r_lsl(d, s1, s2, lslimm) \
  579. EOP_SUB_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
  580. #define emith_subf_r_r_r_lsl(d, s1, s2, lslimm) \
  581. EOP_SUB_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)
  582. #define emith_sbcf_r_r_r_lsl(d, s1, s2, lslimm) \
  583. EOP_SBC_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)
  584. #define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
  585. EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
  586. #define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
  587. EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
  588. #define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \
  589. EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)
  590. #define emith_and_r_r_r_lsl(d, s1, s2, lslimm) \
  591. EOP_AND_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
  592. #define emith_or_r_r_lsl(d, s, lslimm) \
  593. emith_or_r_r_r_lsl(d, d, s, lslimm)
  594. #define emith_eor_r_r_lsr(d, s, lsrimm) \
  595. emith_eor_r_r_r_lsr(d, d, s, lsrimm)
  596. #define emith_add_r_r_r(d, s1, s2) \
  597. emith_add_r_r_r_lsl(d, s1, s2, 0)
  598. #define emith_addf_r_r_r(d, s1, s2) \
  599. emith_addf_r_r_r_lsl(d, s1, s2, 0)
  600. #define emith_adcf_r_r_r(d, s1, s2) \
  601. emith_adcf_r_r_r_lsl(d, s1, s2, 0)
  602. #define emith_sub_r_r_r(d, s1, s2) \
  603. emith_sub_r_r_r_lsl(d, s1, s2, 0)
  604. #define emith_subf_r_r_r(d, s1, s2) \
  605. emith_subf_r_r_r_lsl(d, s1, s2, 0)
  606. #define emith_sbcf_r_r_r(d, s1, s2) \
  607. emith_sbcf_r_r_r_lsl(d, s1, s2, 0)
  608. #define emith_or_r_r_r(d, s1, s2) \
  609. emith_or_r_r_r_lsl(d, s1, s2, 0)
  610. #define emith_eor_r_r_r(d, s1, s2) \
  611. emith_eor_r_r_r_lsl(d, s1, s2, 0)
  612. #define emith_and_r_r_r(d, s1, s2) \
  613. emith_and_r_r_r_lsl(d, s1, s2, 0)
  614. #define emith_add_r_r(d, s) \
  615. emith_add_r_r_r(d, d, s)
  616. #define emith_add_r_r_ptr(d, s) \
  617. emith_add_r_r_r(d, d, s)
  618. #define emith_sub_r_r(d, s) \
  619. emith_sub_r_r_r(d, d, s)
  620. #define emith_adc_r_r(d, s) \
  621. EOP_ADC_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
  622. #define emith_and_r_r_c(cond, d, s) \
  623. EOP_AND_REG(cond,0,d,d,s,A_AM1_LSL,0)
  624. #define emith_and_r_r(d, s) \
  625. EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
  626. #define emith_or_r_r(d, s) \
  627. emith_or_r_r_r(d, d, s)
  628. #define emith_eor_r_r(d, s) \
  629. emith_eor_r_r_r(d, d, s)
  630. #define emith_tst_r_r(d, s) \
  631. EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)
  632. #define emith_tst_r_r_ptr(d, s) \
  633. emith_tst_r_r(d, s)
  634. #define emith_teq_r_r(d, s) \
  635. EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)
  636. #define emith_cmp_r_r(d, s) \
  637. EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)
  638. #define emith_addf_r_r(d, s) \
  639. EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
  640. #define emith_subf_r_r(d, s) \
  641. EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
  642. #define emith_adcf_r_r(d, s) \
  643. EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
  644. #define emith_sbcf_r_r(d, s) \
  645. EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
  646. #define emith_eorf_r_r(d, s) \
  647. EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
  648. #define emith_move_r_imm(r, imm) \
  649. emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)
  650. #define emith_move_r_ptr_imm(r, imm) \
  651. emith_move_r_imm(r, (u32)(imm))
  652. #define emith_add_r_imm(r, imm) \
  653. emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)
  654. #define emith_adc_r_imm(r, imm) \
  655. emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm)
  656. #define emith_adcf_r_imm(r, imm) \
  657. emith_op_imm(A_COND_AL, 1, A_OP_ADC, r, imm)
  658. #define emith_sub_r_imm(r, imm) \
  659. emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)
  660. #define emith_bic_r_imm(r, imm) \
  661. emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)
  662. #define emith_and_r_imm(r, imm) \
  663. emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)
  664. #define emith_or_r_imm(r, imm) \
  665. emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)
  666. #define emith_eor_r_imm(r, imm) \
  667. emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)
  668. #define emith_eor_r_imm_ptr(r, imm) \
  669. emith_eor_r_imm(r, imm)
  670. // note: only use 8bit imm for these
  671. #define emith_tst_r_imm(r, imm) \
  672. emith_top_imm(A_COND_AL, A_OP_TST, r, imm)
  673. #define emith_cmp_r_imm(r, imm) do { \
  674. u32 op_ = A_OP_CMP, imm_ = (u8)imm; \
  675. if ((s8)imm_ < 0) { \
  676. imm_ = (u8)-imm_; \
  677. op_ = A_OP_CMN; \
  678. } \
  679. emith_top_imm(A_COND_AL, op_, r, imm_); \
  680. } while (0)
  681. #define emith_subf_r_imm(r, imm) \
  682. emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)
  683. #define emith_move_r_imm_c(cond, r, imm) \
  684. emith_op_imm(cond, 0, A_OP_MOV, r, imm)
  685. #define emith_add_r_imm_c(cond, r, imm) \
  686. emith_op_imm(cond, 0, A_OP_ADD, r, imm)
  687. #define emith_sub_r_imm_c(cond, r, imm) \
  688. emith_op_imm(cond, 0, A_OP_SUB, r, imm)
  689. #define emith_or_r_imm_c(cond, r, imm) \
  690. emith_op_imm(cond, 0, A_OP_ORR, r, imm)
  691. #define emith_eor_r_imm_c(cond, r, imm) \
  692. emith_op_imm(cond, 0, A_OP_EOR, r, imm)
  693. #define emith_eor_r_imm_ptr_c(cond, r, imm) \
  694. emith_eor_r_imm_c(cond, r, imm)
  695. #define emith_bic_r_imm_c(cond, r, imm) \
  696. emith_op_imm(cond, 0, A_OP_BIC, r, imm)
  697. #define emith_tst_r_imm_c(cond, r, imm) \
  698. emith_top_imm(cond, A_OP_TST, r, imm)
  699. #define emith_move_r_imm_s8(r, imm) do { \
  700. if ((s8)(imm) < 0) \
  701. EOP_MVN_IMM(r, 0, ((u8)(imm) ^ 0xff)); \
  702. else \
  703. EOP_MOV_IMM(r, 0, (u8)imm); \
  704. } while (0)
  705. #define emith_and_r_r_imm(d, s, imm) \
  706. emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)
  707. #define emith_add_r_r_imm(d, s, imm) \
  708. emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)
  709. #define emith_add_r_r_ptr_imm(d, s, imm) \
  710. emith_add_r_r_imm(d, s, imm)
  711. #define emith_sub_r_r_imm_c(cond, d, s, imm) \
  712. emith_op_imm2(cond, 0, A_OP_SUB, d, s, (imm))
  713. #define emith_sub_r_r_imm(d, s, imm) \
  714. emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)
  715. #define emith_subf_r_r_imm(d, s, imm) \
  716. emith_op_imm2(A_COND_AL, 1, A_OP_SUB, d, s, imm)
  717. #define emith_or_r_r_imm(d, s, imm) \
  718. emith_op_imm2(A_COND_AL, 0, A_OP_ORR, d, s, imm)
  719. #define emith_eor_r_r_imm(d, s, imm) \
  720. emith_op_imm2(A_COND_AL, 0, A_OP_EOR, d, s, imm)
  721. #define emith_neg_r_r(d, s) \
  722. EOP_RSB_IMM(d, s, 0, 0)
  723. #define emith_lsl(d, s, cnt) \
  724. EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)
  725. #define emith_lsr(d, s, cnt) \
  726. EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)
  727. #define emith_asr(d, s, cnt) \
  728. EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)
  729. #define emith_ror_c(cond, d, s, cnt) \
  730. EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt)
  731. #define emith_ror(d, s, cnt) \
  732. emith_ror_c(A_COND_AL, d, s, cnt)
  733. #define emith_rol(d, s, cnt) \
  734. EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt));
  735. #define emith_lslf(d, s, cnt) \
  736. EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)
  737. #define emith_lsrf(d, s, cnt) \
  738. EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)
  739. #define emith_asrf(d, s, cnt) \
  740. EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)
  741. // note: only C flag updated correctly
  742. #define emith_rolf(d, s, cnt) do { \
  743. EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
  744. /* we don't have ROL so we shift to get the right carry */ \
  745. EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
  746. } while (0)
  747. #define emith_rorf(d, s, cnt) \
  748. EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)
  749. #define emith_rolcf(d) \
  750. emith_adcf_r_r(d, d)
  751. #define emith_rorcf(d) \
  752. EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */
  753. #define emith_negcf_r_r(d, s) \
  754. EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)
  755. #define emith_mul(d, s1, s2) do { \
  756. if ((d) != (s1)) /* rd != rm limitation */ \
  757. EOP_MUL(d, s1, s2); \
  758. else \
  759. EOP_MUL(d, s2, s1); \
  760. } while (0)
  761. #define emith_mul_u64(dlo, dhi, s1, s2) \
  762. EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)
  763. #define emith_mul_s64(dlo, dhi, s1, s2) \
  764. EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)
  765. #define emith_mula_s64_c(cond, dlo, dhi, s1, s2) \
  766. EOP_C_SMLAL(cond,0,dhi,dlo,s1,s2)
  767. #define emith_mula_s64(dlo, dhi, s1, s2) \
  768. EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)
  769. // misc
  770. #define emith_read_r_r_offs_c(cond, r, rs, offs) \
  771. EOP_LDR_IMM2(cond, r, rs, offs)
  772. #define emith_read_r_r_offs_ptr_c(cond, r, rs, offs) \
  773. emith_read_r_r_offs_c(cond, r, rs, offs)
  774. #define emith_read_r_r_r_c(cond, r, rs, rm) \
  775. EOP_LDR_REG_LSL(cond, r, rs, rm, 0)
  776. #define emith_read_r_r_offs(r, rs, offs) \
  777. emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)
  778. #define emith_read_r_r_offs_ptr(r, rs, offs) \
  779. emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)
  780. #define emith_read_r_r_r(r, rs, rm) \
  781. EOP_LDR_REG_LSL(A_COND_AL, r, rs, rm, 0)
  782. #define emith_read_r_r_r_wb(r, rs, rm) \
  783. EOP_LDR_REG_LSL_WB(A_COND_AL, r, rs, rm, 0)
  784. #define emith_read_r_r_r_ptr_wb(r, rs, rm) \
  785. emith_read_r_r_r_wb(r, rs, rm)
  786. #define emith_read8_r_r_offs_c(cond, r, rs, offs) \
  787. EOP_LDRB_IMM2(cond, r, rs, offs)
  788. #define emith_read8_r_r_r_c(cond, r, rs, rm) \
  789. EOP_LDRB_REG_LSL(cond, r, rs, rm, 0)
  790. #define emith_read8_r_r_offs(r, rs, offs) \
  791. emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs)
  792. #define emith_read8_r_r_r(r, rs, rm) \
  793. emith_read8_r_r_r_c(A_COND_AL, r, rs, rm)
  794. #define emith_read16_r_r_offs_c(cond, r, rs, offs) \
  795. EOP_LDRH_IMM2(cond, r, rs, offs)
  796. #define emith_read16_r_r_r_c(cond, r, rs, rm) \
  797. EOP_LDRH_REG2(cond, r, rs, rm)
  798. #define emith_read16_r_r_offs(r, rs, offs) \
  799. emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs)
  800. #define emith_read16_r_r_r(r, rs, rm) \
  801. emith_read16_r_r_r_c(A_COND_AL, r, rs, rm)
  802. #define emith_read8s_r_r_offs_c(cond, r, rs, offs) \
  803. EOP_LDRSB_IMM2(cond, r, rs, offs)
  804. #define emith_read8s_r_r_r_c(cond, r, rs, rm) \
  805. EOP_LDRSB_REG2(cond, r, rs, rm)
  806. #define emith_read8s_r_r_offs(r, rs, offs) \
  807. emith_read8s_r_r_offs_c(A_COND_AL, r, rs, offs)
  808. #define emith_read8s_r_r_r(r, rs, rm) \
  809. emith_read8s_r_r_r_c(A_COND_AL, r, rs, rm)
  810. #define emith_read16s_r_r_offs_c(cond, r, rs, offs) \
  811. EOP_LDRSH_IMM2(cond, r, rs, offs)
  812. #define emith_read16s_r_r_r_c(cond, r, rs, rm) \
  813. EOP_LDRSH_REG2(cond, r, rs, rm)
  814. #define emith_read16s_r_r_offs(r, rs, offs) \
  815. emith_read16s_r_r_offs_c(A_COND_AL, r, rs, offs)
  816. #define emith_read16s_r_r_r(r, rs, rm) \
  817. emith_read16s_r_r_r_c(A_COND_AL, r, rs, rm)
  818. #define emith_write_r_r_offs_c(cond, r, rs, offs) \
  819. EOP_STR_IMM2(cond, r, rs, offs)
  820. #define emith_write_r_r_offs_ptr_c(cond, r, rs, offs) \
  821. emith_write_r_r_offs_c(cond, r, rs, offs)
  822. #define emith_write_r_r_offs(r, rs, offs) \
  823. emith_write_r_r_offs_c(A_COND_AL, r, rs, offs)
  824. #define emith_write_r_r_offs_ptr(r, rs, offs) \
  825. emith_write_r_r_offs_c(A_COND_AL, r, rs, offs)
  826. #define emith_write_r_r_r_wb(r, rs, rm) \
  827. EOP_STR_REG_LSL_WB(A_COND_AL, r, rs, rm, 0)
  828. #define emith_write_r_r_r_ptr_wb(r, rs, rm) \
  829. emith_write_r_r_r_wb(r, rs, rm)
  830. #define emith_ctx_read_c(cond, r, offs) \
  831. emith_read_r_r_offs_c(cond, r, CONTEXT_REG, offs)
  832. #define emith_ctx_read(r, offs) \
  833. emith_ctx_read_c(A_COND_AL, r, offs)
  834. #define emith_ctx_read_ptr(r, offs) \
  835. emith_ctx_read(r, offs)
  836. #define emith_ctx_write(r, offs) \
  837. EOP_STR_IMM(r, CONTEXT_REG, offs)
  838. #define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \
  839. int v_, r_ = r, c_ = count, b_ = CONTEXT_REG; \
  840. for (v_ = 0; c_; c_--, r_++) \
  841. v_ |= M1(r_); \
  842. if ((offs) != 0) { \
  843. EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2);\
  844. b_ = tmpr; \
  845. } \
  846. op(b_,v_); \
  847. } while (0)
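// e.g. emith_ctx_read_multiple(4, 0x40, 3, 12) becomes
//   add r12, r11, #0x40  (imm encoded as 0x40>>2 ror 30)
//   ldmia r12, {r4-r6}
// note offs must be word-aligned and (offs)>>2 must fit the 8-bit immediate.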
  848. #define emith_ctx_read_multiple(r, offs, count, tmpr) \
  849. emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr)
  850. #define emith_ctx_write_multiple(r, offs, count, tmpr) \
  851. emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr)
  852. #define emith_clear_msb_c(cond, d, s, count) do { \
  853. u32 t; \
  854. if ((count) <= 8) { \
  855. t = 8 - (count); \
  856. t = (0xff << t) & 0xff; \
  857. EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
  858. } else if ((count) >= 24) { \
  859. t = (count) - 24; \
  860. t = 0xff >> t; \
  861. EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
  862. } else { \
  863. EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \
  864. EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \
  865. } \
  866. } while (0)
  867. #define emith_clear_msb(d, s, count) \
  868. emith_clear_msb_c(A_COND_AL, d, s, count)
  869. #define emith_sext(d, s, bits) do { \
  870. EOP_MOV_REG_LSL(d,s,32 - (bits)); \
  871. EOP_MOV_REG_ASR(d,d,32 - (bits)); \
  872. } while (0)
  873. #define emith_do_caller_regs(mask, func) do { \
  874. u32 _reg_mask = (mask) & 0x500f; \
  875. if (_reg_mask) { \
  876. if (__builtin_parity(_reg_mask) == 1) \
  877. _reg_mask |= 0x10; /* eabi align */ \
  878. func(_reg_mask); \
  879. } \
  880. } while (0)
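// 0x500f = r0-r3, r12 and lr (the caller-saved set used here); when an odd
// number of those is live, r4 (bit 4, 0x10) is pushed as padding so the stack
// stays 8-byte aligned, e.g. emith_save_caller_regs(M3(0,1,2)) pushes
// {r0,r1,r2,r4}.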
  881. #define emith_save_caller_regs(mask) \
  882. emith_do_caller_regs(mask, EOP_STMFD_SP)
  883. #define emith_restore_caller_regs(mask) \
  884. emith_do_caller_regs(mask, EOP_LDMFD_SP)
  885. // up to 4 args
  886. #define emith_pass_arg_r(arg, reg) \
  887. EOP_MOV_REG_SIMPLE(arg, reg)
  888. #define emith_pass_arg_imm(arg, imm) \
  889. emith_move_r_imm(arg, imm)
  890. #define emith_jump(target) \
  891. emith_jump_cond(A_COND_AL, target)
  892. #define emith_jump_patchable(target) \
  893. emith_jump(target)
  894. #define emith_jump_cond(cond, target) \
  895. emith_xbranch(cond, target, 0)
  896. #define emith_jump_cond_patchable(cond, target) \
  897. emith_jump_cond(cond, target)
  898. #define emith_jump_patch(ptr, target) ({ \
  899. u32 *ptr_ = ptr; \
  900. u32 val_ = (u32 *)(target) - ptr_ - 2; \
  901. *ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
  902. (u8 *)ptr; \
  903. })
  904. #define emith_jump_patch_size() 4
  905. #define emith_jump_at(ptr, target) do { \
  906. u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \
  907. emith_flush(); \
  908. EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \
  909. } while (0)
  910. #define emith_jump_reg_c(cond, r) \
  911. EOP_C_BX(cond, r)
  912. #define emith_jump_reg(r) \
  913. emith_jump_reg_c(A_COND_AL, r)
  914. #define emith_jump_ctx_c(cond, offs) \
  915. EOP_LDR_IMM2(cond,PC,CONTEXT_REG,offs)
  916. #define emith_jump_ctx(offs) \
  917. emith_jump_ctx_c(A_COND_AL, offs)
  918. #define emith_call_cond(cond, target) \
  919. emith_xbranch(cond, target, 1)
  920. #define emith_call(target) \
  921. emith_call_cond(A_COND_AL, target)
  922. #define emith_call_reg(r) do { \
  923. emith_move_r_r(LR, PC); \
  924. EOP_C_BX(A_COND_AL, r); \
  925. } while (0)
  926. #define emith_call_ctx(offs) do { \
  927. emith_move_r_r(LR, PC); \
  928. emith_jump_ctx(offs); \
  929. } while (0)
  930. #define emith_call_link(r, target) do { \
  931. emith_move_r_r(r, PC); \
  932. emith_jump(target); \
  933. } while (0)
  934. #define emith_call_cleanup() /**/
  935. #define emith_ret_c(cond) \
  936. emith_jump_reg_c(cond, LR)
  937. #define emith_ret() \
  938. emith_ret_c(A_COND_AL)
  939. #define emith_ret_to_ctx(offs) \
  940. emith_ctx_write(LR, offs)
  941. /* pushes r12 for eabi alignment */
  942. #define emith_push_ret(r) do { \
  943. int r_ = (r >= 0 ? r : 12); \
  944. EOP_STMFD_SP(M2(r_,LR)); \
  945. } while (0)
  946. #define emith_pop_and_ret(r) do { \
  947. int r_ = (r >= 0 ? r : 12); \
  948. EOP_LDMFD_SP(M2(r_,PC)); \
  949. } while (0)
  950. #define host_instructions_updated(base, end) \
  951. cache_flush_d_inval_i(base, end)
  952. #define host_arg2reg(rd, arg) \
  953. rd = arg
  954. #define emith_rw_offs_max() 0xff
  955. /* SH2 drc specific */
  956. /* pushes r12 for eabi alignment */
  957. #define emith_sh2_drc_entry() \
  958. EOP_STMFD_SP(M10(4,5,6,7,8,9,10,11,12,LR))
  959. #define emith_sh2_drc_exit() \
  960. EOP_LDMFD_SP(M10(4,5,6,7,8,9,10,11,12,PC))
  961. // assumes a is in arg0; tab, func and mask are temp
  962. #define emith_sh2_rcall(a, tab, func, mask) do { \
  963. emith_lsr(mask, a, SH2_READ_SHIFT); \
  964. EOP_ADD_REG_LSL(tab, tab, mask, 3); \
  965. if (func < mask) EOP_LDMIA(tab, M2(func,mask)); /* ldm if possible */ \
  966. else { emith_read_r_r_offs(func, tab, 0); \
  967. emith_read_r_r_offs(mask, tab, 4); } \
  968. emith_addf_r_r_r(func,func,func); \
  969. } while (0)
  970. // assumes a, val are in arg0 and arg1; tab and func are temp
  971. #define emith_sh2_wcall(a, val, tab, func) do { \
  972. emith_lsr(func, a, SH2_WRITE_SHIFT); \
  973. EOP_LDR_REG_LSL(A_COND_AL,func,tab,func,2); \
  974. emith_move_r_r(2, CONTEXT_REG); /* arg2 */ \
  975. emith_jump_reg(func); \
  976. } while (0)
  977. #define emith_sh2_dtbf_loop() do { \
  978. int cr, rn; \
  979. int tmp_ = rcache_get_tmp(); \
  980. cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
  981. rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
  982. emith_sub_r_imm(rn, 1); /* sub rn, #1 */ \
  983. emith_bic_r_imm(cr, 1); /* bic cr, #1 */ \
  984. emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
  985. cycles = 0; \
  986. emith_asrf(tmp_, cr, 2+12); /* movs tmp_, cr, asr #2+12 */\
  987. EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0); /* movmi tmp_, #0 */ \
  988. emith_lsl(cr, cr, 20); /* mov cr, cr, lsl #20 */ \
  989. emith_lsr(cr, cr, 20); /* mov cr, cr, lsr #20 */ \
  990. emith_subf_r_r(rn, tmp_); /* subs rn, tmp_ */ \
  991. EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0); /* rsbls tmp_, rn, #0 */ \
  992. EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */\
  993. EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1); /* orrls cr, #1 */ \
  994. EOP_MOV_IMM_C(A_COND_LS,rn,0,0); /* movls rn, #0 */ \
  995. rcache_free_tmp(tmp_); \
  996. } while (0)
  997. #define emith_sh2_delay_loop(cycles, reg) do { \
  998. int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL); \
  999. int t1 = rcache_get_tmp(); \
  1000. int t2 = rcache_get_tmp(); \
  1001. int t3 = rcache_get_tmp(); \
  1002. /* if (sr < 0) return */ \
  1003. emith_asrf(t2, sr, 12); \
  1004. EMITH_JMP_START(DCOND_LE); \
  1005. /* turns = sr.cycles / cycles */ \
  1006. emith_move_r_imm(t3, (u32)((1ULL<<32) / (cycles)) + 1); \
  1007. emith_mul_u64(t1, t2, t2, t3); /* multiply by 1/x */ \
  1008. rcache_free_tmp(t3); \
  1009. if (reg >= 0) { \
  1010. /* if (reg <= turns) turns = reg-1 */ \
  1011. t3 = rcache_get_reg(reg, RC_GR_RMW, NULL); \
  1012. emith_cmp_r_r(t3, t2); \
  1013. emith_sub_r_r_imm_c(DCOND_LS, t2, t3, 1); \
  1014. /* if (reg <= 1) turns = 0 */ \
  1015. emith_cmp_r_imm(t3, 1); \
  1016. emith_move_r_imm_c(DCOND_LS, t2, 0); \
  1017. /* reg -= turns */ \
  1018. emith_sub_r_r(t3, t2); \
  1019. } \
  1020. /* sr.cycles -= turns * cycles; */ \
  1021. emith_move_r_imm(t1, cycles); \
  1022. emith_mul(t1, t2, t1); \
  1023. emith_sub_r_r_r_lsl(sr, sr, t1, 12); \
  1024. EMITH_JMP_END(DCOND_LE); \
  1025. rcache_free_tmp(t1); \
  1026. rcache_free_tmp(t2); \
  1027. } while (0)
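// The division above is done by multiplying with a scaled reciprocal:
// t3 = (2^32 / cycles) + 1, and the high word of the 64-bit product is the
// quotient, e.g. with cycles = 3 (t3 = 0x55555556) and 100 cycles left the
// UMULL high word is 33 = 100/3.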
  1028. #define emith_write_sr(sr, srcr) do { \
  1029. emith_lsr(sr, sr, 10); \
  1030. emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
  1031. emith_ror(sr, sr, 22); \
  1032. } while (0)
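// i.e. sr = (sr & ~0x3ff) | (srcr & 0x3ff): the three shifts replace the low
// 10 bits of sr with those of srcr without needing a mask constant.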
  1033. #define emith_carry_to_t(srr, is_sub) do { \
  1034. if (is_sub) { /* has inverted C on ARM */ \
  1035. emith_or_r_imm_c(A_COND_CC, srr, 1); \
  1036. emith_bic_r_imm_c(A_COND_CS, srr, 1); \
  1037. } else { \
  1038. emith_or_r_imm_c(A_COND_CS, srr, 1); \
  1039. emith_bic_r_imm_c(A_COND_CC, srr, 1); \
  1040. } \
  1041. } while (0)
  1042. #define emith_tpop_carry(sr, is_sub) do { \
  1043. if (is_sub) \
  1044. emith_eor_r_imm(sr, 1); \
  1045. emith_lsrf(sr, sr, 1); \
  1046. } while (0)
  1047. #define emith_tpush_carry(sr, is_sub) do { \
  1048. emith_adc_r_r(sr, sr); \
  1049. if (is_sub) \
  1050. emith_eor_r_imm(sr, 1); \
  1051. } while (0)
  1052. /*
  1053. * if Q
  1054. * t = carry(Rn += Rm)
  1055. * else
  1056. * t = carry(Rn -= Rm)
  1057. * T ^= t
  1058. */
  1059. #define emith_sh2_div1_step(rn, rm, sr) do { \
  1060. void *jmp0, *jmp1; \
  1061. emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
  1062. JMP_POS(jmp0); /* beq do_sub */ \
  1063. emith_addf_r_r(rn, rm); \
  1064. emith_eor_r_imm_c(A_COND_CS, sr, T); \
  1065. JMP_POS(jmp1); /* b done */ \
  1066. JMP_EMIT(A_COND_EQ, jmp0); /* do_sub: */ \
  1067. emith_subf_r_r(rn, rm); \
  1068. emith_eor_r_imm_c(A_COND_CC, sr, T); \
  1069. JMP_EMIT(A_COND_AL, jmp1); /* done: */ \
  1070. } while (0)
  1071. /* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
  1072. #define emith_sh2_macl(ml, mh, rn, rm, sr) do { \
  1073. emith_tst_r_imm(sr, S); \
  1074. EMITH_SJMP2_START(DCOND_NE); \
  1075. emith_mula_s64_c(DCOND_EQ, ml, mh, rn, rm); \
  1076. EMITH_SJMP2_MID(DCOND_NE); \
  1077. /* MACH top 16 bits unused if saturated. sign ext for overfl detect */ \
  1078. emith_sext(mh, mh, 16); \
  1079. emith_mula_s64(ml, mh, rn, rm); \
  1080. /* overflow if top 17 bits of MACH aren't all 1 or 0 */ \
  1081. /* to check: add MACH[15] to MACH[31:16]. this is 0 if no overflow */ \
  1082. emith_asrf(rn, mh, 16); /* sum = (MACH>>16) + ((MACH>>15)&1) */ \
  1083. emith_adcf_r_imm(rn, 0); /* (MACH>>15) is in carry after shift */ \
  1084. EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> ov */ \
  1085. emith_move_r_imm_c(DCOND_NE, ml, 0x0000); /* -overflow */ \
  1086. emith_move_r_imm_c(DCOND_NE, mh, 0x8000); \
  1087. EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> +ovl */ \
  1088. emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0xffffffff */ \
  1089. emith_sub_r_imm_c(DCOND_GT, mh, 1); /* 0x00007fff */ \
  1090. EMITH_SJMP_END(DCOND_LE); \
  1091. EMITH_SJMP_END(DCOND_EQ); \
  1092. EMITH_SJMP2_END(DCOND_NE); \
  1093. } while (0)
  1094. /* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
  1095. #define emith_sh2_macw(ml, mh, rn, rm, sr) do { \
  1096. emith_tst_r_imm(sr, S); \
  1097. EMITH_SJMP2_START(DCOND_NE); \
  1098. emith_mula_s64_c(DCOND_EQ, ml, mh, rn, rm); \
  1099. EMITH_SJMP2_MID(DCOND_NE); \
  1100. /* XXX: MACH should be untouched when S is set? */ \
  1101. emith_asr(mh, ml, 31); /* sign ext MACL to MACH for ovrfl check */ \
  1102. emith_mula_s64(ml, mh, rn, rm); \
  1103. /* overflow if top 33 bits of MACH:MACL aren't all 1 or 0 */ \
  1104. /* to check: add MACL[31] to MACH. this is 0 if no overflow */ \
  1105. emith_addf_r_r_r_lsr(mh, mh, ml, 31); /* sum = MACH + ((MACL>>31)&1) */\
  1106. EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> overflow */ \
  1107. /* XXX: LSB signalling only in SH1, or in SH2 too? */ \
  1108. emith_move_r_imm_c(DCOND_NE, mh, 0x00000001); /* LSB of MACH */ \
  1109. emith_move_r_imm_c(DCOND_NE, ml, 0x80000000); /* negative ovrfl */ \
  1110. EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> positive ovrfl */ \
  1111. emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0x7fffffff */ \
  1112. EMITH_SJMP_END(DCOND_LE); \
  1113. EMITH_SJMP_END(DCOND_EQ); \
  1114. EMITH_SJMP2_END(DCOND_NE); \
  1115. } while (0)
  1116. #ifdef T
  1117. // T bit handling
  1118. static int tcond = -1;
  1119. #define emith_invert_cond(cond) \
  1120. ((cond) ^ 1)
  1121. #define emith_clr_t_cond(sr) \
  1122. (void)sr
  1123. #define emith_set_t_cond(sr, cond) \
  1124. tcond = cond
  1125. #define emith_get_t_cond() \
  1126. tcond
  1127. #define emith_invalidate_t() \
  1128. tcond = -1
  1129. #define emith_set_t(sr, val) \
  1130. tcond = ((val) ? A_COND_AL: A_COND_NV)
  1131. static void emith_sync_t(int sr)
  1132. {
  1133. if (tcond == A_COND_AL)
  1134. emith_or_r_imm(sr, T);
  1135. else if (tcond == A_COND_NV)
  1136. emith_bic_r_imm(sr, T);
  1137. else if (tcond >= 0) {
  1138. emith_bic_r_imm_c(emith_invert_cond(tcond),sr, T);
  1139. emith_or_r_imm_c(tcond, sr, T);
  1140. }
  1141. tcond = -1;
  1142. }
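// e.g. after a compare the T value is only recorded as a condition code
// (emith_set_t_cond); a following conditional branch can use it directly via
// emith_tst_t, and the orr/bic pair above is emitted only when sr must hold
// the real T bit.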
  1143. static int emith_tst_t(int sr, int tf)
  1144. {
  1145. if (tcond < 0) {
  1146. emith_tst_r_imm(sr, T);
  1147. return tf ? DCOND_NE: DCOND_EQ;
  1148. } else if (tcond >= A_COND_AL) {
  1149. // MUST sync because A_COND_NV isn't a real condition
  1150. emith_sync_t(sr);
  1151. emith_tst_r_imm(sr, T);
  1152. return tf ? DCOND_NE: DCOND_EQ;
  1153. } else
  1154. return tf ? tcond : emith_invert_cond(tcond);
  1155. }
  1156. #endif