emit_arm.c
/*
 * Basic macros to emit ARM instructions and some utils
 * Copyright (C) 2008,2009,2010 notaz
 * Copyright (C) 2019 kub
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 */
#define HOST_REGS 16
#define CONTEXT_REG 11
#define RET_REG 0

// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
    do { \
        *(u32 *)ptr = x; \
        ptr = (void *)((u8 *)ptr + sizeof(u32)); \
    } while (0)

// ARM special registers and peephole optimization flags
#define SP 13 // stack pointer
#define LR 14 // link (return address)
#define PC 15 // program counter
#define SR 16 // CPSR, status register
#define MEM 17 // memory access (src=LDR, dst=STR)
#define CYC1 20 // 1 cycle interlock (LDR, reg-cntrld shift)
#define CYC2 21 // 2+ cycles interlock (LDR[BH], MUL/MLA etc)
#define SWAP 31 // swapped
#define NO 32 // token for "no register"

// bitmask builders
#define M1(x) (u32)(1ULL<<(x)) // u32 to have NO evaluate to 0
#define M2(x,y) (M1(x)|M1(y))
#define M3(x,y,z) (M2(x,y)|M1(z))
#define M4(x,y,z,a) (M3(x,y,z)|M1(a))
#define M5(x,y,z,a,b) (M4(x,y,z,a)|M1(b))
#define M10(a,b,c,d,e,f,g,h,i,j) (M5(a,b,c,d,e)|M5(f,g,h,i,j))
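
// Usage sketch: the Mx() builders construct the src/dst register sets passed
// to EMIT(), e.g. M2(rd,SR) marks rd and the status register as outputs, and
// M3(rn,rm,CYC1) marks rn/rm as inputs with a 1-cycle result latency. Since
// M1(NO) evaluates to 0, "no register" slots simply drop out of the mask.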

// peephole optimizer. ATM only tries to reduce interlock
#define EMIT_CACHE_SIZE 3
struct emit_op {
    u32 op;
    u32 src, dst;
};

// peephole cache, last committed insn + cache + next insn + empty insn = size+3
static struct emit_op emit_cache[EMIT_CACHE_SIZE+3];
static int emit_index;
#define emith_insn_ptr() (u8 *)((u32 *)tcache_ptr-emit_index)

static int emith_pool_index(int tcache_offs);
static void emith_pool_adjust(int pool_index, int move_offs);

static NOINLINE void EMIT(u32 op, u32 dst, u32 src)
{
    void *emit_ptr = (u32 *)tcache_ptr - emit_index;
    int i;

    EMIT_PTR(tcache_ptr, op); // emit to keep tcache_ptr current
    COUNT_OP;
    // for conditional execution SR is always source
    if (op < 0xe0000000 /*A_COND_AL << 28*/)
        src |= M1(SR);
    // put insn on back of queue
    emit_cache[emit_index+1].op = op;
    emit_cache[emit_index+1].src = src & ~M1(NO); // mask away the NO token
    emit_cache[emit_index+1].dst = dst & ~M1(NO);
    // move insn down in the queue as long as permitted by dependencies
    for (i = emit_index-1; i > 0; i--) {
        struct emit_op *ptr = &emit_cache[i];
        int deps = 0;

        // never swap branch insns (changes semantics)
        if ((ptr[0].dst | ptr[1].dst) & M1(PC))
            continue;
        // dst deps between 0 and 1 must not be swapped, since any deps
        // but [0].src & [1].src lead to changed semantics if swapped.
        if ((ptr[0].dst & ptr[1].src) || (ptr[1].dst & ptr[0].src) ||
            (ptr[0].dst & ptr[1].dst))
            continue;
#if 1
        // just move loads as far up as possible
        deps -= !!(ptr[1].src & M1(MEM));
        deps += !!(ptr[0].src & M1(MEM));
#elif 0
        // treat all dest->src deps as a potential interlock
#define DEP_INSN(x,y) !!(ptr[x].dst & ptr[y].src)
        // insn sequence: -1, 0, 1, 2
        deps -= DEP_INSN(1,2) + DEP_INSN(-1,0);
        deps -= !!(ptr[1].src & M1(MEM)); // favour moving LDR's down
        // insn sequence: -1, 1, 0, 2
        deps += DEP_INSN(0,2) + DEP_INSN(-1,1);
        deps += !!(ptr[0].src & M1(SWAP)); // penalise if swapped
#else
        // calculate ARM920T interlock cycles
#define DEP_CYC1(x,y) ((ptr[x].dst & ptr[y].src)&&(ptr[x].src & M1(CYC1)))
#define DEP_CYC2(x,y) ((ptr[x].dst & ptr[y].src)&&(ptr[x].src & M1(CYC2)))
#define DEP_INSN(x,y,z) DEP_CYC1(x,y)+DEP_CYC1(y,z)+2*DEP_CYC2(x,y)+DEP_CYC2(x,z)
        // insn sequence: -1, 0, 1, 2
        deps -= DEP_INSN(0,1,2) + DEP_INSN(-1,0,1);
        deps -= !!(ptr[1].src & M1(MEM)); // favour moving LDR's down
        // insn sequence: -1, 1, 0, 2
        deps += DEP_INSN(0,2,1) + DEP_INSN(-1,1,0);
        deps += !!(ptr[0].src & M1(SWAP)); // penalise multiple swaps
#endif
        // swap if fewer dependencies
        if (deps < 0) {
            // swap insn reading PC only if uncommitted pool load
            struct emit_op tmp;
            int i0 = -1, i1 = -1;
            if ((!(ptr[0].src & M1(PC)) ||
                    (i0 = emith_pool_index(emit_index+2 - i)) >= 0) &&
                (!(ptr[1].src & M1(PC)) ||
                    (i1 = emith_pool_index(emit_index+1 - i)) >= 0)) {
                // not using PC, or pool load
                emith_pool_adjust(i0, 1);
                emith_pool_adjust(i1, -1);
                tmp = ptr[0], ptr[0] = ptr[1], ptr[1] = tmp;
                ptr[0].src |= M1(SWAP);
            }
        }
    }

    if (emit_index <= EMIT_CACHE_SIZE) {
        // queue not yet full
        emit_index++;
    } else {
        // commit oldest insn from cache
        EMIT_PTR(emit_ptr, emit_cache[1].op);
        for (i = 0; i <= emit_index; i++)
            emit_cache[i] = emit_cache[i+1];
    }
}

static void emith_flush(void)
{
    int i;
    void *emit_ptr = tcache_ptr - emit_index*sizeof(u32);

    for (i = 1; i <= emit_index; i++)
        EMIT_PTR(emit_ptr, emit_cache[i].op);
    emit_index = 0;
}
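
// Illustration of what the scheduler above may do. With the default strategy
// ("move loads as far up as possible"), a sequence generated as
//   add r1, r2, r3
//   ldr r0, [r11, #8]
//   add r4, r0, #1        @ would stall on r0 on ARM9-class cores
// can come out as
//   ldr r0, [r11, #8]
//   add r1, r2, r3        @ independent insn now hides the load latency
//   add r4, r0, #1
// Swaps only happen when the src/dst masks show no dependency and neither insn
// writes PC; insns reading PC are moved only if they are uncommitted literal
// pool loads, in which case their pool reference is adjusted as well.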

#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
#define A_COND_HS 0x2
#define A_COND_LO 0x3
#define A_COND_MI 0x4
#define A_COND_PL 0x5
#define A_COND_VS 0x6
#define A_COND_VC 0x7
#define A_COND_HI 0x8
#define A_COND_LS 0x9
#define A_COND_GE 0xa
#define A_COND_LT 0xb
#define A_COND_GT 0xc
#define A_COND_LE 0xd
#define A_COND_CS A_COND_HS
#define A_COND_CC A_COND_LO
#define A_COND_NV 0xf // Not Valid (aka NeVer :-) - ATTN: not a real condition!

/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL
#define DCOND_HI A_COND_HI
#define DCOND_HS A_COND_HS
#define DCOND_LO A_COND_LO
#define DCOND_GE A_COND_GE
#define DCOND_GT A_COND_GT
#define DCOND_LT A_COND_LT
#define DCOND_LS A_COND_LS
#define DCOND_LE A_COND_LE
#define DCOND_VS A_COND_VS
#define DCOND_VC A_COND_VC
#define DCOND_CS A_COND_HS
#define DCOND_CC A_COND_LO

/* addressing mode 1 */
#define A_AM1_LSL 0
#define A_AM1_LSR 1
#define A_AM1_ASR 2
#define A_AM1_ROR 3

#define A_AM1_IMM(ror2,imm8) (((ror2)<<8) | (imm8) | 0x02000000)
#define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
#define A_AM1_REG_XREG(rs,shift_op,rm) (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))
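
// Note on the immediate form: an ARM op2 immediate is an 8-bit value rotated
// right by 2*ror2, with ror2 stored in bits [11:8]. For example
//   A_AM1_IMM(30/2, 1)   ->   1 ror 30  ==  1 << 2  ==  4
// which is how word-sized offsets are folded into add/sub immediates below.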

/* data processing op */
#define A_OP_AND 0x0
#define A_OP_EOR 0x1
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
#define A_OP_ADC 0x5
#define A_OP_SBC 0x6
#define A_OP_RSC 0x7
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_CMN 0xb
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe
#define A_OP_MVN 0xf

// operation specific register usage in DOP
#define A_Rn(op,rn) (((op)&0xd)!=0xd ? rn:NO) // no rn for MOV,MVN
#define A_Rd(op,rd) (((op)&0xc)!=0x8 ? rd:NO) // no rd for TST,TEQ,CMP,CMN
// CSPR is dst if S set, CSPR is src if op is ADC/SBC/RSC or shift is RRX
#define A_Sd(s) ((s) ? SR:NO)
#define A_Sr(op,sop) (((op)>=0x5 && (op)<=0x7) || (sop)>>4==A_AM1_ROR<<1 ? SR:NO)

#define EOP_C_DOP_X(cond,op,s,rn,rd,sop,rm,rs) \
    EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (sop), \
        M2(A_Rd(op,rd),A_Sd(s)), M5(A_Sr(op,sop),A_Rn(op,rn),rm,rs,rs==NO?NO:CYC1))

#define EOP_C_DOP_IMM( cond,op,s,rn,rd,ror2,imm8) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8), NO, NO)
#define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm), rm, NO)
#define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs, shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs, shift_op,rm), rm, rs)

#define EOP_MOV_IMM(rd, ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_MVN_IMM(rd, ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
#define EOP_TST_IMM( rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
#define EOP_CMP_IMM( rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
#define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_IMM_C(cond,rd, ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_REG(cond,s,rd, rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_MVN_REG(cond,s,rd, rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_CMP_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)

#define EOP_MOV_REG2(s,rd, rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
#define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
#define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)

#define EOP_MOV_REG_SIMPLE(rd,rm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
#define EOP_MOV_REG_LSL(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
#define EOP_MOV_REG_LSR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
#define EOP_MOV_REG_ASR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
#define EOP_MOV_REG_ROR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)

#define EOP_ORR_REG_SIMPLE(rd,rm) EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
#define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)

#define EOP_ADD_REG_SIMPLE(rd,rm) EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)

#define EOP_TST_REG_SIMPLE(rn,rm) EOP_TST_REG(A_COND_AL, rn, 0,A_AM1_LSL,rm)

#define EOP_MOV_REG2_LSL(rd, rm,rs) EOP_MOV_REG2(0,rd, rm,A_AM1_LSL,rs)
#define EOP_MOV_REG2_ROR(rd, rm,rs) EOP_MOV_REG2(0,rd, rm,A_AM1_ROR,rs)
#define EOP_ADD_REG2_LSL(rd,rn,rm,rs) EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
#define EOP_SUB_REG2_LSL(rd,rn,rm,rs) EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
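
// Usage sketch: the wrappers above map directly to ARM assembler syntax, e.g.
//   EOP_ADD_REG_LSL(0,1,2,2)   ->  add r0, r1, r2, lsl #2
//   EOP_MOV_REG_ASR(0,1,31)    ->  mov r0, r1, asr #31
//   EOP_CMP_IMM(0,0,1)         ->  cmp r0, #1
// Register-specified shifts (the *_REG2 forms) additionally flag a 1-cycle
// interlock (CYC1) for the scheduler, as they cost an extra cycle on
// ARM9-class cores.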

/* addressing mode 2 */
#define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
    EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
        ((offset_12) & 0xfff), M1(l?rd:MEM), M3(rn,l?MEM:rd,l?b?CYC2:CYC1:NO))

#define EOP_C_AM2_REG(cond,u,b,l,rn,rd,shift_imm,shift_op,rm) \
    EMIT(((cond)<<28) | 0x07000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
        A_AM1_REG_XIMM(shift_imm, shift_op, rm), M1(l?rd:MEM), M4(rn,rm,l?MEM:rd,l?b?CYC2:CYC1:NO))

/* addressing mode 3 */
#define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
    EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
        ((s)<<6) | ((h)<<5) | (immed_reg), M1(l?rd:MEM), M4(rn,r?NO:immed_reg,l?MEM:rd,l?CYC2:NO))

#define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))
#define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm) EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)

/* ldr and str */
#define EOP_LDR_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,(offset_12) >= 0,0,1,rn,rd,abs(offset_12))
#define EOP_LDRB_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,(offset_12) >= 0,1,1,rn,rd,abs(offset_12))
#define EOP_STR_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,(offset_12) >= 0,0,0,rn,rd,abs(offset_12))

#define EOP_LDR_IMM( rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,(offset_12) >= 0,0,1,rn,rd,abs(offset_12))
#define EOP_LDR_SIMPLE(rd,rn) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
#define EOP_STR_IMM( rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,(offset_12) >= 0,0,0,rn,rd,abs(offset_12))
#define EOP_STR_SIMPLE(rd,rn) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)

#define EOP_LDR_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,1,rn,rd,shift_imm,A_AM1_LSL,rm)
#define EOP_LDR_REG_LSL_WB(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,3,rn,rd,shift_imm,A_AM1_LSL,rm)
#define EOP_LDRB_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,1,1,rn,rd,shift_imm,A_AM1_LSL,rm)
#define EOP_STR_REG_LSL_WB(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,2,rn,rd,shift_imm,A_AM1_LSL,rm)

#define EOP_LDRH_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,(offset_8) >= 0,1,rn,rd,0,1,abs(offset_8))
#define EOP_LDRH_REG2(cond,rd,rn,rm) EOP_C_AM3_REG(cond,1,1,rn,rd,0,1,rm)

#define EOP_LDRH_IMM( rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,(offset_8) >= 0,1,rn,rd,0,1,abs(offset_8))
#define EOP_LDRH_SIMPLE(rd,rn) EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
#define EOP_LDRH_REG( rd,rn,rm) EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
#define EOP_STRH_IMM( rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,(offset_8) >= 0,0,rn,rd,0,1,abs(offset_8))
#define EOP_STRH_SIMPLE(rd,rn) EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
#define EOP_STRH_REG( rd,rn,rm) EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)

#define EOP_LDRSB_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,(offset_8) >= 0,1,rn,rd,1,0,abs(offset_8))
#define EOP_LDRSB_REG2(cond,rd,rn,rm) EOP_C_AM3_REG(cond,1,1,rn,rd,1,0,rm)
#define EOP_LDRSH_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,(offset_8) >= 0,1,rn,rd,1,1,abs(offset_8))
#define EOP_LDRSH_REG2(cond,rd,rn,rm) EOP_C_AM3_REG(cond,1,1,rn,rd,1,1,rm)
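
// Usage sketch: offsets are passed signed and split into the U bit plus an
// absolute value, e.g.
//   EOP_LDR_IMM(0, CONTEXT_REG, 0x40)   ->  ldr  r0, [r11, #0x40]
//   EOP_STR_IMM(1, CONTEXT_REG, -4)     ->  str  r1, [r11, #-4]
//   EOP_LDRH_REG(2, 3, 4)               ->  ldrh r2, [r3, r4]
// Byte and halfword loads are tagged CYC2 for the scheduler since their
// result latency is longer than that of a plain word load.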

/* ldm and stm */
#define EOP_XXM(cond,p,u,s,w,l,rn,list) \
    EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list), \
        M2(rn,l?NO:MEM)|(l?list:0), M3(rn,l?MEM:NO,l?CYC2:NO)|(l?0:list))

#define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list)
#define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list)

#define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,SP,list)
#define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,SP,list)

/* branches */
#define EOP_C_BX(cond,rm) \
    EMIT(((cond)<<28) | 0x012fff10 | (rm), M1(PC), M1(rm))

#define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \
    EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))

#define EOP_C_B(cond,l,signed_immed_24) \
    EMIT(((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24), M2(PC,l?LR:NO), M1(PC))

#define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
#define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)

/* misc */
#define EOP_C_MUL(cond,s,rd,rs,rm) \
    EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm), M2(rd,s?SR:NO), M3(rs,rm,CYC2))

#define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
    EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm), M3(rdhi,rdlo,s?SR:NO), M3(rs,rm,CYC2))

#define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
    EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm), M3(rdhi,rdlo,s?SR:NO), M3(rs,rm,CYC2))

#define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \
    EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm), M3(rdhi,rdlo,s?SR:NO), M5(rs,rm,rdlo,rdhi,CYC2))

#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm

#define EOP_C_MRS(cond,rd) \
    EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12), M1(rd), M1(SR))

#define EOP_C_MSR_IMM(cond,ror2,imm) \
    EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm), M1(SR), 0) // cpsr_f

#define EOP_C_MSR_REG(cond,rm) \
    EMIT(((cond)<<28) | 0x0128f000 | (rm), M1(SR), M1(rm)) // cpsr_f

#define EOP_MRS(rd) EOP_C_MRS(A_COND_AL,rd)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm) EOP_C_MSR_REG(A_COND_AL,rm)

#define EOP_MOVW(rd,imm) \
    EMIT(0xe3000000 | ((rd)<<12) | ((imm)&0xfff) | (((imm)<<4)&0xf0000), M1(rd), NO)

#define EOP_MOVT(rd,imm) \
    EMIT(0xe3400000 | ((rd)<<12) | (((imm)>>16)&0xfff) | (((imm)>>12)&0xf0000), M1(rd), NO)

// host literal pool; must be significantly smaller than 1024 (max LDR offset = 4096)
#define MAX_HOST_LITERALS 128
static u32 literal_pool[MAX_HOST_LITERALS];
static u32 *literal_insn[MAX_HOST_LITERALS];
static int literal_pindex, literal_iindex;

static int emith_pool_literal(u32 imm, int *offs)
{
    int idx = literal_pindex - 8; // max look behind in pool
    // see if one of the last literals was the same (or close enough)
    for (idx = (idx < 0 ? 0 : idx); idx < literal_pindex; idx++)
        if (abs((int)(imm - literal_pool[idx])) <= 0xff)
            break;
    if (idx == literal_pindex) // store new literal
        literal_pool[literal_pindex++] = imm;
    *offs = imm - literal_pool[idx];
    return idx;
}
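
// Illustration: emith_pool_literal() reuses a recent pool entry when the new
// constant is within +/-0xff of it; the caller then corrects the difference
// with an add/sub after the ldr. E.g. after 0x12345600 has been pooled, a
// request for 0x12345604 returns the same slot with *offs == 4 instead of
// growing the pool.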

// XXX: RSB, *S will break if 1 insn is not enough
static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
{
    int ror2;
    u32 v;
    int i;

    if (cond == A_COND_NV)
        return;

    switch (op) {
    case A_OP_MOV:
        rn = 0;
        // count bits in imm and use MVN if more bits 1 than 0
        if (count_bits(imm) > 16) {
            imm = ~imm;
            op = A_OP_MVN;
        }
        // count insns needed for mov/orr #imm
        for (v = imm, ror2 = 0; (v >> 24) && ror2 < 32/2; ror2++)
            v = (v << 2) | (v >> 30);
#ifdef HAVE_ARMV7
        for (i = 2; i > 0; i--, v >>= 8)
            while (v > 0xff && !(v & 3))
                v >>= 2;
        if (v) { // 3+ insns needed...
            if (op == A_OP_MVN)
                imm = ~imm;
            // ...prefer movw/movt
            EOP_MOVW(rd, imm);
            if (imm & 0xffff0000)
                EOP_MOVT(rd, imm);
            return;
        }
#else
        for (i = 3; i > 0; i--, v >>= 8)
            while (v > 0xff && !(v & 3))
                v >>= 2;
        if (v) { // 4 insns needed...
            if (op == A_OP_MVN)
                imm = ~imm;
            // ...emit literal load
            int idx, o;
            if (literal_iindex >= MAX_HOST_LITERALS) {
                elprintf(EL_STATUS|EL_SVP|EL_ANOMALY,
                    "pool overflow");
                exit(1);
            }
            idx = emith_pool_literal(imm, &o);
            literal_insn[literal_iindex++] = (u32 *)tcache_ptr;
            EOP_LDR_IMM2(cond, rd, PC, idx * sizeof(u32));
            if (o > 0)
                EOP_C_DOP_IMM(cond, A_OP_ADD, 0, rd, rd, 0, o);
            else if (o < 0)
                EOP_C_DOP_IMM(cond, A_OP_SUB, 0, rd, rd, 0, -o);
            return;
        }
#endif
        break;

    case A_OP_AND:
        // AND must fit into 1 insn. if not, use BIC
        for (v = imm, ror2 = 0; (v >> 8) && ror2 < 32/2; ror2++)
            v = (v << 2) | (v >> 30);
        if (v >> 8) {
            imm = ~imm;
            op = A_OP_BIC;
        }
        break;

    case A_OP_SUB:
    case A_OP_ADD:
        // count bits in imm and swap ADD and SUB if more bits 1 than 0
        if (s == 0 && count_bits(imm) > 16) {
            imm = -imm;
            op ^= (A_OP_ADD^A_OP_SUB);
        }
    case A_OP_EOR:
    case A_OP_ORR:
    case A_OP_BIC:
        if (s == 0 && imm == 0 && rd == rn)
            return;
        break;
    }

    // try to get the topmost byte empty to possibly save an insn
    for (v = imm, ror2 = 0; (v >> 24) && ror2 < 32/2; ror2++)
        v = (v << 2) | (v >> 30);

    do {
        // shift down to get 'best' rot2
        while (v > 0xff && !(v & 3))
            v >>= 2, ror2--;
        EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0xf, v & 0xff);

        switch (op) {
        case A_OP_MOV: op = A_OP_ORR; break;
        case A_OP_MVN: op = A_OP_BIC; break;
        case A_OP_ADC: op = A_OP_ADD; break;
        case A_OP_SBC: op = A_OP_SUB; break;
        }
        rn = rd;

        v >>= 8, ror2 -= 8/2;
    } while (v);
}
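
// Example of the immediate splitting above:
//   emith_move_r_imm(0, 0x00ff00ff)
// becomes, one rotated 8-bit chunk per insn,
//   mov r0, #0x000000ff
//   orr r0, r0, #0x00ff0000
// Constants needing more insns than that fall back to movw/movt (HAVE_ARMV7)
// or to a PC-relative ldr from the literal pool (older cores).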

#define emith_op_imm(cond, s, op, r, imm) \
    emith_op_imm2(cond, s, op, r, r, imm)

// test op
#define emith_top_imm(cond, op, r, imm) do { \
    u32 ror2, v; \
    for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
        ror2--; \
    EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
} while (0)

#define is_offset_24(val) \
    ((val) >= (int)0xff000000 && (val) <= 0x00ffffff)

static int emith_xbranch(int cond, void *target, int is_call)
{
    int val = (u32 *)target - (u32 *)tcache_ptr - 2;
    int direct = is_offset_24(val);
    u32 *start_ptr = (u32 *)tcache_ptr;

    if (cond == A_COND_NV)
        return 0; // never taken

    if (direct)
    {
        EOP_C_B(cond,is_call,val & 0xffffff); // b, bl target
    }
    else
    {
#ifdef __EPOC32__
//      elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
        if (is_call)
            EOP_ADD_IMM(LR,PC,0,8); // add lr,pc,#8
        EOP_C_AM2_IMM(cond,1,0,1,PC,PC,0); // ldrcc pc,[pc]
        EOP_MOV_REG_SIMPLE(PC,PC); // mov pc, pc
        EMIT((u32)target,M1(PC),0);
#else
        // should never happen
        elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %8p->%8p", target, tcache_ptr);
        exit(1);
#endif
    }

    return (u32 *)tcache_ptr - start_ptr;
}
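
// Note: the direct case covers the usual B/BL range of a signed 24-bit word
// offset (about +/-32MB). Out-of-range targets are only handled on __EPOC32__,
// where the absolute address is embedded after a "ldr pc, [pc]"; elsewhere
// they are treated as a fatal error.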

static void emith_pool_commit(int jumpover)
{
    int i, sz = literal_pindex * sizeof(u32);
    u8 *pool = (u8 *)tcache_ptr;

    // nothing to commit if pool is empty
    if (sz == 0)
        return;
    // need branch over pool if not at block end
    if (jumpover) {
        pool += sizeof(u32);
        emith_xbranch(A_COND_AL, (u8 *)pool + sz, 0);
    }
    emith_flush();
    // safety check - pool must be after insns and reachable
    if ((u32)(pool - (u8 *)literal_insn[0] + 8) > 0xfff) {
        elprintf(EL_STATUS|EL_SVP|EL_ANOMALY,
            "pool offset out of range");
        exit(1);
    }
    // copy pool and adjust addresses in insns accessing the pool
    memcpy(pool, literal_pool, sz);
    for (i = 0; i < literal_iindex; i++) {
        *literal_insn[i] += (u8 *)pool - ((u8 *)literal_insn[i] + 8);
    }
    // count pool constants as insns for statistics
    for (i = 0; i < literal_pindex; i++)
        COUNT_OP;

    tcache_ptr = (void *)((u8 *)pool + sz);
    literal_pindex = literal_iindex = 0;
}

static inline void emith_pool_check(void)
{
    // check if pool must be committed
    if (literal_iindex > MAX_HOST_LITERALS-4 ||
        (u8 *)tcache_ptr - (u8 *)literal_insn[0] > 0xe00)
        // pool full, or displacement is approaching the limit
        emith_pool_commit(1);
}

static inline int emith_pool_index(int tcache_offs)
{
    u32 *ptr = (u32 *)tcache_ptr - tcache_offs;
    int i;

    for (i = literal_iindex-1; i >= 0 && literal_insn[i] >= ptr; i--)
        if (literal_insn[i] == ptr)
            return i;
    return -1;
}

static inline void emith_pool_adjust(int pool_index, int move_offs)
{
    if (pool_index >= 0)
        literal_insn[pool_index] += move_offs;
}

#define JMP_POS(ptr) \
    ptr = tcache_ptr; \
    EMIT(0,M1(PC),0);

#define JMP_EMIT(cond, ptr) { \
    u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
    emith_flush(); \
    EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \
}

#define EMITH_JMP_START(cond) { \
    void *cond_ptr; \
    JMP_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
    JMP_EMIT(cond, cond_ptr); \
}

// fake "simple" or "short" jump - using cond insns instead
#define EMITH_NOTHING1(cond) \
    (void)(cond)

#define EMITH_SJMP_START(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP_END(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP2_START(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP2_MID(cond) EMITH_JMP_START((cond)^1) // inverse cond
#define EMITH_SJMP2_END(cond) EMITH_JMP_END((cond)^1)
#define EMITH_SJMP3_START(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP3_MID(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP3_END()
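
// Usage sketch: EMITH_JMP_* emit a real branch around the guarded block, while
// EMITH_SJMP_* rely purely on ARM conditional execution and emit nothing, e.g.
//   EMITH_SJMP_START(DCOND_EQ);          // skip the block if EQ
//   emith_add_r_imm_c(DCOND_NE, r, 1);   // executed only when NE
//   EMITH_SJMP_END(DCOND_EQ);
// The SJMP2 variants use conditional insns for the part before _MID and a
// real branch (on the inverted condition) around the part after it.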

#define emith_move_r_r_c(cond, d, s) \
    EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,0)
#define emith_move_r_r(d, s) \
    emith_move_r_r_c(A_COND_AL, d, s)
#define emith_move_r_r_ptr_c(cond, d, s) \
    emith_move_r_r_c(cond, d, s)
#define emith_move_r_r_ptr(d, s) \
    emith_move_r_r(d, s)

#define emith_mvn_r_r(d, s) \
    EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)

#define emith_add_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_ADD_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_add_r_r_r_lsl_ptr(d, s1, s2, lslimm) \
    emith_add_r_r_r_lsl(d, s1, s2, lslimm)
#define emith_addf_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_ADD_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_addf_r_r_r_lsr(d, s1, s2, lslimm) \
    EOP_ADD_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSR,lslimm)
#define emith_adcf_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_ADC_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_sub_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_SUB_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_subf_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_SUB_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_sbcf_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_SBC_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \
    EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)
#define emith_and_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_AND_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_or_r_r_lsl(d, s, lslimm) \
    emith_or_r_r_r_lsl(d, d, s, lslimm)
#define emith_eor_r_r_lsr(d, s, lsrimm) \
    emith_eor_r_r_r_lsr(d, d, s, lsrimm)

#define emith_add_r_r_r(d, s1, s2) \
    emith_add_r_r_r_lsl(d, s1, s2, 0)
#define emith_addf_r_r_r(d, s1, s2) \
    emith_addf_r_r_r_lsl(d, s1, s2, 0)
#define emith_adcf_r_r_r(d, s1, s2) \
    emith_adcf_r_r_r_lsl(d, s1, s2, 0)
#define emith_sub_r_r_r(d, s1, s2) \
    emith_sub_r_r_r_lsl(d, s1, s2, 0)
#define emith_subf_r_r_r(d, s1, s2) \
    emith_subf_r_r_r_lsl(d, s1, s2, 0)
#define emith_sbcf_r_r_r(d, s1, s2) \
    emith_sbcf_r_r_r_lsl(d, s1, s2, 0)
#define emith_or_r_r_r(d, s1, s2) \
    emith_or_r_r_r_lsl(d, s1, s2, 0)
#define emith_eor_r_r_r(d, s1, s2) \
    emith_eor_r_r_r_lsl(d, s1, s2, 0)
#define emith_and_r_r_r(d, s1, s2) \
    emith_and_r_r_r_lsl(d, s1, s2, 0)

#define emith_add_r_r(d, s) \
    emith_add_r_r_r(d, d, s)
#define emith_add_r_r_ptr(d, s) \
    emith_add_r_r_r(d, d, s)
#define emith_sub_r_r(d, s) \
    emith_sub_r_r_r(d, d, s)
#define emith_adc_r_r(d, s) \
    EOP_ADC_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
#define emith_and_r_r_c(cond, d, s) \
    EOP_AND_REG(cond,0,d,d,s,A_AM1_LSL,0)
#define emith_and_r_r(d, s) \
    EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
#define emith_or_r_r(d, s) \
    emith_or_r_r_r(d, d, s)
#define emith_eor_r_r(d, s) \
    emith_eor_r_r_r(d, d, s)

#define emith_tst_r_r(d, s) \
    EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)
#define emith_tst_r_r_ptr(d, s) \
    emith_tst_r_r(d, s)
#define emith_teq_r_r(d, s) \
    EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)
#define emith_cmp_r_r(d, s) \
    EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)
#define emith_addf_r_r(d, s) \
    EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
#define emith_subf_r_r(d, s) \
    EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
#define emith_adcf_r_r(d, s) \
    EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
#define emith_sbcf_r_r(d, s) \
    EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
#define emith_eorf_r_r(d, s) \
    EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_move_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)
#define emith_move_r_ptr_imm(r, imm) \
    emith_move_r_imm(r, (u32)(imm))
#define emith_add_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)
#define emith_adc_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm)
#define emith_adcf_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 1, A_OP_ADC, r, imm)
#define emith_sub_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)
#define emith_bic_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)
#define emith_and_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)
#define emith_or_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)
#define emith_eor_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)
#define emith_eor_r_imm_ptr(r, imm) \
    emith_eor_r_imm(r, imm)

// note: only use 8bit imm for these
#define emith_tst_r_imm(r, imm) \
    emith_top_imm(A_COND_AL, A_OP_TST, r, imm)
#define emith_cmp_r_imm(r, imm) do { \
    u32 op_ = A_OP_CMP, imm_ = (u8)imm; \
    if ((s8)imm_ < 0) { \
        imm_ = (u8)-imm_; \
        op_ = A_OP_CMN; \
    } \
    emith_top_imm(A_COND_AL, op_, r, imm_); \
} while (0)

#define emith_subf_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)

#define emith_move_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_MOV, r, imm)
#define emith_add_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_ADD, r, imm)
#define emith_sub_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_SUB, r, imm)
#define emith_or_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_ORR, r, imm)
#define emith_eor_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_EOR, r, imm)
#define emith_eor_r_imm_ptr_c(cond, r, imm) \
    emith_eor_r_imm_c(cond, r, imm)
#define emith_bic_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_BIC, r, imm)
#define emith_tst_r_imm_c(cond, r, imm) \
    emith_top_imm(cond, A_OP_TST, r, imm)

#define emith_move_r_imm_s8(r, imm) do { \
    if ((s8)(imm) < 0) \
        EOP_MVN_IMM(r, 0, ((u8)(imm) ^ 0xff)); \
    else \
        EOP_MOV_IMM(r, 0, (u8)imm); \
} while (0)

#define emith_and_r_r_imm(d, s, imm) \
    emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)
#define emith_add_r_r_imm(d, s, imm) \
    emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)
#define emith_add_r_r_ptr_imm(d, s, imm) \
    emith_add_r_r_imm(d, s, imm)
#define emith_sub_r_r_imm_c(cond, d, s, imm) \
    emith_op_imm2(cond, 0, A_OP_SUB, d, s, (imm))
#define emith_sub_r_r_imm(d, s, imm) \
    emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)
#define emith_subf_r_r_imm(d, s, imm) \
    emith_op_imm2(A_COND_AL, 1, A_OP_SUB, d, s, imm)
#define emith_or_r_r_imm(d, s, imm) \
    emith_op_imm2(A_COND_AL, 0, A_OP_ORR, d, s, imm)
#define emith_eor_r_r_imm(d, s, imm) \
    emith_op_imm2(A_COND_AL, 0, A_OP_EOR, d, s, imm)

#define emith_neg_r_r(d, s) \
    EOP_RSB_IMM(d, s, 0, 0)

#define emith_lsl(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)
#define emith_lsr(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)
#define emith_asr(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)
#define emith_ror_c(cond, d, s, cnt) \
    EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt)
#define emith_ror(d, s, cnt) \
    emith_ror_c(A_COND_AL, d, s, cnt)
#define emith_rol(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt))

#define emith_lslf(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)
#define emith_lsrf(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)
#define emith_asrf(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)

// note: only C flag updated correctly
#define emith_rolf(d, s, cnt) do { \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
    /* we don't have ROL so we shift to get the right carry */ \
    EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
} while (0)

#define emith_rorf(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)

#define emith_rolcf(d) \
    emith_adcf_r_r(d, d)
#define emith_rorcf(d) \
    EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */

#define emith_negcf_r_r(d, s) \
    EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)

#define emith_mul(d, s1, s2) do { \
    if ((d) != (s1)) /* rd != rm limitation */ \
        EOP_MUL(d, s1, s2); \
    else \
        EOP_MUL(d, s2, s1); \
} while (0)

#define emith_mul_u64(dlo, dhi, s1, s2) \
    EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)
#define emith_mul_s64(dlo, dhi, s1, s2) \
    EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)
#define emith_mula_s64_c(cond, dlo, dhi, s1, s2) \
    EOP_C_SMLAL(cond,0,dhi,dlo,s1,s2)
#define emith_mula_s64(dlo, dhi, s1, s2) \
    EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)

// misc
#define emith_read_r_r_offs_c(cond, r, rs, offs) \
    EOP_LDR_IMM2(cond, r, rs, offs)
#define emith_read_r_r_offs_ptr_c(cond, r, rs, offs) \
    emith_read_r_r_offs_c(cond, r, rs, offs)
#define emith_read_r_r_r_c(cond, r, rs, rm) \
    EOP_LDR_REG_LSL(cond, r, rs, rm, 0)
#define emith_read_r_r_offs(r, rs, offs) \
    emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read_r_r_offs_ptr(r, rs, offs) \
    emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read_r_r_r(r, rs, rm) \
    EOP_LDR_REG_LSL(A_COND_AL, r, rs, rm, 0)
#define emith_read_r_r_r_wb(r, rs, rm) \
    EOP_LDR_REG_LSL_WB(A_COND_AL, r, rs, rm, 0)
#define emith_read_r_r_r_ptr_wb(r, rs, rm) \
    emith_read_r_r_r_wb(r, rs, rm)

#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
    EOP_LDRB_IMM2(cond, r, rs, offs)
#define emith_read8_r_r_r_c(cond, r, rs, rm) \
    EOP_LDRB_REG_LSL(cond, r, rs, rm, 0)
#define emith_read8_r_r_offs(r, rs, offs) \
    emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read8_r_r_r(r, rs, rm) \
    emith_read8_r_r_r_c(A_COND_AL, r, rs, rm)

#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
    EOP_LDRH_IMM2(cond, r, rs, offs)
#define emith_read16_r_r_r_c(cond, r, rs, rm) \
    EOP_LDRH_REG2(cond, r, rs, rm)
#define emith_read16_r_r_offs(r, rs, offs) \
    emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read16_r_r_r(r, rs, rm) \
    emith_read16_r_r_r_c(A_COND_AL, r, rs, rm)

#define emith_read8s_r_r_offs_c(cond, r, rs, offs) \
    EOP_LDRSB_IMM2(cond, r, rs, offs)
#define emith_read8s_r_r_r_c(cond, r, rs, rm) \
    EOP_LDRSB_REG2(cond, r, rs, rm)
#define emith_read8s_r_r_offs(r, rs, offs) \
    emith_read8s_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read8s_r_r_r(r, rs, rm) \
    emith_read8s_r_r_r_c(A_COND_AL, r, rs, rm)

#define emith_read16s_r_r_offs_c(cond, r, rs, offs) \
    EOP_LDRSH_IMM2(cond, r, rs, offs)
#define emith_read16s_r_r_r_c(cond, r, rs, rm) \
    EOP_LDRSH_REG2(cond, r, rs, rm)
#define emith_read16s_r_r_offs(r, rs, offs) \
    emith_read16s_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read16s_r_r_r(r, rs, rm) \
    emith_read16s_r_r_r_c(A_COND_AL, r, rs, rm)

#define emith_write_r_r_offs_c(cond, r, rs, offs) \
    EOP_STR_IMM2(cond, r, rs, offs)
#define emith_write_r_r_offs_ptr_c(cond, r, rs, offs) \
    emith_write_r_r_offs_c(cond, r, rs, offs)
#define emith_write_r_r_offs(r, rs, offs) \
    emith_write_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_write_r_r_offs_ptr(r, rs, offs) \
    emith_write_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_write_r_r_r_wb(r, rs, rm) \
    EOP_STR_REG_LSL_WB(A_COND_AL, r, rs, rm, 0)
#define emith_write_r_r_r_ptr_wb(r, rs, rm) \
    emith_write_r_r_r_wb(r, rs, rm)

#define emith_ctx_read_c(cond, r, offs) \
    emith_read_r_r_offs_c(cond, r, CONTEXT_REG, offs)
#define emith_ctx_read(r, offs) \
    emith_ctx_read_c(A_COND_AL, r, offs)
#define emith_ctx_read_ptr(r, offs) \
    emith_ctx_read(r, offs)
#define emith_ctx_write(r, offs) \
    EOP_STR_IMM(r, CONTEXT_REG, offs)

#define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \
    int v_, r_ = r, c_ = count, b_ = CONTEXT_REG; \
    for (v_ = 0; c_; c_--, r_++) \
        v_ |= M1(r_); \
    if ((offs) != 0) { \
        EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2);\
        b_ = tmpr; \
    } \
    op(b_,v_); \
} while (0)

#define emith_ctx_read_multiple(r, offs, count, tmpr) \
    emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr)
#define emith_ctx_write_multiple(r, offs, count, tmpr) \
    emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr)

#define emith_clear_msb_c(cond, d, s, count) do { \
    u32 t; \
    if ((count) <= 8) { \
        t = 8 - (count); \
        t = (0xff << t) & 0xff; \
        EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
    } else if ((count) >= 24) { \
        t = (count) - 24; \
        t = 0xff >> t; \
        EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
    } else { \
        EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \
        EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \
    } \
} while (0)

#define emith_clear_msb(d, s, count) \
    emith_clear_msb_c(A_COND_AL, d, s, count)

#define emith_sext(d, s, bits) do { \
    EOP_MOV_REG_LSL(d,s,32 - (bits)); \
    EOP_MOV_REG_ASR(d,d,32 - (bits)); \
} while (0)

#define emith_do_caller_regs(mask, func) do { \
    u32 _reg_mask = (mask) & 0x500f; \
    if (_reg_mask) { \
        if (__builtin_parity(_reg_mask) == 1) \
            _reg_mask |= 0x10; /* eabi align */ \
        func(_reg_mask); \
    } \
} while (0)

#define emith_save_caller_regs(mask) \
    emith_do_caller_regs(mask, EOP_STMFD_SP)
#define emith_restore_caller_regs(mask) \
    emith_do_caller_regs(mask, EOP_LDMFD_SP)

// up to 4 args
#define emith_pass_arg_r(arg, reg) \
    EOP_MOV_REG_SIMPLE(arg, reg)
#define emith_pass_arg_imm(arg, imm) \
    emith_move_r_imm(arg, imm)

#define emith_jump(target) \
    emith_jump_cond(A_COND_AL, target)
#define emith_jump_patchable(target) \
    emith_jump(target)

#define emith_jump_cond(cond, target) \
    emith_xbranch(cond, target, 0)
#define emith_jump_cond_patchable(cond, target) \
    emith_jump_cond(cond, target)

#define emith_jump_patch(ptr, target) ({ \
    u32 *ptr_ = ptr; \
    u32 val_ = (u32 *)(target) - ptr_ - 2; \
    *ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
    (u8 *)ptr; \
})
#define emith_jump_patch_size() 4

#define emith_jump_at(ptr, target) do { \
    u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \
    emith_flush(); \
    EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \
} while (0)

#define emith_jump_reg_c(cond, r) \
    EOP_C_BX(cond, r)
#define emith_jump_reg(r) \
    emith_jump_reg_c(A_COND_AL, r)

#define emith_jump_ctx_c(cond, offs) \
    EOP_LDR_IMM2(cond,PC,CONTEXT_REG,offs)
#define emith_jump_ctx(offs) \
    emith_jump_ctx_c(A_COND_AL, offs)

#define emith_call_cond(cond, target) \
    emith_xbranch(cond, target, 1)
#define emith_call(target) \
    emith_call_cond(A_COND_AL, target)

#define emith_call_reg(r) do { \
    emith_move_r_r(LR, PC); \
    EOP_C_BX(A_COND_AL, r); \
} while (0)

#define emith_call_ctx(offs) do { \
    emith_move_r_r(LR, PC); \
    emith_jump_ctx(offs); \
} while (0)

#define emith_call_link(r, target) do { \
    emith_move_r_r(r, PC); \
    emith_jump(target); \
} while (0)

#define emith_call_cleanup() /**/

#define emith_ret_c(cond) \
    emith_jump_reg_c(cond, LR)
#define emith_ret() \
    emith_ret_c(A_COND_AL)
#define emith_ret_to_ctx(offs) \
    emith_ctx_write(LR, offs)

/* pushes r12 for eabi alignment */
#define emith_push_ret(r) do { \
    int r_ = (r >= 0 ? r : 12); \
    EOP_STMFD_SP(M2(r_,LR)); \
} while (0)

#define emith_pop_and_ret(r) do { \
    int r_ = (r >= 0 ? r : 12); \
    EOP_LDMFD_SP(M2(r_,PC)); \
} while (0)

#define host_instructions_updated(base, end) \
    cache_flush_d_inval_i(base, end)

#define host_arg2reg(rd, arg) \
    rd = arg

#define emith_rw_offs_max() 0xff

/* SH2 drc specific */
/* pushes r12 for eabi alignment */
#define emith_sh2_drc_entry() \
    EOP_STMFD_SP(M10(4,5,6,7,8,9,10,11,12,LR))
#define emith_sh2_drc_exit() \
    EOP_LDMFD_SP(M10(4,5,6,7,8,9,10,11,12,PC))

// assumes a is in arg0, tab, func and mask are temp
#define emith_sh2_rcall(a, tab, func, mask) do { \
    emith_lsr(mask, a, SH2_READ_SHIFT); \
    EOP_ADD_REG_LSL(tab, tab, mask, 3); \
    if (func < mask) EOP_LDMIA(tab, M2(func,mask)); /* ldm if possible */ \
    else { emith_read_r_r_offs(func, tab, 0); \
           emith_read_r_r_offs(mask, tab, 4); } \
    emith_addf_r_r_r(func,func,func); \
} while (0)

// assumes a, val are in arg0 and arg1, tab and func are temp
#define emith_sh2_wcall(a, val, tab, func) do { \
    emith_lsr(func, a, SH2_WRITE_SHIFT); \
    EOP_LDR_REG_LSL(A_COND_AL,func,tab,func,2); \
    emith_move_r_r(2, CONTEXT_REG); /* arg2 */ \
    emith_jump_reg(func); \
} while (0)

#define emith_sh2_dtbf_loop() do { \
    int cr, rn; \
    int tmp_ = rcache_get_tmp(); \
    cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
    rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
    emith_sub_r_imm(rn, 1); /* sub rn, #1 */ \
    emith_bic_r_imm(cr, 1); /* bic cr, #1 */ \
    emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
    cycles = 0; \
    emith_asrf(tmp_, cr, 2+12); /* movs tmp_, cr, asr #2+12 */ \
    EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0); /* movmi tmp_, #0 */ \
    emith_lsl(cr, cr, 20); /* mov cr, cr, lsl #20 */ \
    emith_lsr(cr, cr, 20); /* mov cr, cr, lsr #20 */ \
    emith_subf_r_r(rn, tmp_); /* subs rn, tmp_ */ \
    EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0); /* rsbls tmp_, rn, #0 */ \
    EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */ \
    EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1); /* orrls cr, #1 */ \
    EOP_MOV_IMM_C(A_COND_LS,rn,0,0); /* movls rn, #0 */ \
    rcache_free_tmp(tmp_); \
} while (0)

#define emith_sh2_delay_loop(cycles, reg) do { \
    int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL); \
    int t1 = rcache_get_tmp(); \
    int t2 = rcache_get_tmp(); \
    int t3 = rcache_get_tmp(); \
    /* if (sr < 0) return */ \
    emith_asrf(t2, sr, 12); \
    EMITH_JMP_START(DCOND_LE); \
    /* turns = sr.cycles / cycles */ \
    emith_move_r_imm(t3, (u32)((1ULL<<32) / (cycles)) + 1); \
    emith_mul_u64(t1, t2, t2, t3); /* multiply by 1/x */ \
    rcache_free_tmp(t3); \
    if (reg >= 0) { \
        /* if (reg <= turns) turns = reg-1 */ \
        t3 = rcache_get_reg(reg, RC_GR_RMW, NULL); \
        emith_cmp_r_r(t3, t2); \
        emith_sub_r_r_imm_c(DCOND_LS, t2, t3, 1); \
        /* if (reg <= 1) turns = 0 */ \
        emith_cmp_r_imm(t3, 1); \
        emith_move_r_imm_c(DCOND_LS, t2, 0); \
        /* reg -= turns */ \
        emith_sub_r_r(t3, t2); \
    } \
    /* sr.cycles -= turns * cycles; */ \
    emith_move_r_imm(t1, cycles); \
    emith_mul(t1, t2, t1); \
    emith_sub_r_r_r_lsl(sr, sr, t1, 12); \
    EMITH_JMP_END(DCOND_LE); \
    rcache_free_tmp(t1); \
    rcache_free_tmp(t2); \
} while (0)
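
// Note on the loop above: "turns = sr.cycles / cycles" is computed without a
// divide insn by multiplying with the 32-bit reciprocal (2^32/cycles + 1) and
// keeping only the upper half of the 64-bit product, which is accurate for
// the cycle counts involved here.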

#define emith_write_sr(sr, srcr) do { \
    emith_lsr(sr, sr, 10); \
    emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
    emith_ror(sr, sr, 22); \
} while (0)
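
// Illustration: emith_write_sr() keeps the upper 22 bits of the cached sr
// (DRC-internal state such as the cycle counter) and replaces the low 10 bits
// (the architected SR flags) with those of srcr:
//   lsr sr, sr, #10            @ drop the old low 10 bits
//   orr sr, sr, srcr, lsl #22  @ place the new bits at the top
//   ror sr, sr, #22            @ rotate everything back into position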

#define emith_carry_to_t(srr, is_sub) do { \
    if (is_sub) { /* has inverted C on ARM */ \
        emith_or_r_imm_c(A_COND_CC, srr, 1); \
        emith_bic_r_imm_c(A_COND_CS, srr, 1); \
    } else { \
        emith_or_r_imm_c(A_COND_CS, srr, 1); \
        emith_bic_r_imm_c(A_COND_CC, srr, 1); \
    } \
} while (0)

#define emith_tpop_carry(sr, is_sub) do { \
    if (is_sub) \
        emith_eor_r_imm(sr, 1); \
    emith_lsrf(sr, sr, 1); \
} while (0)

#define emith_tpush_carry(sr, is_sub) do { \
    emith_adc_r_r(sr, sr); \
    if (is_sub) \
        emith_eor_r_imm(sr, 1); \
} while (0)

/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */
#define emith_sh2_div1_step(rn, rm, sr) do { \
    void *jmp0, *jmp1; \
    emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
    JMP_POS(jmp0); /* beq do_sub */ \
    emith_addf_r_r(rn, rm); \
    emith_eor_r_imm_c(A_COND_CS, sr, T); \
    JMP_POS(jmp1); /* b done */ \
    JMP_EMIT(A_COND_EQ, jmp0); /* do_sub: */ \
    emith_subf_r_r(rn, rm); \
    emith_eor_r_imm_c(A_COND_CC, sr, T); \
    JMP_EMIT(A_COND_AL, jmp1); /* done: */ \
} while (0)

/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macl(ml, mh, rn, rm, sr) do { \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP2_START(DCOND_NE); \
    emith_mula_s64_c(DCOND_EQ, ml, mh, rn, rm); \
    EMITH_SJMP2_MID(DCOND_NE); \
    /* MACH top 16 bits unused if saturated. sign ext for overfl detect */ \
    emith_sext(mh, mh, 16); \
    emith_mula_s64(ml, mh, rn, rm); \
    /* overflow if top 17 bits of MACH aren't all 1 or 0 */ \
    /* to check: add MACH[15] to MACH[31:16]. this is 0 if no overflow */ \
    emith_asrf(rn, mh, 16); /* sum = (MACH>>16) + ((MACH>>15)&1) */ \
    emith_adcf_r_imm(rn, 0); /* (MACH>>15) is in carry after shift */ \
    EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> ov */ \
    emith_move_r_imm_c(DCOND_NE, ml, 0x0000); /* -overflow */ \
    emith_move_r_imm_c(DCOND_NE, mh, 0x8000); \
    EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> +ovl */ \
    emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0xffffffff */ \
    emith_sub_r_imm_c(DCOND_GT, mh, 1); /* 0x00007fff */ \
    EMITH_SJMP_END(DCOND_LE); \
    EMITH_SJMP_END(DCOND_EQ); \
    EMITH_SJMP2_END(DCOND_NE); \
} while (0)

/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macw(ml, mh, rn, rm, sr) do { \
    emith_tst_r_imm(sr, S); \
    EMITH_SJMP2_START(DCOND_NE); \
    emith_mula_s64_c(DCOND_EQ, ml, mh, rn, rm); \
    EMITH_SJMP2_MID(DCOND_NE); \
    /* XXX: MACH should be untouched when S is set? */ \
    emith_asr(mh, ml, 31); /* sign ext MACL to MACH for ovrfl check */ \
    emith_mula_s64(ml, mh, rn, rm); \
    /* overflow if top 33 bits of MACH:MACL aren't all 1 or 0 */ \
    /* to check: add MACL[31] to MACH. this is 0 if no overflow */ \
    emith_addf_r_r_r_lsr(mh, mh, ml, 31); /* sum = MACH + ((MACL>>31)&1) */ \
    EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> overflow */ \
    /* XXX: LSB signalling only in SH1, or in SH2 too? */ \
    emith_move_r_imm_c(DCOND_NE, mh, 0x00000001); /* LSB of MACH */ \
    emith_move_r_imm_c(DCOND_NE, ml, 0x80000000); /* negative ovrfl */ \
    EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> positive ovrfl */ \
    emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0x7fffffff */ \
    EMITH_SJMP_END(DCOND_LE); \
    EMITH_SJMP_END(DCOND_EQ); \
    EMITH_SJMP2_END(DCOND_NE); \
} while (0)

#ifdef T
// T bit handling
static int tcond = -1;

#define emith_invert_cond(cond) \
    ((cond) ^ 1)

#define emith_clr_t_cond(sr) \
    (void)sr

#define emith_set_t_cond(sr, cond) \
    tcond = cond

#define emith_get_t_cond() \
    tcond

#define emith_invalidate_t() \
    tcond = -1

#define emith_set_t(sr, val) \
    tcond = ((val) ? A_COND_AL: A_COND_NV)

static void emith_sync_t(int sr)
{
    if (tcond == A_COND_AL)
        emith_or_r_imm(sr, T);
    else if (tcond == A_COND_NV)
        emith_bic_r_imm(sr, T);
    else if (tcond >= 0) {
        emith_bic_r_imm_c(emith_invert_cond(tcond),sr, T);
        emith_or_r_imm_c(tcond, sr, T);
    }
    tcond = -1;
}

static int emith_tst_t(int sr, int tf)
{
    if (tcond < 0) {
        emith_tst_r_imm(sr, T);
        return tf ? DCOND_NE: DCOND_EQ;
    } else if (tcond >= A_COND_AL) {
        // MUST sync because A_COND_NV isn't a real condition
        emith_sync_t(sr);
        emith_tst_r_imm(sr, T);
        return tf ? DCOND_NE: DCOND_EQ;
    } else
        return tf ? tcond : emith_invert_cond(tcond);
}
#endif