emit_arm.c
/*
 * Basic macros to emit ARM instructions and some utils
 * Copyright (C) 2008,2009,2010 notaz
 * Copyright (C) 2019 kub
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 */
#define HOST_REGS 16

// OABI/EABI: params: r0-r3, return: r0-r1, temp: r12,r14, saved: r4-r8,r10,r11
// SP,PC: r13,r15 must not be used. saved: r9 (for platform use, e.g. on ios)
#define RET_REG 0
#define PARAM_REGS { 0, 1, 2, 3 }
#ifndef __MACH__
#define PRESERVED_REGS { 4, 5, 6, 7, 8, 9, 10, 11 }
#else
#define PRESERVED_REGS { 4, 5, 6, 7, 8, 10, 11 } // no r9..
#endif
#define TEMPORARY_REGS { 12, 14 }

#define CONTEXT_REG 11
#define STATIC_SH2_REGS { SHR_SR,10 , SHR_R(0),8 , SHR_R(1),9 }

// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
	do { \
		*(u32 *)ptr = x; \
		ptr = (void *)((u8 *)ptr + sizeof(u32)); \
	} while (0)

// ARM special registers and peephole optimization flags
#define SP 13 // stack pointer
#define LR 14 // link (return address)
#define PC 15 // program counter
#define SR 16 // CPSR, status register
#define MEM 17 // memory access (src=LDR, dst=STR)
#define CYC1 20 // 1 cycle interlock (LDR, reg-cntrld shift)
#define CYC2 (CYC1+1) // 2+ cycles interlock (LDR[BH], MUL/MLA etc)
#define NO 32 // token for "no register"

// bitmask builders
#define M1(x) (u32)(1ULL<<(x)) // u32 to have NO evaluate to 0
#define M2(x,y) (M1(x)|M1(y))
#define M3(x,y,z) (M2(x,y)|M1(z))
#define M4(x,y,z,a) (M3(x,y,z)|M1(a))
#define M5(x,y,z,a,b) (M4(x,y,z,a)|M1(b))
#define M6(x,y,z,a,b,c) (M5(x,y,z,a,b)|M1(c))
#define M10(a,b,c,d,e,f,g,h,i,j) (M5(a,b,c,d,e)|M5(f,g,h,i,j))
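
/* Illustrative note (added, not in the original source): M1..M10 build u32
 * register bitmasks for the dependency tracking in the peephole code below,
 * e.g. M2(0,1) == 0x3 marks r0 and r1, and M1(NO) == 0 because bit 32 falls
 * out of the u32 result. */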
// avoid a warning with clang
static inline uintptr_t pabs(intptr_t v) { return labs(v); }

// sys_cacheflush always flushes whole pages, and it's rather expensive on ARMs
// hold a list of pending cache updates and merge requests to reduce cacheflush
static struct { void *base, *end; } pageflush[4];
static unsigned pagesize = 4096;

static void emith_update_cache(void)
{
	int i;

	for (i = 0; i < 4 && pageflush[i].base; i++) {
		cache_flush_d_inval_i(pageflush[i].base, pageflush[i].end + pagesize-1);
		pageflush[i].base = NULL;
	}
}

static inline void emith_update_add(void *base, void *end)
{
	void *p_base = (void *)((uintptr_t)(base) & ~(pagesize-1));
	void *p_end  = (void *)((uintptr_t)(end ) & ~(pagesize-1));
	int i;

	for (i = 0; i < 4 && pageflush[i].base; i++) {
		if (p_base <= pageflush[i].end+pagesize && p_end >= pageflush[i].end) {
			if (p_base < pageflush[i].base) pageflush[i].base = p_base;
			pageflush[i].end = p_end;
			return;
		}
		if (p_base <= pageflush[i].base && p_end >= pageflush[i].base-pagesize) {
			if (p_end > pageflush[i].end) pageflush[i].end = p_end;
			pageflush[i].base = p_base;
			return;
		}
	}
	if (i == 4) {
		/* list full and not mergeable -> flush list */
		emith_update_cache();
		i = 0;
	}
	pageflush[i].base = p_base, pageflush[i].end = p_end;
}
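
/* Illustrative usage sketch (an assumption about the callers, not from this
 * file): a compiler would record each freshly emitted range and flush once
 * per batch, e.g.
 *
 *   void *start = tcache_ptr;
 *   // ... emit a block, then emith_flush() ...
 *   emith_update_add(start, tcache_ptr);
 *   // ... possibly more blocks ...
 *   emith_update_cache();  // one D-clean/I-invalidate before executing
 */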
// peephole optimizer. ATM only tries to reduce interlock
#define EMIT_CACHE_SIZE 6
struct emit_op {
	u32 op;
	u32 src, dst;
};

// peephole cache, last committed insn + cache + next insn = size+2
static struct emit_op emit_cache[EMIT_CACHE_SIZE+2];
static int emit_index;
#define emith_insn_ptr() (u8 *)((u32 *)tcache_ptr-emit_index)

static inline void emith_pool_adjust(int tcache_offs, int move_offs);

static NOINLINE void EMIT(u32 op, u32 dst, u32 src)
{
	void * emit_ptr = (u32 *)tcache_ptr - emit_index;
	struct emit_op *const ptr = emit_cache;
	const int n = emit_index+1;
	int i, bi, bd = 0;

	// account for new insn in tcache
	tcache_ptr = (void *)((u32 *)tcache_ptr + 1);
	COUNT_OP;
	// for conditional execution SR is always source
	if (op < 0xe0000000 /*A_COND_AL << 28*/)
		src |= M1(SR);
	// put insn on back of queue // mask away the NO token
	emit_cache[n] = (struct emit_op)
		{ .op=op, .src=src & ~M1(NO), .dst=dst & ~M1(NO) };

	// check insns down the queue as long as permitted by dependencies
	for (bd = bi = 0, i = emit_index; i > 1 && !(dst & M1(PC)); i--) {
		int deps = 0;
		// dst deps between i and n must not be swapped, since any deps
		// but [i].src & [n].src lead to changed semantics if swapped.
		if ((ptr[i].dst & ptr[n].src) || (ptr[n].dst & ptr[i].src) ||
		    (ptr[i].dst & ptr[n].dst))
			break;
		// don't swap insns reading PC if it's not a word pool load
		// (ptr[i].op&0xf700000) != EOP_C_AM2_IMM(0,0,0,1,0,0,0))
		if ((ptr[i].src & M1(PC)) && (ptr[i].op&0xf700000) != 0x5100000)
			break;

		// calculate ARM920T interlock cycles (differences only)
#define D2(x,y) ((ptr[x].dst & ptr[y].src)?((ptr[x].src >> CYC2) & 1):0)
#define D1(x,y) ((ptr[x].dst & ptr[y].src)?((ptr[x].src >> CYC1) & 3):0)
		// insn sequence: [..., i-2, i-1, i, i+1, ..., n-2, n-1, n]
		deps -= D2(i-2,i)+D2(i-1,i+1)+D2(n-2,n ) + D1(i-1,i)+D1(n-1,n);
		deps -= !!(ptr[n].src & M2(CYC1,CYC2));// favour moving LDR down
		// insn sequence: [..., i-2, i-1, n, i, i+1, ..., n-2, n-1]
		deps += D2(i-2,n)+D2(i-1,i )+D2(n ,i+1) + D1(i-1,n)+D1(n ,i);
		deps += !!(ptr[i].src & M2(CYC1,CYC2));// penalize moving LDR up
		// remember best match found
		if (bd > deps)
			bd = deps, bi = i;
	}
	// swap if fewer dependencies
	if (bd < 0) {
		// make room for new insn at bi
		struct emit_op tmp = ptr[n];
		for (i = n-1; i >= bi; i--) {
			ptr[i+1] = ptr[i];
			if (ptr[i].src & M1(PC))
				emith_pool_adjust(n-i+1, 1);
		}
		// insert new insn at bi
		ptr[bi] = tmp;
		if (ptr[bi].src & M1(PC))
			emith_pool_adjust(1, bi-n);
	}
	if (dst & M1(PC)) {
		// commit everything if a branch insn is emitted
		for (i = 1; i <= emit_index+1; i++)
			EMIT_PTR(emit_ptr, emit_cache[i].op);
		emit_index = 0;
	} else if (emit_index < EMIT_CACHE_SIZE) {
		// queue not yet full
		emit_index++;
	} else {
		// commit oldest insn from cache
		EMIT_PTR(emit_ptr, emit_cache[1].op);
		for (i = 0; i <= emit_index; i++)
			emit_cache[i] = emit_cache[i+1];
	}
}

static void emith_flush(void)
{
	int i;
	void *emit_ptr = tcache_ptr - emit_index*sizeof(u32);

	for (i = 1; i <= emit_index; i++)
		EMIT_PTR(emit_ptr, emit_cache[i].op);
	emit_index = 0;
}
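
/* Illustrative example (an assumption about typical output, not taken from
 * this file): the reordering above tries to hide the ARM920T load-use
 * interlock. If the queue holds "ldr r0,[r7]; add r1,r0,r0" and an
 * independent "mov r2,#1" is emitted next, the mov is placed between the ldr
 * and the add so the load result is not consumed in the very next cycle. */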
#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
#define A_COND_HS 0x2
#define A_COND_LO 0x3
#define A_COND_MI 0x4
#define A_COND_PL 0x5
#define A_COND_VS 0x6
#define A_COND_VC 0x7
#define A_COND_HI 0x8
#define A_COND_LS 0x9
#define A_COND_GE 0xa
#define A_COND_LT 0xb
#define A_COND_GT 0xc
#define A_COND_LE 0xd
#define A_COND_CS A_COND_HS
#define A_COND_CC A_COND_LO
#define A_COND_NV 0xf // Not Valid (aka NeVer :-) - ATTN: not a real condition!

/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL
#define DCOND_HI A_COND_HI
#define DCOND_HS A_COND_HS
#define DCOND_LO A_COND_LO
#define DCOND_GE A_COND_GE
#define DCOND_GT A_COND_GT
#define DCOND_LT A_COND_LT
#define DCOND_LS A_COND_LS
#define DCOND_LE A_COND_LE
#define DCOND_VS A_COND_VS
#define DCOND_VC A_COND_VC
#define DCOND_CS A_COND_HS
#define DCOND_CC A_COND_LO

/* addressing mode 1 */
#define A_AM1_LSL 0
#define A_AM1_LSR 1
#define A_AM1_ASR 2
#define A_AM1_ROR 3

#define A_AM1_IMM(ror2,imm8) (((ror2)<<8) | (imm8) | 0x02000000)
#define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
#define A_AM1_REG_XREG(rs,shift_op,rm) (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))

/* data processing op */
#define A_OP_AND 0x0
#define A_OP_EOR 0x1
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
#define A_OP_ADC 0x5
#define A_OP_SBC 0x6
#define A_OP_RSC 0x7
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_CMN 0xb
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe
#define A_OP_MVN 0xf

// operation specific register usage in DOP
#define A_Rn(op,rn) (((op)&0xd)!=0xd ? rn:NO) // no rn for MOV,MVN
#define A_Rd(op,rd) (((op)&0xc)!=0x8 ? rd:NO) // no rd for TST,TEQ,CMP,CMN
// CPSR is dst if S set, CPSR is src if op is ADC/SBC/RSC or shift is RRX
#define A_Sd(s) ((s) ? SR:NO)
#define A_Sr(op,sop) (((op)>=0x5 && (op)<=0x7) || (sop)>>4==A_AM1_ROR<<1 ? SR:NO)

#define EOP_C_DOP_X(cond,op,s,rn,rd,sop,rm,rs) \
	EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (sop), \
		M2(A_Rd(op,rd),A_Sd(s)), M5(A_Sr(op,sop),A_Rn(op,rn),rm,rs,rs==NO?NO:CYC1))

#define EOP_C_DOP_IMM( cond,op,s,rn,rd,ror2,imm8) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8), NO, NO)
#define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm), rm, NO)
#define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs, shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs, shift_op,rm), rm, rs)

#define EOP_MOV_IMM(rd, ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_MVN_IMM(rd, ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
#define EOP_TST_IMM( rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
#define EOP_CMP_IMM( rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
#define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_IMM_C(cond,rd, ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_REG(cond,s,rd, rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_MVN_REG(cond,s,rd, rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_CMP_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)

#define EOP_MOV_REG2(s,rd, rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
#define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
#define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)

#define EOP_MOV_REG_SIMPLE(rd,rm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
#define EOP_MOV_REG_LSL(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
#define EOP_MOV_REG_LSR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
#define EOP_MOV_REG_ASR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
#define EOP_MOV_REG_ROR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)

#define EOP_ORR_REG_SIMPLE(rd,rm) EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
#define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)

#define EOP_ADD_REG_SIMPLE(rd,rm) EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)

#define EOP_TST_REG_SIMPLE(rn,rm) EOP_TST_REG(A_COND_AL, rn, 0,A_AM1_LSL,rm)

#define EOP_MOV_REG2_LSL(rd, rm,rs) EOP_MOV_REG2(0,rd, rm,A_AM1_LSL,rs)
#define EOP_MOV_REG2_ROR(rd, rm,rs) EOP_MOV_REG2(0,rd, rm,A_AM1_ROR,rs)
#define EOP_ADD_REG2_LSL(rd,rn,rm,rs) EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
#define EOP_SUB_REG2_LSL(rd,rn,rm,rs) EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)

/* addressing mode 2 */
#define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
	EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
		((offset_12) & 0xfff), M1(l?rd:MEM), M3(rn,l?MEM:rd,l?b?CYC2:CYC1:NO))

#define EOP_C_AM2_REG(cond,u,b,l,rn,rd,shift_imm,shift_op,rm) \
	EMIT(((cond)<<28) | 0x07000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
		A_AM1_REG_XIMM(shift_imm, shift_op, rm), M1(l?rd:MEM), M4(rn,rm,l?MEM:rd,l?b?CYC2:CYC1:NO))

/* addressing mode 3 */
#define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
	EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
		((s)<<6) | ((h)<<5) | (immed_reg), M1(l?rd:MEM), M4(rn,r?NO:immed_reg,l?MEM:rd,l?CYC2:NO))

#define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))
#define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm) EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)

/* ldr and str */
#define EOP_LDR_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,(offset_12) >= 0,0,1,rn,rd,pabs(offset_12))
#define EOP_LDRB_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,(offset_12) >= 0,1,1,rn,rd,pabs(offset_12))
#define EOP_STR_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,(offset_12) >= 0,0,0,rn,rd,pabs(offset_12))

#define EOP_LDR_IMM( rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,(offset_12) >= 0,0,1,rn,rd,pabs(offset_12))
#define EOP_LDR_SIMPLE(rd,rn) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
#define EOP_STR_IMM( rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,(offset_12) >= 0,0,0,rn,rd,pabs(offset_12))
#define EOP_STR_SIMPLE(rd,rn) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)

#define EOP_LDR_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,1,rn,rd,shift_imm,A_AM1_LSL,rm)
#define EOP_LDR_REG_LSL_WB(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,3,rn,rd,shift_imm,A_AM1_LSL,rm)
#define EOP_LDRB_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,1,1,rn,rd,shift_imm,A_AM1_LSL,rm)
#define EOP_STR_REG_LSL_WB(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,2,rn,rd,shift_imm,A_AM1_LSL,rm)

#define EOP_LDRH_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,(offset_8) >= 0,1,rn,rd,0,1,pabs(offset_8))
#define EOP_LDRH_REG2(cond,rd,rn,rm) EOP_C_AM3_REG(cond,1,1,rn,rd,0,1,rm)

#define EOP_LDRH_IMM( rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,(offset_8) >= 0,1,rn,rd,0,1,pabs(offset_8))
#define EOP_LDRH_SIMPLE(rd,rn) EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
#define EOP_LDRH_REG( rd,rn,rm) EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
#define EOP_STRH_IMM( rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,(offset_8) >= 0,0,rn,rd,0,1,pabs(offset_8))
#define EOP_STRH_SIMPLE(rd,rn) EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
#define EOP_STRH_REG( rd,rn,rm) EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)

#define EOP_LDRSB_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,(offset_8) >= 0,1,rn,rd,1,0,pabs(offset_8))
#define EOP_LDRSB_REG2(cond,rd,rn,rm) EOP_C_AM3_REG(cond,1,1,rn,rd,1,0,rm)
#define EOP_LDRSH_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,(offset_8) >= 0,1,rn,rd,1,1,pabs(offset_8))
#define EOP_LDRSH_REG2(cond,rd,rn,rm) EOP_C_AM3_REG(cond,1,1,rn,rd,1,1,rm)

/* ldm and stm */
#define EOP_XXM(cond,p,u,s,w,l,rn,list) \
	EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list), \
		M2(rn,l?NO:MEM)|(l?list:0), M3(rn,l?MEM:NO,l?CYC2:NO)|(l?0:list))

#define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list)
#define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list)

#define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,SP,list)
#define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,SP,list)

/* branches */
#define EOP_C_BX(cond,rm) \
	EMIT(((cond)<<28) | 0x012fff10 | (rm), M1(PC), M1(rm))

#define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \
	EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))

#define EOP_C_B(cond,l,signed_immed_24) \
	EMIT(((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24), M2(PC,l?LR:NO), M1(PC))

#define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
#define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)

/* misc */
#define EOP_C_MUL(cond,s,rd,rs,rm) \
	EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm), M2(rd,s?SR:NO), M3(rs,rm,CYC2))

#define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm), M3(rdhi,rdlo,s?SR:NO), M4(rs,rm,CYC1,CYC2))

#define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm), M3(rdhi,rdlo,s?SR:NO), M4(rs,rm,CYC1,CYC2))

#define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm), M3(rdhi,rdlo,s?SR:NO), M6(rs,rm,rdlo,rdhi,CYC1,CYC2))

#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm

#define EOP_C_MRS(cond,rd) \
	EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12), M1(rd), M1(SR))

#define EOP_C_MSR_IMM(cond,ror2,imm) \
	EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm), M1(SR), 0) // cpsr_f

#define EOP_C_MSR_REG(cond,rm) \
	EMIT(((cond)<<28) | 0x0128f000 | (rm), M1(SR), M1(rm)) // cpsr_f

#define EOP_MRS(rd) EOP_C_MRS(A_COND_AL,rd)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm) EOP_C_MSR_REG(A_COND_AL,rm)

#define EOP_MOVW(rd,imm) \
	EMIT(0xe3000000 | ((rd)<<12) | ((imm)&0xfff) | (((imm)<<4)&0xf0000), M1(rd), NO)

#define EOP_MOVT(rd,imm) \
	EMIT(0xe3400000 | ((rd)<<12) | (((imm)>>16)&0xfff) | (((imm)>>12)&0xf0000), M1(rd), NO)

// host literal pool; must be significantly smaller than 1024 (max LDR offset = 4096)
#define MAX_HOST_LITERALS 128
static u32 literal_pool[MAX_HOST_LITERALS];
static u32 *literal_insn[MAX_HOST_LITERALS];
static int literal_pindex, literal_iindex;

static int emith_pool_literal(u32 imm, int *offs)
{
	int idx = literal_pindex - 8; // max look behind in pool
	// see if one of the last literals was the same (or close enough)
	for (idx = (idx < 0 ? 0 : idx); idx < literal_pindex; idx++)
		if (abs((int)(imm - literal_pool[idx])) <= 0xff)
			break;
	if (idx == literal_pindex) // store new literal
		literal_pool[literal_pindex++] = imm;
	*offs = imm - literal_pool[idx];
	return idx;
}
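
/* Illustrative note (added, not in the original comments): the "close enough"
 * check lets nearby constants share one pool slot. If 0x40000000 is already
 * pooled, a request for 0x40000004 returns the same index with *offs == 4,
 * and emith_op_imm2() below fixes it up with an add rd, rd, #4 after the
 * pool load. */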
// XXX: RSB, *S will break if 1 insn is not enough
static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
{
	int ror2;
	u32 v;
	int i;

	if (cond == A_COND_NV)
		return;

	do {
		u32 u;
		// try to get the topmost byte empty to possibly save an insn
		for (v = imm, ror2 = 0; (v >> 24) && ror2 < 32/2; ror2++)
			v = (v << 2) | (v >> 30);

		switch (op) {
		case A_OP_MOV:
		case A_OP_MVN:
			rn = 0;
			// use MVN if more bits 1 than 0
			if (count_bits(imm) > 16) {
				imm = ~imm;
				op = A_OP_MVN;
				ror2 = -1;
				break;
			}
			// count insns needed for mov/orr #imm
#ifdef HAVE_ARMV7
			for (i = 2, u = v; i > 0; i--, u >>= 8)
				while (u > 0xff && !(u & 3))
					u >>= 2;
			if (u) { // 3+ insns needed...
				if (op == A_OP_MVN)
					imm = ~imm;
				// ...prefer movw/movt
				EOP_MOVW(rd, imm);
				if (imm & 0xffff0000)
					EOP_MOVT(rd, imm);
				return;
			}
#else
			for (i = 2, u = v; i > 0; i--, u >>= 8)
				while (u > 0xff && !(u & 3))
					u >>= 2;
			if (u) { // 3+ insns needed...
				if (op == A_OP_MVN)
					imm = ~imm;
				// ...emit literal load
				int idx, o;
				if (literal_iindex >= MAX_HOST_LITERALS) {
					elprintf(EL_STATUS|EL_SVP|EL_ANOMALY,
						"pool overflow");
					exit(1);
				}
				idx = emith_pool_literal(imm, &o);
				literal_insn[literal_iindex++] = (u32 *)tcache_ptr;
				EOP_LDR_IMM2(cond, rd, PC, idx * sizeof(u32));
				if (o > 0)
					EOP_C_DOP_IMM(cond, A_OP_ADD, 0,rd,rd,0,o);
				else if (o < 0)
					EOP_C_DOP_IMM(cond, A_OP_SUB, 0,rd,rd,0,-o);
				return;
			}
#endif
			break;

		case A_OP_AND:
			// AND must fit into 1 insn. if not, use BIC
			for (u = v; u > 0xff && !(u & 3); u >>= 2) ;
			if (u >> 8) {
				imm = ~imm;
				op = A_OP_BIC;
				ror2 = -1;
			}
			break;

		case A_OP_SUB:
		case A_OP_ADD:
			// swap ADD and SUB if more bits 1 than 0
			if (s == 0 && count_bits(imm) > 16) {
				imm = -imm;
				op ^= (A_OP_ADD^A_OP_SUB);
				ror2 = -1;
			}
		case A_OP_EOR:
		case A_OP_ORR:
		case A_OP_BIC:
			if (s == 0 && imm == 0 && rd == rn)
				return;
			break;
		}
	} while (ror2 < 0);

	do {
		// shift down to get 'best' rot2
		while (v > 0xff && !(v & 3))
			v >>= 2, ror2--;
		EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0xf, v & 0xff);

		switch (op) {
		case A_OP_MOV: op = A_OP_ORR; break;
		case A_OP_MVN: op = A_OP_BIC; break;
		case A_OP_ADC: op = A_OP_ADD; break;
		case A_OP_SBC: op = A_OP_SUB; break;
		}
		rn = rd;

		v >>= 8, ror2 -= 8/2;
	} while (v);
}
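
/* Illustrative example (worked out by hand, not from the original comments):
 * ARM data processing immediates are an 8-bit value rotated right by an even
 * amount, so a constant like 0x00ff00ff is split into two chunks here:
 *   mov rd, #0x000000ff
 *   orr rd, rd, #0x00ff0000
 * Constants needing 3+ chunks fall back to movw/movt (ARMv7) or a PC-relative
 * literal pool load (see above). */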
#define emith_op_imm(cond, s, op, r, imm) \
	emith_op_imm2(cond, s, op, r, r, imm)

// test op
#define emith_top_imm(cond, op, r, imm) do { \
	u32 ror2, v; \
	for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
		ror2--; \
	EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
} while (0)

#define is_offset_24(val) \
	((val) >= (int)0xff000000 && (val) <= 0x00ffffff)

static int emith_xbranch(int cond, void *target, int is_call)
{
	int val = (u32 *)target - (u32 *)tcache_ptr - 2;
	int direct = is_offset_24(val);
	u32 *start_ptr = (u32 *)tcache_ptr;

	if (cond == A_COND_NV)
		return 0; // never taken

	if (direct)
	{
		EOP_C_B(cond,is_call,val & 0xffffff);		// b, bl target
	}
	else
	{
#ifdef __EPOC32__
//		elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
		if (is_call)
			EOP_ADD_IMM(LR,PC,0,8);			// add lr,pc,#8
		EOP_C_AM2_IMM(cond,1,0,1,PC,PC,0);		// ldrcc pc,[pc]
		EOP_MOV_REG_SIMPLE(PC,PC);			// mov pc, pc
		EMIT((u32)target,M1(PC),0);
#else
		// should never happen
		elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %8p->%8p", target, tcache_ptr);
		exit(1);
#endif
	}

	return (u32 *)tcache_ptr - start_ptr;
}

static void emith_pool_commit(int jumpover)
{
	int i, sz = literal_pindex * sizeof(u32);
	u8 *pool = (u8 *)tcache_ptr;

	// nothing to commit if pool is empty
	if (sz == 0)
		return;
	// need branch over pool if not at block end
	if (jumpover) {
		pool += sizeof(u32);
		emith_xbranch(A_COND_AL, (u8 *)pool + sz, 0);
	}
	emith_flush();
	// safety check - pool must be after insns and reachable
	if ((u32)(pool - (u8 *)literal_insn[0] + 8) > 0xfff) {
		elprintf(EL_STATUS|EL_SVP|EL_ANOMALY,
			"pool offset out of range");
		exit(1);
	}
	// copy pool and adjust addresses in insns accessing the pool
	memcpy(pool, literal_pool, sz);
	for (i = 0; i < literal_iindex; i++) {
		*literal_insn[i] += (u8 *)pool - ((u8 *)literal_insn[i] + 8);
	}
	// count pool constants as insns for statistics
	for (i = 0; i < literal_pindex; i++)
		COUNT_OP;

	tcache_ptr = (void *)((u8 *)pool + sz);
	literal_pindex = literal_iindex = 0;
}

static inline void emith_pool_check(void)
{
	// check if pool must be committed
	if (literal_iindex > MAX_HOST_LITERALS-4 || (literal_pindex &&
			(u8 *)tcache_ptr - (u8 *)literal_insn[0] > 0xe00))
		// pool full, or displacement is approaching the limit
		emith_pool_commit(1);
}
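
/* Illustrative flow sketch (an assumption about typical use, not from this
 * file): while translating, constants that don't fit the immediate encoding
 * are emitted as "ldr rd, [pc, #idx*4]" with the slot recorded in
 * literal_insn[]. The compiler calls emith_pool_check() periodically and
 * emith_pool_commit(0) at block end; commit copies literal_pool[] right after
 * the code and patches each recorded ldr's 12-bit offset so it points into
 * the just-placed pool. */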
static inline void emith_pool_adjust(int tcache_offs, int move_offs)
{
	u32 *ptr = (u32 *)tcache_ptr - tcache_offs;
	int i;

	for (i = literal_iindex-1; i >= 0 && literal_insn[i] >= ptr; i--)
		if (literal_insn[i] == ptr)
			literal_insn[i] += move_offs;
}

#define EMITH_HINT_COND(cond) /**/

#define JMP_POS(ptr) { \
	ptr = tcache_ptr; \
	EMIT(0,M1(PC),0); \
}

#define JMP_EMIT(cond, ptr) { \
	u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
	emith_flush(); /* NO insn swapping across jump targets */ \
	EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \
}

#define EMITH_JMP_START(cond) { \
	void *cond_ptr; \
	JMP_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
	JMP_EMIT(cond, cond_ptr); \
}

// fake "simple" or "short" jump - using cond insns instead
#define EMITH_NOTHING1(cond) \
	(void)(cond)

#define EMITH_SJMP_START(cond)	EMITH_NOTHING1(cond)
#define EMITH_SJMP_END(cond)	EMITH_NOTHING1(cond)
#define EMITH_SJMP2_START(cond)	EMITH_NOTHING1(cond)
#define EMITH_SJMP2_MID(cond)	EMITH_JMP_START((cond)^1) // inverse cond
#define EMITH_SJMP2_END(cond)	EMITH_JMP_END((cond)^1)
#define EMITH_SJMP3_START(cond)	EMITH_NOTHING1(cond)
#define EMITH_SJMP3_MID(cond)	EMITH_NOTHING1(cond)
#define EMITH_SJMP3_END()

#define emith_move_r_r_c(cond, d, s) \
	EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,0)
#define emith_move_r_r(d, s) \
	emith_move_r_r_c(A_COND_AL, d, s)
#define emith_move_r_r_ptr_c(cond, d, s) \
	emith_move_r_r_c(cond, d, s)
#define emith_move_r_r_ptr(d, s) \
	emith_move_r_r(d, s)

#define emith_mvn_r_r(d, s) \
	EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)

#define emith_add_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_ADD_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_add_r_r_r_lsl_ptr(d, s1, s2, lslimm) \
	emith_add_r_r_r_lsl(d, s1, s2, lslimm)
#define emith_adc_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_ADC_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_addf_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_ADD_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_addf_r_r_r_lsr(d, s1, s2, lslimm) \
	EOP_ADD_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSR,lslimm)
#define emith_adcf_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_ADC_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_sub_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_SUB_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_sbc_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_SBC_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_subf_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_SUB_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_sbcf_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_SBC_REG(A_COND_AL,1,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_or_r_r_r_lsr(d, s1, s2, lsrimm) \
	EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)

#define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \
	EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)

#define emith_and_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_AND_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_or_r_r_lsl(d, s, lslimm) \
	emith_or_r_r_r_lsl(d, d, s, lslimm)
#define emith_or_r_r_lsr(d, s, lsrimm) \
	emith_or_r_r_r_lsr(d, d, s, lsrimm)

#define emith_eor_r_r_lsl(d, s, lslimm) \
	emith_eor_r_r_r_lsl(d, d, s, lslimm)
#define emith_eor_r_r_lsr(d, s, lsrimm) \
	emith_eor_r_r_r_lsr(d, d, s, lsrimm)

#define emith_add_r_r_r(d, s1, s2) \
	emith_add_r_r_r_lsl(d, s1, s2, 0)
#define emith_adc_r_r_r(d, s1, s2) \
	emith_adc_r_r_r_lsl(d, s1, s2, 0)

#define emith_addf_r_r_r(d, s1, s2) \
	emith_addf_r_r_r_lsl(d, s1, s2, 0)
#define emith_adcf_r_r_r(d, s1, s2) \
	emith_adcf_r_r_r_lsl(d, s1, s2, 0)

#define emith_sub_r_r_r(d, s1, s2) \
	emith_sub_r_r_r_lsl(d, s1, s2, 0)
#define emith_sbc_r_r_r(d, s1, s2) \
	emith_sbc_r_r_r_lsl(d, s1, s2, 0)

#define emith_subf_r_r_r(d, s1, s2) \
	emith_subf_r_r_r_lsl(d, s1, s2, 0)
#define emith_sbcf_r_r_r(d, s1, s2) \
	emith_sbcf_r_r_r_lsl(d, s1, s2, 0)

#define emith_or_r_r_r(d, s1, s2) \
	emith_or_r_r_r_lsl(d, s1, s2, 0)
#define emith_eor_r_r_r(d, s1, s2) \
	emith_eor_r_r_r_lsl(d, s1, s2, 0)
#define emith_and_r_r_r(d, s1, s2) \
	emith_and_r_r_r_lsl(d, s1, s2, 0)

#define emith_add_r_r(d, s) \
	emith_add_r_r_r(d, d, s)
#define emith_add_r_r_ptr(d, s) \
	emith_add_r_r_r(d, d, s)
#define emith_adc_r_r(d, s) \
	emith_adc_r_r_r(d, d, s)

#define emith_sub_r_r(d, s) \
	emith_sub_r_r_r(d, d, s)
#define emith_sbc_r_r(d, s) \
	emith_sbc_r_r_r(d, d, s)

#define emith_negc_r_r(d, s) \
	EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,0,s,d,0,0)

#define emith_and_r_r_c(cond, d, s) \
	EOP_AND_REG(cond,0,d,d,s,A_AM1_LSL,0)
#define emith_and_r_r(d, s) \
	EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_or_r_r(d, s) \
	emith_or_r_r_r(d, d, s)
#define emith_eor_r_r(d, s) \
	emith_eor_r_r_r(d, d, s)

#define emith_tst_r_r(d, s) \
	EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)
#define emith_tst_r_r_ptr(d, s) \
	emith_tst_r_r(d, s)

#define emith_teq_r_r(d, s) \
	EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)
#define emith_cmp_r_r(d, s) \
	EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_addf_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
#define emith_subf_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
#define emith_adcf_r_r(d, s) \
	EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
#define emith_sbcf_r_r(d, s) \
	EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
#define emith_eorf_r_r(d, s) \
	EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_move_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)
#define emith_move_r_ptr_imm(r, imm) \
	emith_move_r_imm(r, (u32)(imm))

#define emith_add_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)
#define emith_adc_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm)
#define emith_adcf_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 1, A_OP_ADC, r, imm)
#define emith_sub_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)
#define emith_bic_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)
#define emith_and_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)
#define emith_or_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)
#define emith_eor_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)
#define emith_eor_r_imm_ptr(r, imm) \
	emith_eor_r_imm(r, imm)

// note: only use 8bit imm for these
#define emith_tst_r_imm(r, imm) \
	emith_top_imm(A_COND_AL, A_OP_TST, r, imm)
#define emith_cmp_r_imm(r, imm) do { \
	u32 op_ = A_OP_CMP, imm_ = (u8)imm; \
	if ((s8)imm_ < 0) { \
		imm_ = (u8)-imm_; \
		op_ = A_OP_CMN; \
	} \
	emith_top_imm(A_COND_AL, op_, r, imm_); \
} while (0)

#define emith_subf_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)

#define emith_move_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_MOV, r, imm)
#define emith_add_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ADD, r, imm)
#define emith_sub_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_SUB, r, imm)
#define emith_or_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ORR, r, imm)
#define emith_eor_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_EOR, r, imm)
#define emith_eor_r_imm_ptr_c(cond, r, imm) \
	emith_eor_r_imm_c(cond, r, imm)
#define emith_bic_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_BIC, r, imm)
#define emith_tst_r_imm_c(cond, r, imm) \
	emith_top_imm(cond, A_OP_TST, r, imm)

#define emith_move_r_imm_s8_patchable(r, imm) do { \
	emith_flush(); /* pin insn at current tcache_ptr for patching */ \
	if ((s8)(imm) < 0) \
		EOP_MVN_IMM(r, 0, (u8)~(imm)); \
	else \
		EOP_MOV_IMM(r, 0, (u8)(imm)); \
} while (0)
#define emith_move_r_imm_s8_patch(ptr, imm) do { \
	u32 *ptr_ = (u32 *)ptr; u32 op_ = *ptr_ & 0xfe1ff000; \
	if ((s8)(imm) < 0) \
		EMIT_PTR(ptr_, op_ | (A_OP_MVN<<21) | (u8)~(imm));\
	else \
		EMIT_PTR(ptr_, op_ | (A_OP_MOV<<21) | (u8)(imm));\
} while (0)

#define emith_and_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)
#define emith_add_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)
#define emith_add_r_r_ptr_imm(d, s, imm) \
	emith_add_r_r_imm(d, s, imm)
#define emith_sub_r_r_imm_c(cond, d, s, imm) \
	emith_op_imm2(cond, 0, A_OP_SUB, d, s, (imm))
#define emith_sub_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)
#define emith_subf_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 1, A_OP_SUB, d, s, imm)
#define emith_or_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_ORR, d, s, imm)
#define emith_eor_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_EOR, d, s, imm)

#define emith_neg_r_r(d, s) \
	EOP_RSB_IMM(d, s, 0, 0)

#define emith_lsl(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)
#define emith_lsr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)
#define emith_asr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)

#define emith_ror_c(cond, d, s, cnt) \
	EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt)
#define emith_ror(d, s, cnt) \
	emith_ror_c(A_COND_AL, d, s, cnt)
#define emith_rol(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt));
#define emith_lslf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)
#define emith_lsrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)
#define emith_asrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)

// note: only C flag updated correctly
#define emith_rolf(d, s, cnt) do { \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
	/* we don't have ROL so we shift to get the right carry */ \
	EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
} while (0)
#define emith_rorf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)

#define emith_rolcf(d) \
	emith_adcf_r_r(d, d)
#define emith_rolc(d) \
	emith_adc_r_r(d, d)

#define emith_rorcf(d) \
	EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */
#define emith_rorc(d) \
	EOP_MOV_REG(A_COND_AL,0,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */

#define emith_negcf_r_r(d, s) \
	EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)

#define emith_mul(d, s1, s2) do { \
	if ((d) != (s1)) /* rd != rm limitation */ \
		EOP_MUL(d, s1, s2); \
	else \
		EOP_MUL(d, s2, s1); \
} while (0)

#define emith_mul_u64(dlo, dhi, s1, s2) \
	EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)
#define emith_mul_s64(dlo, dhi, s1, s2) \
	EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mula_s64_c(cond, dlo, dhi, s1, s2) \
	EOP_C_SMLAL(cond,0,dhi,dlo,s1,s2)
#define emith_mula_s64(dlo, dhi, s1, s2) \
	EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)

// misc
#define emith_read_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDR_IMM2(cond, r, rs, offs)
#define emith_read_r_r_offs_ptr_c(cond, r, rs, offs) \
	emith_read_r_r_offs_c(cond, r, rs, offs)
#define emith_read_r_r_r_c(cond, r, rs, rm) \
	EOP_LDR_REG_LSL(cond, r, rs, rm, 0)
#define emith_read_r_r_offs(r, rs, offs) \
	emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read_r_r_offs_ptr(r, rs, offs) \
	emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read_r_r_r(r, rs, rm) \
	EOP_LDR_REG_LSL(A_COND_AL, r, rs, rm, 0)

#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRB_IMM2(cond, r, rs, offs)
#define emith_read8_r_r_r_c(cond, r, rs, rm) \
	EOP_LDRB_REG_LSL(cond, r, rs, rm, 0)
#define emith_read8_r_r_offs(r, rs, offs) \
	emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read8_r_r_r(r, rs, rm) \
	emith_read8_r_r_r_c(A_COND_AL, r, rs, rm)

#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRH_IMM2(cond, r, rs, offs)
#define emith_read16_r_r_r_c(cond, r, rs, rm) \
	EOP_LDRH_REG2(cond, r, rs, rm)
#define emith_read16_r_r_offs(r, rs, offs) \
	emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read16_r_r_r(r, rs, rm) \
	emith_read16_r_r_r_c(A_COND_AL, r, rs, rm)

#define emith_read8s_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRSB_IMM2(cond, r, rs, offs)
#define emith_read8s_r_r_r_c(cond, r, rs, rm) \
	EOP_LDRSB_REG2(cond, r, rs, rm)
#define emith_read8s_r_r_offs(r, rs, offs) \
	emith_read8s_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read8s_r_r_r(r, rs, rm) \
	emith_read8s_r_r_r_c(A_COND_AL, r, rs, rm)

#define emith_read16s_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRSH_IMM2(cond, r, rs, offs)
#define emith_read16s_r_r_r_c(cond, r, rs, rm) \
	EOP_LDRSH_REG2(cond, r, rs, rm)
#define emith_read16s_r_r_offs(r, rs, offs) \
	emith_read16s_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_read16s_r_r_r(r, rs, rm) \
	emith_read16s_r_r_r_c(A_COND_AL, r, rs, rm)

#define emith_write_r_r_offs_c(cond, r, rs, offs) \
	EOP_STR_IMM2(cond, r, rs, offs)
#define emith_write_r_r_offs_ptr_c(cond, r, rs, offs) \
	emith_write_r_r_offs_c(cond, r, rs, offs)
#define emith_write_r_r_offs(r, rs, offs) \
	emith_write_r_r_offs_c(A_COND_AL, r, rs, offs)
#define emith_write_r_r_offs_ptr(r, rs, offs) \
	emith_write_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_ctx_read_c(cond, r, offs) \
	emith_read_r_r_offs_c(cond, r, CONTEXT_REG, offs)
#define emith_ctx_read(r, offs) \
	emith_ctx_read_c(A_COND_AL, r, offs)
#define emith_ctx_read_ptr(r, offs) \
	emith_ctx_read(r, offs)
#define emith_ctx_write(r, offs) \
	EOP_STR_IMM(r, CONTEXT_REG, offs)

#define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \
	int v_, r_ = r, c_ = count, b_ = CONTEXT_REG; \
	for (v_ = 0; c_; c_--, r_++) \
		v_ |= M1(r_); \
	if ((offs) != 0) { \
		EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2);\
		b_ = tmpr; \
	} \
	op(b_,v_); \
} while (0)

#define emith_ctx_read_multiple(r, offs, count, tmpr) \
	emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr)
#define emith_ctx_write_multiple(r, offs, count, tmpr) \
	emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr)

#define emith_clear_msb_c(cond, d, s, count) do { \
	u32 t; \
	if ((count) <= 8) { \
		t = 8 - (count); \
		t = (0xff << t) & 0xff; \
		EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
	} else if ((count) >= 24) { \
		t = (count) - 24; \
		t = 0xff >> t; \
		EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
	} else { \
		EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \
		EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \
	} \
} while (0)

#define emith_clear_msb(d, s, count) \
	emith_clear_msb_c(A_COND_AL, d, s, count)

#define emith_sext(d, s, bits) do { \
	EOP_MOV_REG_LSL(d,s,32 - (bits)); \
	EOP_MOV_REG_ASR(d,d,32 - (bits)); \
} while (0)

#define emith_uext_ptr(r) /**/

#define emith_do_caller_regs(mask, func) do { \
	u32 _reg_mask = (mask) & 0x500f; \
	if (_reg_mask) { \
		if (__builtin_parity(_reg_mask) == 1) \
			_reg_mask |= 0x10; /* eabi align */ \
		func(_reg_mask); \
	} \
} while (0)

#define emith_save_caller_regs(mask) \
	emith_do_caller_regs(mask, EOP_STMFD_SP)
#define emith_restore_caller_regs(mask) \
	emith_do_caller_regs(mask, EOP_LDMFD_SP)
// up to 4 args
#define emith_pass_arg_r(arg, reg) \
	EOP_MOV_REG_SIMPLE(arg, reg)
#define emith_pass_arg_imm(arg, imm) \
	emith_move_r_imm(arg, imm)

#define emith_jump(target) \
	emith_jump_cond(A_COND_AL, target)
#define emith_jump_patchable(target) \
	emith_jump(target)

#define emith_jump_cond(cond, target) \
	emith_xbranch(cond, target, 0)
#define emith_jump_cond_inrange(target) !0

#define emith_jump_cond_patchable(cond, target) \
	emith_jump_cond(cond, target)

#define emith_jump_patch(ptr, target, pos) do { \
	u32 *ptr_ = (u32 *)ptr; \
	u32 val_ = (u32 *)(target) - ptr_ - 2; \
	*ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
	if ((void *)(pos) != NULL) *(u8 **)(pos) = (u8 *)ptr; \
} while (0)
#define emith_jump_patch_inrange(ptr, target) !0
#define emith_jump_patch_size() 4

#define emith_jump_at(ptr, target) do { \
	u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \
	EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \
} while (0)
#define emith_jump_at_size() 4

#define emith_jump_reg_c(cond, r) \
	EOP_C_BX(cond, r)
#define emith_jump_reg(r) \
	emith_jump_reg_c(A_COND_AL, r)

#define emith_jump_ctx_c(cond, offs) \
	EOP_LDR_IMM2(cond,PC,CONTEXT_REG,offs)
#define emith_jump_ctx(offs) \
	emith_jump_ctx_c(A_COND_AL, offs)

#define emith_call_cond(cond, target) \
	emith_xbranch(cond, target, 1)
#define emith_call(target) \
	emith_call_cond(A_COND_AL, target)

#define emith_call_reg(r) do { \
	emith_move_r_r(LR, PC); \
	EOP_C_BX(A_COND_AL, r); \
} while (0)

#define emith_call_ctx(offs) do { \
	emith_move_r_r(LR, PC); \
	emith_jump_ctx(offs); \
} while (0)

#define emith_abijump_reg(r) \
	emith_jump_reg(r)
#define emith_abijump_reg_c(cond, r) \
	emith_jump_reg_c(cond, r)
#define emith_abicall(target) \
	emith_call(target)
#define emith_abicall_cond(cond, target) \
	emith_call_cond(cond, target)
#define emith_abicall_reg(r) \
	emith_call_reg(r)

#define emith_call_cleanup() /**/

#define emith_ret_c(cond) \
	emith_jump_reg_c(cond, LR)
#define emith_ret() \
	emith_ret_c(A_COND_AL)

#define emith_ret_to_ctx(offs) \
	emith_ctx_write(LR, offs)

#define emith_add_r_ret(r) \
	emith_add_r_r_ptr(r, LR)

/* pushes r12 for eabi alignment */
#define emith_push_ret(r) do { \
	int r_ = (r >= 0 ? r : 12); \
	EOP_STMFD_SP(M2(r_,LR)); \
} while (0)

#define emith_pop_and_ret(r) do { \
	int r_ = (r >= 0 ? r : 12); \
	EOP_LDMFD_SP(M2(r_,PC)); \
} while (0)

#define host_instructions_updated(base, end, force) \
	do { if (force) emith_update_add(base, end); } while (0)

#define host_arg2reg(rd, arg) \
	rd = arg

#define emith_rw_offs_max() 0x1ff // minimum of offset in AM2 and AM3

/* SH2 drc specific */
/* pushes r12 for eabi alignment */
#define emith_sh2_drc_entry() \
	EOP_STMFD_SP(M10(4,5,6,7,8,9,10,11,12,LR))
#define emith_sh2_drc_exit() \
	EOP_LDMFD_SP(M10(4,5,6,7,8,9,10,11,12,PC))

// assumes a is in arg0, tab, func and mask are temp
#define emith_sh2_rcall(a, tab, func, mask) do { \
	emith_lsr(mask, a, SH2_READ_SHIFT); \
	EOP_ADD_REG_LSL(tab, tab, mask, 3); \
	if (func < mask) EOP_LDMIA(tab, M2(func,mask)); /* ldm if possible */ \
	else { emith_read_r_r_offs(func, tab, 0); \
	       emith_read_r_r_offs(mask, tab, 4); } \
	emith_addf_r_r_r(func,func,func); \
} while (0)

// assumes a, val are in arg0 and arg1, tab and func are temp
#define emith_sh2_wcall(a, val, tab, func) do { \
	emith_lsr(func, a, SH2_WRITE_SHIFT); \
	EOP_LDR_REG_LSL(A_COND_AL,func,tab,func,2); \
	emith_move_r_r(2, CONTEXT_REG); /* arg2 */ \
	emith_jump_reg(func); \
} while (0)

#define emith_sh2_dtbf_loop() do { \
	int cr, rn; \
	int tmp_ = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
	emith_sub_r_imm(rn, 1);                /* sub rn, #1 */ \
	emith_bic_r_imm(cr, 1);                /* bic cr, #1 */ \
	emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
	cycles = 0; \
	emith_asrf(tmp_, cr, 2+12);            /* movs tmp_, cr, asr #2+12 */ \
	EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0);     /* movmi tmp_, #0 */ \
	emith_lsl(cr, cr, 20);                 /* mov cr, cr, lsl #20 */ \
	emith_lsr(cr, cr, 20);                 /* mov cr, cr, lsr #20 */ \
	emith_subf_r_r(rn, tmp_);              /* subs rn, tmp_ */ \
	EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0);  /* rsbls tmp_, rn, #0 */ \
	EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */ \
	EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1);    /* orrls cr, #1 */ \
	EOP_MOV_IMM_C(A_COND_LS,rn,0,0);       /* movls rn, #0 */ \
	rcache_free_tmp(tmp_); \
} while (0)

#define emith_sh2_delay_loop(cycles, reg) do { \
	int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL); \
	int t1 = rcache_get_tmp(); \
	int t2 = rcache_get_tmp(); \
	int t3 = rcache_get_tmp(); \
	/* if (sr < 0) return */ \
	emith_asrf(t2, sr, 12); \
	EMITH_JMP_START(DCOND_LE); \
	/* turns = sr.cycles / cycles */ \
	emith_move_r_imm(t3, (u32)((1ULL<<32) / (cycles)) + 1); \
	emith_mul_u64(t1, t2, t2, t3); /* multiply by 1/x */ \
	rcache_free_tmp(t3); \
	if (reg >= 0) { \
		/* if (reg <= turns) turns = reg-1 */ \
		t3 = rcache_get_reg(reg, RC_GR_RMW, NULL); \
		emith_cmp_r_r(t3, t2); \
		emith_sub_r_r_imm_c(DCOND_LS, t2, t3, 1); \
		/* if (reg <= 1) turns = 0 */ \
		emith_cmp_r_imm(t3, 1); \
		emith_move_r_imm_c(DCOND_LS, t2, 0); \
		/* reg -= turns */ \
		emith_sub_r_r(t3, t2); \
	} \
	/* sr.cycles -= turns * cycles; */ \
	emith_move_r_imm(t1, cycles); \
	emith_mul(t1, t2, t1); \
	emith_sub_r_r_r_lsl(sr, sr, t1, 12); \
	EMITH_JMP_END(DCOND_LE); \
	rcache_free_tmp(t1); \
	rcache_free_tmp(t2); \
} while (0)

#define emith_write_sr(sr, srcr) do { \
	emith_lsr(sr, sr, 10); \
	emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
	emith_ror(sr, sr, 22); \
} while (0)
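
/* Descriptive note (added, not original): the lsr/orr-lsl/ror sequence above
 * replaces the low 10 status bits of sr with those of srcr while leaving the
 * upper bits (cycle counter etc.) untouched, without needing a mask constant. */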
#define emith_carry_to_t(srr, is_sub) do { \
	if (is_sub) { /* has inverted C on ARM */ \
		emith_or_r_imm_c(A_COND_CC, srr, 1); \
		emith_bic_r_imm_c(A_COND_CS, srr, 1); \
	} else { \
		emith_or_r_imm_c(A_COND_CS, srr, 1); \
		emith_bic_r_imm_c(A_COND_CC, srr, 1); \
	} \
} while (0)

#define emith_t_to_carry(srr, is_sub) do { \
	if (is_sub) { \
		int t_ = rcache_get_tmp(); \
		emith_eor_r_r_imm(t_, srr, 1); \
		emith_rorf(t_, t_, 1); \
		rcache_free_tmp(t_); \
	} else { \
		emith_rorf(srr, srr, 1); \
		emith_rol(srr, srr, 1); \
	} \
} while (0)

#define emith_tpop_carry(sr, is_sub) do { \
	if (is_sub) \
		emith_eor_r_imm(sr, 1); \
	emith_lsrf(sr, sr, 1); \
} while (0)

#define emith_tpush_carry(sr, is_sub) do { \
	emith_adc_r_r(sr, sr); \
	if (is_sub) \
		emith_eor_r_imm(sr, 1); \
} while (0)
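
/* Descriptive note (added, not original): tpop shifts the T bit (bit 0 of sr)
 * into the ARM carry flag, and tpush shifts the carry back into bit 0 via adc;
 * the eor with 1 compensates for ARM's inverted carry semantics on subtract. */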
/*
 * T = carry(Rn = (Rn << 1) | T)
 * if Q
 *   T ^= !carry(Rn += Rm)
 * else
 *   T ^= !carry(Rn -= Rm)
 */
#define emith_sh2_div1_step(rn, rm, sr) do { \
	void *jmp0, *jmp1; \
	emith_tpop_carry(sr, 0); /* Rn = 2*Rn+T */ \
	emith_adcf_r_r_r(rn, rn, rn); \
	emith_tpush_carry(sr, 0); \
	emith_tst_r_imm(sr, Q);  /* if (Q ^ M) */ \
	JMP_POS(jmp0);           /* beq do_sub */ \
	emith_addf_r_r(rn, rm);  /* Rn += Rm */ \
	emith_eor_r_imm_c(A_COND_CC, sr, T); \
	JMP_POS(jmp1);           /* b done */ \
	JMP_EMIT(A_COND_EQ, jmp0); /* do_sub: */ \
	emith_subf_r_r(rn, rm);  /* Rn -= Rm */ \
	emith_eor_r_imm_c(A_COND_CS, sr, T); \
	JMP_EMIT(A_COND_AL, jmp1); /* done: */ \
} while (0)

/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macl(ml, mh, rn, rm, sr) do { \
	emith_tst_r_imm(sr, S); \
	EMITH_SJMP2_START(DCOND_NE); \
	emith_mula_s64_c(DCOND_EQ, ml, mh, rn, rm); \
	EMITH_SJMP2_MID(DCOND_NE); \
	/* MACH top 16 bits unused if saturated. sign ext for overfl detect */ \
	emith_sext(mh, mh, 16); \
	emith_mula_s64(ml, mh, rn, rm); \
	/* overflow if top 17 bits of MACH aren't all 1 or 0 */ \
	/* to check: add MACH[15] to MACH[31:16]. this is 0 if no overflow */ \
	emith_asrf(rn, mh, 16); /* sum = (MACH>>16) + ((MACH>>15)&1) */ \
	emith_adcf_r_imm(rn, 0); /* (MACH>>15) is in carry after shift */ \
	EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> ov */ \
	emith_move_r_imm_c(DCOND_NE, ml, 0x0000); /* -overflow */ \
	emith_move_r_imm_c(DCOND_NE, mh, 0x8000); \
	EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> +ovl */ \
	emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0xffffffff */ \
	emith_sub_r_imm_c(DCOND_GT, mh, 1); /* 0x00007fff */ \
	EMITH_SJMP_END(DCOND_LE); \
	EMITH_SJMP_END(DCOND_EQ); \
	EMITH_SJMP2_END(DCOND_NE); \
} while (0)

/* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
#define emith_sh2_macw(ml, mh, rn, rm, sr) do { \
	emith_tst_r_imm(sr, S); \
	EMITH_SJMP2_START(DCOND_NE); \
	emith_mula_s64_c(DCOND_EQ, ml, mh, rn, rm); \
	EMITH_SJMP2_MID(DCOND_NE); \
	/* XXX: MACH should be untouched when S is set? */ \
	emith_asr(mh, ml, 31); /* sign ext MACL to MACH for ovrfl check */ \
	emith_mula_s64(ml, mh, rn, rm); \
	/* overflow if top 33 bits of MACH:MACL aren't all 1 or 0 */ \
	/* to check: add MACL[31] to MACH. this is 0 if no overflow */ \
	emith_addf_r_r_r_lsr(mh, mh, ml, 31); /* sum = MACH + ((MACL>>31)&1) */ \
	EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> overflow */ \
	/* XXX: LSB signalling only in SH1, or in SH2 too? */ \
	emith_move_r_imm_c(DCOND_NE, mh, 0x00000001); /* LSB of MACH */ \
	emith_move_r_imm_c(DCOND_NE, ml, 0x80000000); /* negative ovrfl */ \
	EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> positive ovrfl */ \
	emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0x7fffffff */ \
	EMITH_SJMP_END(DCOND_LE); \
	EMITH_SJMP_END(DCOND_EQ); \
	EMITH_SJMP2_END(DCOND_NE); \
} while (0)

#ifdef T
// T bit handling
static int tcond = -1;

#define emith_invert_cond(cond) \
	((cond) ^ 1)

#define emith_clr_t_cond(sr) \
	(void)sr

#define emith_set_t_cond(sr, cond) \
	tcond = cond

#define emith_get_t_cond() \
	tcond

#define emith_invalidate_t() \
	tcond = -1

#define emith_set_t(sr, val) \
	tcond = ((val) ? A_COND_AL: A_COND_NV)

static void emith_sync_t(int sr)
{
	if (tcond == A_COND_AL)
		emith_or_r_imm(sr, T);
	else if (tcond == A_COND_NV)
		emith_bic_r_imm(sr, T);
	else if (tcond >= 0) {
		emith_bic_r_imm_c(emith_invert_cond(tcond),sr, T);
		emith_or_r_imm_c(tcond, sr, T);
	}
	tcond = -1;
}

static int emith_tst_t(int sr, int tf)
{
	if (tcond < 0) {
		emith_tst_r_imm(sr, T);
		return tf ? DCOND_NE: DCOND_EQ;
	} else if (tcond >= A_COND_AL) {
		// MUST sync because A_COND_NV isn't a real condition
		emith_sync_t(sr);
		emith_tst_r_imm(sr, T);
		return tf ? DCOND_NE: DCOND_EQ;
	} else
		return tf ? tcond : emith_invert_cond(tcond);
}
#endif