/*
 * Basic macros to emit ARM instructions and some utils
 * Copyright (C) 2008,2009,2010 notaz
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 */
#define CONTEXT_REG 11

// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
	do { \
		*(u32 *)ptr = x; \
		ptr = (void *)((u8 *)ptr + sizeof(u32)); \
		COUNT_OP; \
	} while (0)

#define EMIT(x) EMIT_PTR(tcache_ptr, x)
#define A_R4M  (1 << 4)
#define A_R5M  (1 << 5)
#define A_R6M  (1 << 6)
#define A_R7M  (1 << 7)
#define A_R8M  (1 << 8)
#define A_R9M  (1 << 9)
#define A_R10M (1 << 10)
#define A_R11M (1 << 11)
#define A_R14M (1 << 14)
#define A_R15M (1 << 15)

#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
#define A_COND_HS 0x2
#define A_COND_LO 0x3
#define A_COND_MI 0x4
#define A_COND_PL 0x5
#define A_COND_VS 0x6
#define A_COND_VC 0x7
#define A_COND_HI 0x8
#define A_COND_LS 0x9
#define A_COND_GE 0xa
#define A_COND_LT 0xb
#define A_COND_GT 0xc
#define A_COND_LE 0xd
#define A_COND_CS A_COND_HS
#define A_COND_CC A_COND_LO

/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL
#define DCOND_HI A_COND_HI
#define DCOND_HS A_COND_HS
#define DCOND_LO A_COND_LO
#define DCOND_GE A_COND_GE
#define DCOND_GT A_COND_GT
#define DCOND_LT A_COND_LT
#define DCOND_LS A_COND_LS
#define DCOND_LE A_COND_LE
#define DCOND_VS A_COND_VS
#define DCOND_VC A_COND_VC

/* addressing mode 1 */
#define A_AM1_LSL 0
#define A_AM1_LSR 1
#define A_AM1_ASR 2
#define A_AM1_ROR 3

#define A_AM1_IMM(ror2,imm8)                  (((ror2)<<8) | (imm8) | 0x02000000)
#define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
#define A_AM1_REG_XREG(rs,shift_op,rm)        (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))
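
/*
 * Informative: a mode-1 immediate is imm8 rotated right by 2*ror2, so for
 * example #0xff000000 encodes as A_AM1_IMM(4, 0xff), i.e. 0xff ror 8.
 * Constants that don't fit this form need a multi-insn sequence, see
 * emith_op_imm2() below.
 */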
/* data processing op */
#define A_OP_AND 0x0
#define A_OP_EOR 0x1
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
#define A_OP_ADC 0x5
#define A_OP_SBC 0x6
#define A_OP_RSC 0x7
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_CMN 0xb
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe
#define A_OP_MVN 0xf
#define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \
	EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op))

#define EOP_C_DOP_IMM(     cond,op,s,rn,rd,ror2,imm8)             EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8))
#define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm))
#define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs,       shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs,shift_op,rm))

#define EOP_MOV_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_MVN_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
#define EOP_TST_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
#define EOP_CMP_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
#define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_IMM_C(cond,rd,   ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_MVN_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_CMP_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)

#define EOP_MOV_REG2(s,rd,   rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
#define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
#define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)

#define EOP_MOV_REG_SIMPLE(rd,rm)           EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
#define EOP_MOV_REG_LSL(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
#define EOP_MOV_REG_LSR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
#define EOP_MOV_REG_ASR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
#define EOP_MOV_REG_ROR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)

#define EOP_ORR_REG_SIMPLE(rd,rm)           EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
#define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)

#define EOP_ADD_REG_SIMPLE(rd,rm)           EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_TST_REG_SIMPLE(rn,rm)           EOP_TST_REG(A_COND_AL, rn,rm,A_AM1_LSL,0)

#define EOP_MOV_REG2_LSL(rd,   rm,rs) EOP_MOV_REG2(0,rd,   rm,A_AM1_LSL,rs)
#define EOP_MOV_REG2_ROR(rd,   rm,rs) EOP_MOV_REG2(0,rd,   rm,A_AM1_ROR,rs)
#define EOP_ADD_REG2_LSL(rd,rn,rm,rs) EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
#define EOP_SUB_REG2_LSL(rd,rn,rm,rs) EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
/* addressing mode 2 */
#define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
	EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | (offset_12))

#define EOP_C_AM2_REG(cond,u,b,l,rn,rd,shift_imm,shift_op,rm) \
	EMIT(((cond)<<28) | 0x07000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
		((shift_imm)<<7) | ((shift_op)<<5) | (rm))

/* addressing mode 3 */
#define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
	EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
		((s)<<6) | ((h)<<5) | (immed_reg))

#define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))
#define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm)       EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)

/* ldr and str */
#define EOP_LDR_IMM2(cond,rd,rn,offset_12)  EOP_C_AM2_IMM(cond,1,0,1,rn,rd,offset_12)
#define EOP_LDRB_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,1,1,1,rn,rd,offset_12)

#define EOP_LDR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,offset_12)
#define EOP_LDR_NEGIMM(rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,0,0,1,rn,rd,offset_12)
#define EOP_LDR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
#define EOP_STR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,offset_12)
#define EOP_STR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)

#define EOP_LDR_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,1,rn,rd,shift_imm,A_AM1_LSL,rm)

#define EOP_LDRH_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,1,1,rn,rd,0,1,offset_8)

#define EOP_LDRH_IMM(   rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,offset_8)
#define EOP_LDRH_SIMPLE(rd,rn)          EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
#define EOP_LDRH_REG(   rd,rn,rm)       EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
#define EOP_STRH_IMM(   rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,offset_8)
#define EOP_STRH_SIMPLE(rd,rn)          EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
#define EOP_STRH_REG(   rd,rn,rm)       EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)
/* ldm and stm */
#define EOP_XXM(cond,p,u,s,w,l,rn,list) \
	EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list))

#define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list)
#define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list)

#define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,13,list)
#define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,13,list)
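
/*
 * Example: EOP_STMFD_SP(A_R4M|A_R14M) assembles "stmfd sp!, {r4, lr}",
 * and the matching EOP_LDMFD_SP(A_R4M|A_R15M) "ldmfd sp!, {r4, pc}",
 * which restores r4 and returns by popping into the program counter.
 */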
/* branches */
#define EOP_C_BX(cond,rm) \
	EMIT(((cond)<<28) | 0x012fff10 | (rm))

#define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \
	EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))

#define EOP_C_B(cond,l,signed_immed_24) \
	EOP_C_B_PTR(tcache_ptr,cond,l,signed_immed_24)

#define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
#define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)
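
/*
 * Branch offsets: signed_immed_24 counts words relative to the branch
 * insn's address + 8 (pipeline prefetch), hence the "- 2" word adjustment
 * in emith_xbranch() and JMP_EMIT() below; a branch to the very next
 * instruction encodes as -1 (0xffffff).
 */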
/* misc */
#define EOP_C_MUL(cond,s,rd,rs,rm) \
	EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm

#define EOP_C_MRS(cond,rd) \
	EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12))

#define EOP_C_MSR_IMM(cond,ror2,imm) \
	EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm)) // cpsr_f

#define EOP_C_MSR_REG(cond,rm) \
	EMIT(((cond)<<28) | 0x0128f000 | (rm)) // cpsr_f

#define EOP_MRS(rd)           EOP_C_MRS(A_COND_AL,rd)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm)       EOP_C_MSR_REG(A_COND_AL,rm)
// XXX: AND, RSB, *C, will break if 1 insn is not enough
static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
{
	int ror2;
	u32 v;

	switch (op) {
	case A_OP_MOV:
		rn = 0;
		if (~imm < 0x10000) {
			imm = ~imm;
			op = A_OP_MVN;
		}
		break;

	case A_OP_EOR:
	case A_OP_SUB:
	case A_OP_ADD:
	case A_OP_ORR:
	case A_OP_BIC:
		if (s == 0 && imm == 0)
			return;
		break;
	}

	for (v = imm, ror2 = 0; ; ror2 -= 8/2) {
		/* shift down to get 'best' ror2 */
		for (; v && !(v & 3); v >>= 2)
			ror2--;

		EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff);

		v >>= 8;
		if (v == 0)
			break;
		if (op == A_OP_MOV)
			op = A_OP_ORR;
		if (op == A_OP_MVN)
			op = A_OP_BIC;
		rn = rd;
	}
}
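
/*
 * Worked expansion (hand-checked sketch; r0 picked arbitrarily):
 * emith_op_imm2(A_COND_AL, 0, A_OP_MOV, 0, 0, 0x12345678) can't fit one
 * mode-1 immediate, so the loop above emits it from the low end up:
 *   mov r0, #0x278
 *   orr r0, r0, #0x5400
 *   orr r0, r0, #0x2340000
 *   orr r0, r0, #0x10000000
 */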
#define emith_op_imm(cond, s, op, r, imm) \
	emith_op_imm2(cond, s, op, r, r, imm)

// test op
#define emith_top_imm(cond, op, r, imm) do { \
	u32 ror2, v; \
	for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
		ror2--; \
	EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
} while (0)
#define is_offset_24(val) \
	((val) >= (int)0xff000000 && (val) <= 0x00ffffff)

static int emith_xbranch(int cond, void *target, int is_call)
{
	int val = (u32 *)target - (u32 *)tcache_ptr - 2;
	int direct = is_offset_24(val);
	u32 *start_ptr = (u32 *)tcache_ptr;

	if (direct)
	{
		EOP_C_B(cond,is_call,val & 0xffffff);		// b, bl target
	}
	else
	{
#ifdef __EPOC32__
//		elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
		if (is_call)
			EOP_ADD_IMM(14,15,0,8);			// add lr,pc,#8
		EOP_C_AM2_IMM(cond,1,0,1,15,15,0);		// ldrcc pc,[pc]
		EOP_MOV_REG_SIMPLE(15,15);			// mov pc, pc
		EMIT((u32)target);
#else
		// should never happen
		elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %08x->%08x", target, tcache_ptr);
		exit(1);
#endif
	}

	return (u32 *)tcache_ptr - start_ptr;
}
#define JMP_POS(ptr) \
	ptr = tcache_ptr; \
	tcache_ptr += sizeof(u32)

#define JMP_EMIT(cond, ptr) { \
	u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
	EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \
}

#define EMITH_JMP_START(cond) { \
	void *cond_ptr; \
	JMP_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
	JMP_EMIT(cond, cond_ptr); \
}
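
/*
 * Typical forward-branch pattern (illustrative sketch only; r0 and the
 * body insn are made up): reserve a slot with JMP_POS, emit the body,
 * then back-patch the slot with the condition that skips over the body.
 * emith_sh2_div1_step() at the end of this file uses exactly this.
 */
#if 0
	{
		void *skip;
		JMP_POS(skip);             // reserve one insn slot
		emith_add_r_imm(0, 1);     // body: add r0, r0, #1
		JMP_EMIT(A_COND_EQ, skip); // patch slot: beq over the body
	}
#endif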
// fake "simple" or "short" jump - using cond insns instead
#define EMITH_NOTHING1(cond) \
	(void)(cond)

#define EMITH_SJMP_START(cond)	EMITH_NOTHING1(cond)
#define EMITH_SJMP_END(cond)	EMITH_NOTHING1(cond)
#define EMITH_SJMP3_START(cond)	EMITH_NOTHING1(cond)
#define EMITH_SJMP3_MID(cond)	EMITH_NOTHING1(cond)
#define EMITH_SJMP3_END()
#define emith_move_r_r(d, s) \
	EOP_MOV_REG_SIMPLE(d, s)

#define emith_mvn_r_r(d, s) \
	EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)

#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \
	EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)

#define emith_or_r_r_lsl(d, s, lslimm) \
	emith_or_r_r_r_lsl(d, d, s, lslimm)

#define emith_eor_r_r_lsr(d, s, lsrimm) \
	emith_eor_r_r_r_lsr(d, d, s, lsrimm)

#define emith_or_r_r_r(d, s1, s2) \
	emith_or_r_r_r_lsl(d, s1, s2, 0)

#define emith_eor_r_r_r(d, s1, s2) \
	emith_eor_r_r_r_lsl(d, s1, s2, 0)

#define emith_add_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_sub_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_adc_r_r(d, s) \
	EOP_ADC_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_and_r_r(d, s) \
	EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_or_r_r(d, s) \
	emith_or_r_r_r(d, d, s)

#define emith_eor_r_r(d, s) \
	emith_eor_r_r_r(d, d, s)

#define emith_tst_r_r(d, s) \
	EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_teq_r_r(d, s) \
	EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_cmp_r_r(d, s) \
	EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_addf_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_subf_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_adcf_r_r(d, s) \
	EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_sbcf_r_r(d, s) \
	EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_eorf_r_r(d, s) \
	EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_move_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)

#define emith_adc_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm)

#define emith_sub_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)

#define emith_bic_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)

#define emith_and_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)
// note: only use 8bit imm for these
#define emith_tst_r_imm(r, imm) \
	emith_top_imm(A_COND_AL, A_OP_TST, r, imm)

#define emith_cmp_r_imm(r, imm) { \
	u32 op = A_OP_CMP, imm_ = imm; \
	if (~imm_ < 0x100) { \
		imm_ = ~imm_; \
		op = A_OP_CMN; \
	} \
	emith_top_imm(A_COND_AL, op, r, imm_); \
}
#define emith_subf_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)

#define emith_move_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ADD, r, imm)

#define emith_sub_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_SUB, r, imm)

#define emith_or_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_EOR, r, imm)

#define emith_bic_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_BIC, r, imm)

#define emith_move_r_imm_s8(r, imm) { \
	if ((imm) & 0x80) \
		EOP_MVN_IMM(r, 0, ((imm) ^ 0xff)); \
	else \
		EOP_MOV_IMM(r, 0, imm); \
}

#define emith_and_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)

#define emith_add_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)

#define emith_sub_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)

#define emith_neg_r_r(d, s) \
	EOP_RSB_IMM(d, s, 0, 0)

#define emith_lsl(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)

#define emith_lsr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)

#define emith_asr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)

#define emith_ror_c(cond, d, s, cnt) \
	EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt)

#define emith_ror(d, s, cnt) \
	emith_ror_c(A_COND_AL, d, s, cnt)
#define emith_rol(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt))
#define emith_lslf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)

#define emith_lsrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)

#define emith_asrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)

// note: only C flag updated correctly
#define emith_rolf(d, s, cnt) { \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
	/* we don't have ROL so we shift to get the right carry */ \
	EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
}

#define emith_rorf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)

#define emith_rolcf(d) \
	emith_adcf_r_r(d, d)

#define emith_rorcf(d) \
	EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */

#define emith_negcf_r_r(d, s) \
	EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)

#define emith_mul(d, s1, s2) { \
	if ((d) != (s1)) /* rd != rm limitation */ \
		EOP_MUL(d, s1, s2); \
	else \
		EOP_MUL(d, s2, s1); \
}
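
/*
 * Informative: ARM's MUL requires rd != rm, which is why emith_mul() swaps
 * its sources when needed; e.g. emith_mul(0, 0, 1) comes out as
 * "mul r0, r1, r0" instead of the unpredictable "mul r0, r0, r1".
 */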
#define emith_mul_u64(dlo, dhi, s1, s2) \
	EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mul_s64(dlo, dhi, s1, s2) \
	EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mula_s64(dlo, dhi, s1, s2) \
	EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)

// misc
#define emith_read_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDR_IMM2(cond, r, rs, offs)

#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRB_IMM2(cond, r, rs, offs)

#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRH_IMM2(cond, r, rs, offs)

#define emith_read_r_r_offs(r, rs, offs) \
	emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read8_r_r_offs(r, rs, offs) \
	emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read16_r_r_offs(r, rs, offs) \
	emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_ctx_read(r, offs) \
	emith_read_r_r_offs(r, CONTEXT_REG, offs)

#define emith_ctx_write(r, offs) \
	EOP_STR_IMM(r, CONTEXT_REG, offs)
#define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \
	int v_, r_ = r, c_ = count, b_ = CONTEXT_REG; \
	for (v_ = 0; c_; c_--, r_++) \
		v_ |= 1 << r_; \
	if ((offs) != 0) { \
		/* add tmpr, ctx, #offs (imm8 = offs>>2 undone by ror #30) */ \
		EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2); \
		b_ = tmpr; \
	} \
	op(b_,v_); \
} while (0)
#define emith_ctx_read_multiple(r, offs, count, tmpr) \
	emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr)

#define emith_ctx_write_multiple(r, offs, count, tmpr) \
	emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr)
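
/*
 * Worked expansion (informative; register numbers picked for the example):
 * emith_ctx_read_multiple(4, 0x10, 3, 12) - three regs starting at r4 from
 * context offset 0x10, r12 as scratch - emits:
 *   add   r12, r11, #0x10
 *   ldmia r12, {r4-r6}
 * offs must be a multiple of 4 below 0x400 to fit the add immediate.
 */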
#define emith_clear_msb_c(cond, d, s, count) { \
	u32 t; \
	if ((count) <= 8) { \
		t = 8 - (count); \
		t = (0xff << t) & 0xff; \
		EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
	} else if ((count) >= 24) { \
		t = (count) - 24; \
		t = 0xff >> t; \
		EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
	} else { \
		EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \
		EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \
	} \
}
#define emith_clear_msb(d, s, count) \
	emith_clear_msb_c(A_COND_AL, d, s, count)

#define emith_sext(d, s, bits) { \
	EOP_MOV_REG_LSL(d,s,32 - (bits)); \
	EOP_MOV_REG_ASR(d,d,32 - (bits)); \
}
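
/*
 * Example: emith_sext(d, s, 16) emits "mov d, s, lsl #16" then
 * "mov d, d, asr #16", sign-extending the low 16 bits of s into d.
 */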
// up to 4 args
#define emith_pass_arg_r(arg, reg) \
	EOP_MOV_REG_SIMPLE(arg, reg)

#define emith_pass_arg_imm(arg, imm) \
	emith_move_r_imm(arg, imm)

#define emith_jump(target) \
	emith_jump_cond(A_COND_AL, target)

#define emith_jump_patchable(target) \
	emith_jump(target)

#define emith_jump_cond(cond, target) \
	emith_xbranch(cond, target, 0)

#define emith_jump_cond_patchable(cond, target) \
	emith_jump_cond(cond, target)

#define emith_jump_patch(ptr, target) do { \
	u32 *ptr_ = ptr; \
	u32 val_ = (u32 *)(target) - ptr_ - 2; \
	/* keep cond and opcode bits, replace the 24-bit offset */ \
	*ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
} while (0)

#define emith_jump_at(ptr, target) { \
	u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \
	EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \
}

#define emith_jump_reg_c(cond, r) \
	EOP_C_BX(cond, r)

#define emith_jump_reg(r) \
	emith_jump_reg_c(A_COND_AL, r)

#define emith_jump_ctx_c(cond, offs) \
	EOP_LDR_IMM2(cond,15,CONTEXT_REG,offs)

#define emith_jump_ctx(offs) \
	emith_jump_ctx_c(A_COND_AL, offs)

#define emith_call_cond(cond, target) \
	emith_xbranch(cond, target, 1)

#define emith_call(target) \
	emith_call_cond(A_COND_AL, target)

#define emith_call_ctx(offs) { \
	emith_move_r_r(14, 15); \
	emith_jump_ctx(offs); \
}

#define emith_ret_c(cond) \
	emith_jump_reg_c(cond, 14)

#define emith_ret() \
	emith_ret_c(A_COND_AL)

#define emith_ret_to_ctx(offs) \
	emith_ctx_write(14, offs)

#define emith_push_ret() \
	EOP_STMFD_SP(A_R14M)

#define emith_pop_and_ret() \
	EOP_LDMFD_SP(A_R15M)
#define host_instructions_updated(base, end) \
	cache_flush_d_inval_i(base, end)

#define host_arg2reg(rd, arg) \
	rd = arg

/* SH2 drc specific */
#define emith_sh2_drc_entry() \
	EOP_STMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R14M)

#define emith_sh2_drc_exit() \
	EOP_LDMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R15M)
#define emith_sh2_wcall(a, tab, ret_ptr) { \
	int val_ = (char *)(ret_ptr) - (char *)tcache_ptr - 2*4; \
	if (val_ >= 0) /* lr = ret_ptr (pc reads as current insn + 8) */ \
		emith_add_r_r_imm(14, 15, val_); \
	else \
		emith_sub_r_r_imm(14, 15, -val_); \
	emith_lsr(12, a, SH2_WRITE_SHIFT); \
	EOP_LDR_REG_LSL(A_COND_AL,12,tab,12,2); \
	emith_ctx_read(2, offsetof(SH2, is_slave)); \
	emith_jump_reg(12); \
}
#define emith_sh2_dtbf_loop() { \
	int cr, rn; \
	int tmp_ = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
	emith_sub_r_imm(rn, 1);                /* sub rn, #1 */ \
	emith_bic_r_imm(cr, 1);                /* bic cr, #1 */ \
	emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
	cycles = 0; \
	emith_asrf(tmp_, cr, 2+12);            /* movs tmp_, cr, asr #2+12 */ \
	EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0);     /* movmi tmp_, #0 */ \
	emith_lsl(cr, cr, 20);                 /* mov cr, cr, lsl #20 */ \
	emith_lsr(cr, cr, 20);                 /* mov cr, cr, lsr #20 */ \
	emith_subf_r_r(rn, tmp_);              /* subs rn, tmp_ */ \
	EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0);  /* rsbls tmp_, rn, #0 */ \
	EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */ \
	EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1);    /* orrls cr, #1 */ \
	EOP_MOV_IMM_C(A_COND_LS,rn,0,0);       /* movls rn, #0 */ \
	rcache_free_tmp(tmp_); \
}
#define emith_write_sr(sr, srcr) { \
	emith_lsr(sr, sr, 10); \
	emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
	emith_ror(sr, sr, 22); \
}
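
/*
 * Informative: the three ops above preserve the upper 22 bits of sr (used
 * by the DRC, cf. the cycle count in emith_sh2_dtbf_loop above) and splice
 * in the low 10 bits of srcr (the architectural SR flags):
 *   mov sr, sr, lsr #10          @ drop old flag bits
 *   orr sr, sr, srcr, lsl #22    @ new flags into bits 31..22
 *   mov sr, sr, ror #22          @ rotate flags back down to bits 9..0
 */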
#define emith_carry_to_t(srr, is_sub) { \
	if (is_sub) { /* has inverted C on ARM */ \
		emith_or_r_imm_c(A_COND_CC, srr, 1); \
		emith_bic_r_imm_c(A_COND_CS, srr, 1); \
	} else { \
		emith_or_r_imm_c(A_COND_CS, srr, 1); \
		emith_bic_r_imm_c(A_COND_CC, srr, 1); \
	} \
}

#define emith_tpop_carry(sr, is_sub) { \
	if (is_sub) \
		emith_eor_r_imm(sr, 1); \
	emith_lsrf(sr, sr, 1); \
}

#define emith_tpush_carry(sr, is_sub) { \
	emith_adc_r_r(sr, sr); \
	if (is_sub) \
		emith_eor_r_imm(sr, 1); \
}
/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */
#define emith_sh2_div1_step(rn, rm, sr) { \
	void *jmp0, *jmp1; \
	emith_tst_r_imm(sr, Q);    /* if (Q ^ M) */ \
	JMP_POS(jmp0);             /* beq do_sub */ \
	emith_addf_r_r(rn, rm); \
	emith_eor_r_imm_c(A_COND_CS, sr, T); \
	JMP_POS(jmp1);             /* b done */ \
	JMP_EMIT(A_COND_EQ, jmp0); /* do_sub: */ \
	emith_subf_r_r(rn, rm); \
	emith_eor_r_imm_c(A_COND_CC, sr, T); \
	JMP_EMIT(A_COND_AL, jmp1); /* done: */ \
}