emit_arm.c

/*
 * Basic macros to emit ARM instructions and some utils
 * Copyright (C) 2008,2009,2010 notaz
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 */

#define CONTEXT_REG 11
#define RET_REG     0

// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
    do { \
        *(u32 *)ptr = x; \
        ptr = (void *)((u8 *)ptr + sizeof(u32)); \
        COUNT_OP; \
    } while (0)

#define EMIT(x) EMIT_PTR(tcache_ptr, x)
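
/* Everything below funnels through EMIT(): each call writes one 32-bit ARM
 * opcode at tcache_ptr and advances it. For example, EMIT(0xe1a00000)
 * appends "mov r0, r0", the canonical ARM nop. */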

#define A_R4M  (1 << 4)
#define A_R5M  (1 << 5)
#define A_R6M  (1 << 6)
#define A_R7M  (1 << 7)
#define A_R8M  (1 << 8)
#define A_R9M  (1 << 9)
#define A_R10M (1 << 10)
#define A_R11M (1 << 11)
#define A_R12M (1 << 12)
#define A_R14M (1 << 14)
#define A_R15M (1 << 15)

#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
#define A_COND_HS 0x2
#define A_COND_LO 0x3
#define A_COND_MI 0x4
#define A_COND_PL 0x5
#define A_COND_VS 0x6
#define A_COND_VC 0x7
#define A_COND_HI 0x8
#define A_COND_LS 0x9
#define A_COND_GE 0xa
#define A_COND_LT 0xb
#define A_COND_GT 0xc
#define A_COND_LE 0xd
#define A_COND_CS A_COND_HS
#define A_COND_CC A_COND_LO

/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL
#define DCOND_HI A_COND_HI
#define DCOND_HS A_COND_HS
#define DCOND_LO A_COND_LO
#define DCOND_GE A_COND_GE
#define DCOND_GT A_COND_GT
#define DCOND_LT A_COND_LT
#define DCOND_LS A_COND_LS
#define DCOND_LE A_COND_LE
#define DCOND_VS A_COND_VS
#define DCOND_VC A_COND_VC

/* addressing mode 1 */
#define A_AM1_LSL 0
#define A_AM1_LSR 1
#define A_AM1_ASR 2
#define A_AM1_ROR 3

#define A_AM1_IMM(ror2,imm8)                  (((ror2)<<8) | (imm8) | 0x02000000)
#define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
#define A_AM1_REG_XREG(rs,shift_op,rm)        (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))
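
/* The AM1 immediate form encodes an 8-bit value rotated right by 2*ror2.
 * A worked example (registers/values chosen for illustration): #0x1000 is
 * 0x01 rotated right by 20 bits, so ror2 = 10 and imm8 = 0x01, and
 * EOP_ADD_IMM(0,1,10,0x01) defined below emits 0xe2810a01, i.e.
 * "add r0, r1, #0x1000". */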

/* data processing op */
#define A_OP_AND 0x0
#define A_OP_EOR 0x1
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
#define A_OP_ADC 0x5
#define A_OP_SBC 0x6
#define A_OP_RSC 0x7
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_CMN 0xb
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe
#define A_OP_MVN 0xf

#define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \
    EMIT(((cond)<<28) | ((op)<<21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op))

#define EOP_C_DOP_IMM(     cond,op,s,rn,rd,ror2,imm8)             EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8))
#define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm))
#define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs,       shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs,shift_op,rm))

#define EOP_MOV_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_MVN_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
#define EOP_TST_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
#define EOP_CMP_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
#define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_IMM_C(cond,rd,   ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_MVN_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_CMP_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)

#define EOP_MOV_REG2(s,rd,   rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
#define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
#define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)

#define EOP_MOV_REG_SIMPLE(rd,rm)           EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
#define EOP_MOV_REG_LSL(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
#define EOP_MOV_REG_LSR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
#define EOP_MOV_REG_ASR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
#define EOP_MOV_REG_ROR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)

#define EOP_ORR_REG_SIMPLE(rd,rm)           EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
#define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)

#define EOP_ADD_REG_SIMPLE(rd,rm)           EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)

#define EOP_TST_REG_SIMPLE(rn,rm)           EOP_TST_REG(A_COND_AL,rn,rm,A_AM1_LSL,0)

#define EOP_MOV_REG2_LSL(rd,   rm,rs) EOP_MOV_REG2(0,rd,   rm,A_AM1_LSL,rs)
#define EOP_MOV_REG2_ROR(rd,   rm,rs) EOP_MOV_REG2(0,rd,   rm,A_AM1_ROR,rs)
#define EOP_ADD_REG2_LSL(rd,rn,rm,rs) EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
#define EOP_SUB_REG2_LSL(rd,rn,rm,rs) EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
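
/* Shifted-register operands follow the same pattern; for instance
 * EOP_ADD_REG_LSL(0,1,2,4) above emits 0xe0810202, i.e.
 * "add r0, r1, r2, lsl #4" (registers chosen for illustration). */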

/* addressing mode 2 */
#define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
    EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | (offset_12))

#define EOP_C_AM2_REG(cond,u,b,l,rn,rd,shift_imm,shift_op,rm) \
    EMIT(((cond)<<28) | 0x07000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
        ((shift_imm)<<7) | ((shift_op)<<5) | (rm))

/* addressing mode 3 */
#define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
    EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
        ((s)<<6) | ((h)<<5) | (immed_reg))

#define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))
#define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm)       EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)

/* ldr and str */
#define EOP_LDR_IMM2(cond,rd,rn,offset_12)  EOP_C_AM2_IMM(cond,1,0,1,rn,rd,offset_12)
#define EOP_LDRB_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,1,1,1,rn,rd,offset_12)

#define EOP_LDR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,offset_12)
#define EOP_LDR_NEGIMM(rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,0,0,1,rn,rd,offset_12)
#define EOP_LDR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
#define EOP_STR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,offset_12)
#define EOP_STR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)

#define EOP_LDR_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,1,rn,rd,shift_imm,A_AM1_LSL,rm)

#define EOP_LDRH_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,1,1,rn,rd,0,1,offset_8)

#define EOP_LDRH_IMM(   rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,offset_8)
#define EOP_LDRH_SIMPLE(rd,rn)          EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
#define EOP_LDRH_REG(   rd,rn,rm)       EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
#define EOP_STRH_IMM(   rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,offset_8)
#define EOP_STRH_SIMPLE(rd,rn)          EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
#define EOP_STRH_REG(   rd,rn,rm)       EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)
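
/* Example (illustrative registers): EOP_LDR_IMM(0,11,0x40) emits 0xe59b0040,
 * i.e. "ldr r0, [r11, #0x40]". Note the AM3 halfword forms split their
 * immediate into two nibbles, so offset_8 must fit in 8 bits. */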

/* ldm and stm */
#define EOP_XXM(cond,p,u,s,w,l,rn,list) \
    EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list))

#define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list)
#define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list)

#define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,13,list)
#define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,13,list)
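
/* Example: EOP_STMFD_SP(A_R4M|A_R14M) emits 0xe92d4010 = "push {r4, lr}",
 * and EOP_LDMFD_SP(A_R4M|A_R15M) emits 0xe8bd8010 = "pop {r4, pc}",
 * returning by loading pc directly. */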

/* branches */
#define EOP_C_BX(cond,rm) \
    EMIT(((cond)<<28) | 0x012fff10 | (rm))

#define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \
    EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))

#define EOP_C_B(cond,l,signed_immed_24) \
    EOP_C_B_PTR(tcache_ptr,cond,l,signed_immed_24)

#define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
#define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)
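
/* signed_immed_24 is a word offset relative to pc+8, i.e.
 * (target - insn_address)/4 - 2, which limits direct branches to +/-32MB
 * (see is_offset_24 below). A branch-to-self is EOP_B(-2 & 0xffffff),
 * producing the familiar 0xeafffffe. */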

/* misc */
#define EOP_C_MUL(cond,s,rd,rs,rm) \
    EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
    EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
    EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \
    EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm

#define EOP_C_MRS(cond,rd) \
    EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12))

#define EOP_C_MSR_IMM(cond,ror2,imm) \
    EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm)) // cpsr_f

#define EOP_C_MSR_REG(cond,rm) \
    EMIT(((cond)<<28) | 0x0128f000 | (rm)) // cpsr_f

#define EOP_MRS(rd)           EOP_C_MRS(A_COND_AL,rd)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm)       EOP_C_MSR_REG(A_COND_AL,rm)

#define EOP_MOVW(rd,imm) \
    EMIT(0xe3000000 | ((rd)<<12) | ((imm)&0xfff) | (((imm)<<4)&0xf0000))

#define EOP_MOVT(rd,imm) \
    EMIT(0xe3400000 | ((rd)<<12) | (((imm)>>16)&0xfff) | (((imm)>>12)&0xf0000))
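
/* Example: loading the (arbitrary) constant 0x12345678 on ARMv7:
 *   EOP_MOVW(0, 0x12345678);  // 0xe3050678  movw r0, #0x5678
 *   EOP_MOVT(0, 0x12345678);  // 0xe3410234  movt r0, #0x1234
 * EOP_MOVW uses the low 16 bits of imm, EOP_MOVT the high 16. */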

// XXX: AND, RSB, *C, will break if 1 insn is not enough
static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
{
    int ror2;
    u32 v;

    switch (op) {
    case A_OP_MOV:
        rn = 0;
        if (~imm < 0x10000) {
            imm = ~imm;
            op = A_OP_MVN;
        }
#ifdef HAVE_ARMV7
        for (v = imm, ror2 = 0; v && !(v & 3); v >>= 2)
            ror2--;
        if (v >> 8) {
            /* 2+ insns needed - prefer movw/movt */
            if (op == A_OP_MVN)
                imm = ~imm;
            EOP_MOVW(rd, imm);
            if (imm & 0xffff0000)
                EOP_MOVT(rd, imm);
            return;
        }
#endif
        break;

    case A_OP_EOR:
    case A_OP_SUB:
    case A_OP_ADD:
    case A_OP_ORR:
    case A_OP_BIC:
        if (s == 0 && imm == 0)
            return;
        break;
    }

    for (v = imm, ror2 = 0; ; ror2 -= 8/2) {
        /* shift down to get 'best' rot2 */
        for (; v && !(v & 3); v >>= 2)
            ror2--;

        EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff);

        v >>= 8;
        if (v == 0)
            break;
        if (op == A_OP_MOV)
            op = A_OP_ORR;
        if (op == A_OP_MVN)
            op = A_OP_BIC;
        rn = rd;
    }
}
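
/* Decomposition example (pre-ARMv7 path, illustrative value):
 * emith_op_imm2(A_COND_AL, 0, A_OP_MOV, 0, 0, 0x00ff00ff) cannot be
 * encoded as a single AM1 immediate, so the loop above emits
 *   mov r0, #0xff
 *   orr r0, r0, #0x00ff0000
 * one rotated byte per instruction, switching MOV->ORR (or MVN->BIC)
 * after the first insn. */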

#define emith_op_imm(cond, s, op, r, imm) \
    emith_op_imm2(cond, s, op, r, r, imm)

// test op
#define emith_top_imm(cond, op, r, imm) do { \
    u32 ror2, v; \
    for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
        ror2--; \
    EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
} while (0)

#define is_offset_24(val) \
    ((val) >= (int)0xff000000 && (val) <= 0x00ffffff)

static int emith_xbranch(int cond, void *target, int is_call)
{
    int val = (u32 *)target - (u32 *)tcache_ptr - 2;
    int direct = is_offset_24(val);
    u32 *start_ptr = (u32 *)tcache_ptr;

    if (direct)
    {
        EOP_C_B(cond,is_call,val & 0xffffff);  // b, bl target
    }
    else
    {
#ifdef __EPOC32__
//      elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
        if (is_call)
            EOP_ADD_IMM(14,15,0,8);            // add lr,pc,#8
        EOP_C_AM2_IMM(cond,1,0,1,15,15,0);     // ldrcc pc,[pc]
        EOP_MOV_REG_SIMPLE(15,15);             // mov pc, pc
        EMIT((u32)target);
#else
        // should never happen
        elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %08x->%08x", target, tcache_ptr);
        exit(1);
#endif
    }

    return (u32 *)tcache_ptr - start_ptr;
}

#define JMP_POS(ptr) \
    ptr = tcache_ptr; \
    tcache_ptr += sizeof(u32)

#define JMP_EMIT(cond, ptr) { \
    u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
    EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \
}

#define EMITH_JMP_START(cond) { \
    void *cond_ptr; \
    JMP_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
    JMP_EMIT(cond, cond_ptr); \
}
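
/* Forward-branch pattern: JMP_POS reserves one opcode slot, JMP_EMIT
 * back-patches it once the target address is known. Typical shape:
 *   EMITH_JMP_START(DCOND_EQ);  // reserves slot; becomes "beq past_body"
 *   ... emit body that runs only when the condition is false ...
 *   EMITH_JMP_END(DCOND_EQ);    // patches the slot to branch here
 */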

// fake "simple" or "short" jump - using cond insns instead
#define EMITH_NOTHING1(cond) \
    (void)(cond)

#define EMITH_SJMP_DECL_()
#define EMITH_SJMP_START_(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP_END_(cond)   EMITH_NOTHING1(cond)
#define EMITH_SJMP_START(cond)  EMITH_NOTHING1(cond)
#define EMITH_SJMP_END(cond)    EMITH_NOTHING1(cond)
#define EMITH_SJMP3_START(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP3_MID(cond)   EMITH_NOTHING1(cond)
#define EMITH_SJMP3_END()

#define emith_move_r_r(d, s) \
    EOP_MOV_REG_SIMPLE(d, s)

#define emith_move_r_r_ptr(d, s) \
    emith_move_r_r(d, s)

#define emith_mvn_r_r(d, s) \
    EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)

#define emith_add_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_ADD_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
    EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \
    EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)

#define emith_or_r_r_lsl(d, s, lslimm) \
    emith_or_r_r_r_lsl(d, d, s, lslimm)

#define emith_eor_r_r_lsr(d, s, lsrimm) \
    emith_eor_r_r_r_lsr(d, d, s, lsrimm)

#define emith_add_r_r_r(d, s1, s2) \
    emith_add_r_r_r_lsl(d, s1, s2, 0)

#define emith_or_r_r_r(d, s1, s2) \
    emith_or_r_r_r_lsl(d, s1, s2, 0)

#define emith_eor_r_r_r(d, s1, s2) \
    emith_eor_r_r_r_lsl(d, s1, s2, 0)

#define emith_add_r_r(d, s) \
    emith_add_r_r_r(d, d, s)

#define emith_sub_r_r(d, s) \
    EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_adc_r_r(d, s) \
    EOP_ADC_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_and_r_r(d, s) \
    EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_or_r_r(d, s) \
    emith_or_r_r_r(d, d, s)

#define emith_eor_r_r(d, s) \
    emith_eor_r_r_r(d, d, s)

#define emith_tst_r_r(d, s) \
    EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_teq_r_r(d, s) \
    EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_cmp_r_r(d, s) \
    EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_addf_r_r(d, s) \
    EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_subf_r_r(d, s) \
    EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_adcf_r_r(d, s) \
    EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_sbcf_r_r(d, s) \
    EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_eorf_r_r(d, s) \
    EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_move_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)

#define emith_adc_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm)

#define emith_sub_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)

#define emith_bic_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)

#define emith_and_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)

#define emith_or_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)

// note: only use 8bit imm for these
#define emith_tst_r_imm(r, imm) \
    emith_top_imm(A_COND_AL, A_OP_TST, r, imm)

#define emith_cmp_r_imm(r, imm) { \
    u32 op = A_OP_CMP, imm_ = imm; \
    if (~imm_ < 0x100) { \
        imm_ = ~imm_; \
        op = A_OP_CMN; \
    } \
    emith_top_imm(A_COND_AL, op, r, imm_); \
}

#define emith_subf_r_imm(r, imm) \
    emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)

#define emith_move_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_ADD, r, imm)

#define emith_sub_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_SUB, r, imm)

#define emith_or_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_EOR, r, imm)

#define emith_bic_r_imm_c(cond, r, imm) \
    emith_op_imm(cond, 0, A_OP_BIC, r, imm)

#define emith_move_r_imm_s8(r, imm) { \
    if ((imm) & 0x80) \
        EOP_MVN_IMM(r, 0, ((imm) ^ 0xff)); \
    else \
        EOP_MOV_IMM(r, 0, imm); \
}

#define emith_and_r_r_imm(d, s, imm) \
    emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)

#define emith_add_r_r_imm(d, s, imm) \
    emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)

#define emith_add_r_r_ptr_imm(d, s, imm) \
    emith_add_r_r_imm(d, s, imm)

#define emith_sub_r_r_imm(d, s, imm) \
    emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)

#define emith_neg_r_r(d, s) \
    EOP_RSB_IMM(d, s, 0, 0)

#define emith_lsl(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)

#define emith_lsr(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)

#define emith_asr(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)

#define emith_ror_c(cond, d, s, cnt) \
    EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt)

#define emith_ror(d, s, cnt) \
    emith_ror_c(A_COND_AL, d, s, cnt)

#define emith_rol(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt))

#define emith_lslf(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)

#define emith_lsrf(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)

#define emith_asrf(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)

// note: only C flag updated correctly
#define emith_rolf(d, s, cnt) { \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
    /* we don't have ROL so we shift to get the right carry */ \
    EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
}

#define emith_rorf(d, s, cnt) \
    EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)

#define emith_rolcf(d) \
    emith_adcf_r_r(d, d)

#define emith_rorcf(d) \
    EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */

#define emith_negcf_r_r(d, s) \
    EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)

#define emith_mul(d, s1, s2) { \
    if ((d) != (s1)) /* rd != rm limitation */ \
        EOP_MUL(d, s1, s2); \
    else \
        EOP_MUL(d, s2, s1); \
}

#define emith_mul_u64(dlo, dhi, s1, s2) \
    EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mul_s64(dlo, dhi, s1, s2) \
    EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mula_s64(dlo, dhi, s1, s2) \
    EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)

// misc
#define emith_read_r_r_offs_c(cond, r, rs, offs) \
    EOP_LDR_IMM2(cond, r, rs, offs)

#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
    EOP_LDRB_IMM2(cond, r, rs, offs)

#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
    EOP_LDRH_IMM2(cond, r, rs, offs)

#define emith_read_r_r_offs(r, rs, offs) \
    emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read8_r_r_offs(r, rs, offs) \
    emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read16_r_r_offs(r, rs, offs) \
    emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_ctx_read(r, offs) \
    emith_read_r_r_offs(r, CONTEXT_REG, offs)

#define emith_ctx_read_ptr(r, offs) \
    emith_ctx_read(r, offs)

#define emith_ctx_write(r, offs) \
    EOP_STR_IMM(r, CONTEXT_REG, offs)

#define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \
    int v_, r_ = r, c_ = count, b_ = CONTEXT_REG; \
    for (v_ = 0; c_; c_--, r_++) \
        v_ |= 1 << r_; \
    if ((offs) != 0) { \
        EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2); \
        b_ = tmpr; \
    } \
    op(b_,v_); \
} while (0)

#define emith_ctx_read_multiple(r, offs, count, tmpr) \
    emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr)

#define emith_ctx_write_multiple(r, offs, count, tmpr) \
    emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr)
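
/* Example (illustrative values): emith_ctx_read_multiple(4, 0x10, 3, 12)
 * builds the mask for r4-r6 and emits
 *   add r12, r11, #0x10
 *   ldmia r12, {r4-r6}
 * The 30/2 rotate re-encodes offs as an AM1 immediate (imm8 rotated left
 * by 2), so offs must be word-aligned and below 0x400. */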

#define emith_clear_msb_c(cond, d, s, count) { \
    u32 t; \
    if ((count) <= 8) { \
        t = (count) - 8; \
        t = (0xff << t) & 0xff; \
        EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
    } else if ((count) >= 24) { \
        t = (count) - 24; \
        t = 0xff >> t; \
        EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
    } else { \
        EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \
        EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \
    } \
}

#define emith_clear_msb(d, s, count) \
    emith_clear_msb_c(A_COND_AL, d, s, count)

#define emith_sext(d, s, bits) { \
    EOP_MOV_REG_LSL(d,s,32 - (bits)); \
    EOP_MOV_REG_ASR(d,d,32 - (bits)); \
}
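
/* Example (illustrative registers): emith_sext(0, 1, 16) emits
 * "mov r0, r1, lsl #16" then "mov r0, r0, asr #16", sign-extending
 * the low 16 bits of r1 into r0. */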

#define emith_do_caller_regs(mask, func) { \
    u32 _reg_mask = (mask) & 0x500f; \
    if (_reg_mask) { \
        if (__builtin_parity(_reg_mask) == 1) \
            _reg_mask |= 0x10; /* eabi align */ \
        func(_reg_mask); \
    } \
}
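
/* 0x500f selects the caller-saved set (r0-r3, r12, lr); when the register
 * count is odd, r4 (bit 4, hence 0x10) is pushed as padding so the stack
 * stays 8-byte aligned as the ARM EABI requires. */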

#define emith_save_caller_regs(mask) \
    emith_do_caller_regs(mask, EOP_STMFD_SP)

#define emith_restore_caller_regs(mask) \
    emith_do_caller_regs(mask, EOP_LDMFD_SP)

// up to 4 args
#define emith_pass_arg_r(arg, reg) \
    EOP_MOV_REG_SIMPLE(arg, reg)

#define emith_pass_arg_imm(arg, imm) \
    emith_move_r_imm(arg, imm)

#define emith_jump(target) \
    emith_jump_cond(A_COND_AL, target)

#define emith_jump_patchable(target) \
    emith_jump(target)

#define emith_jump_cond(cond, target) \
    emith_xbranch(cond, target, 0)

#define emith_jump_cond_patchable(cond, target) \
    emith_jump_cond(cond, target)

#define emith_jump_patch(ptr, target) do { \
    u32 *ptr_ = ptr; \
    u32 val_ = (u32 *)(target) - ptr_ - 2; \
    *ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
} while (0)

#define emith_jump_at(ptr, target) { \
    u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \
    EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \
}

#define emith_jump_reg_c(cond, r) \
    EOP_C_BX(cond, r)

#define emith_jump_reg(r) \
    emith_jump_reg_c(A_COND_AL, r)

#define emith_jump_ctx_c(cond, offs) \
    EOP_LDR_IMM2(cond,15,CONTEXT_REG,offs)

#define emith_jump_ctx(offs) \
    emith_jump_ctx_c(A_COND_AL, offs)

#define emith_call_cond(cond, target) \
    emith_xbranch(cond, target, 1)

#define emith_call(target) \
    emith_call_cond(A_COND_AL, target)

#define emith_call_ctx(offs) { \
    emith_move_r_r(14, 15); \
    emith_jump_ctx(offs); \
}

#define emith_ret_c(cond) \
    emith_jump_reg_c(cond, 14)

#define emith_ret() \
    emith_ret_c(A_COND_AL)

#define emith_ret_to_ctx(offs) \
    emith_ctx_write(14, offs)

#define emith_push_ret() \
    EOP_STMFD_SP(A_R14M)

#define emith_pop_and_ret() \
    EOP_LDMFD_SP(A_R15M)

#define host_instructions_updated(base, end) \
    cache_flush_d_inval_i(base, end)

#define host_arg2reg(rd, arg) \
    rd = arg

/* SH2 drc specific */

/* pushes r12 for eabi alignment */
#define emith_sh2_drc_entry() \
    EOP_STMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R12M|A_R14M)

#define emith_sh2_drc_exit() \
    EOP_LDMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R12M|A_R15M)

#define emith_sh2_wcall(a, tab) { \
    emith_lsr(12, a, SH2_WRITE_SHIFT); \
    EOP_LDR_REG_LSL(A_COND_AL,12,tab,12,2); \
    emith_move_r_r(2, CONTEXT_REG); \
    emith_jump_reg(12); \
}

#define emith_sh2_dtbf_loop() { \
    int cr, rn; \
    int tmp_ = rcache_get_tmp(); \
    cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
    rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
    emith_sub_r_imm(rn, 1);                /* sub rn, #1 */ \
    emith_bic_r_imm(cr, 1);                /* bic cr, #1 */ \
    emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
    cycles = 0; \
    emith_asrf(tmp_, cr, 2+12);            /* movs tmp_, cr, asr #2+12 */ \
    EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0);     /* movmi tmp_, #0 */ \
    emith_lsl(cr, cr, 20);                 /* mov cr, cr, lsl #20 */ \
    emith_lsr(cr, cr, 20);                 /* mov cr, cr, lsr #20 */ \
    emith_subf_r_r(rn, tmp_);              /* subs rn, tmp_ */ \
    EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0);  /* rsbls tmp_, rn, #0 */ \
    EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */ \
    EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1);    /* orrls cr, #1 */ \
    EOP_MOV_IMM_C(A_COND_LS,rn,0,0);       /* movls rn, #0 */ \
    rcache_free_tmp(tmp_); \
}

#define emith_write_sr(sr, srcr) { \
    emith_lsr(sr, sr, 10); \
    emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
    emith_ror(sr, sr, 22); \
}
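
/* emith_write_sr keeps the upper 22 bits of sr and replaces its low 10
 * bits (the SH2 SR flags: M, Q, the I3-I0 interrupt mask, S and T) with
 * those of srcr: lsr #10 drops the old flag bits, the orr parks the new
 * ones in the top of the word, and ror #22 rotates everything back into
 * place. */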

#define emith_carry_to_t(srr, is_sub) { \
    if (is_sub) { /* has inverted C on ARM */ \
        emith_or_r_imm_c(A_COND_CC, srr, 1); \
        emith_bic_r_imm_c(A_COND_CS, srr, 1); \
    } else { \
        emith_or_r_imm_c(A_COND_CS, srr, 1); \
        emith_bic_r_imm_c(A_COND_CC, srr, 1); \
    } \
}

#define emith_tpop_carry(sr, is_sub) { \
    if (is_sub) \
        emith_eor_r_imm(sr, 1); \
    emith_lsrf(sr, sr, 1); \
}

#define emith_tpush_carry(sr, is_sub) { \
    emith_adc_r_r(sr, sr); \
    if (is_sub) \
        emith_eor_r_imm(sr, 1); \
}

/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */
#define emith_sh2_div1_step(rn, rm, sr) { \
    void *jmp0, *jmp1; \
    emith_tst_r_imm(sr, Q);               /* if (Q ^ M) */ \
    JMP_POS(jmp0);                        /* beq do_sub */ \
    emith_addf_r_r(rn, rm); \
    emith_eor_r_imm_c(A_COND_CS, sr, T); \
    JMP_POS(jmp1);                        /* b done */ \
    JMP_EMIT(A_COND_EQ, jmp0);            /* do_sub: */ \
    emith_subf_r_r(rn, rm); \
    emith_eor_r_imm_c(A_COND_CC, sr, T); \
    JMP_EMIT(A_COND_AL, jmp1);            /* done: */ \
}