- /*
- * Basic macros to emit ARM A64 instructions and some utils
- * Copyright (C) 2019 kub
- *
- * This work is licensed under the terms of MAME license.
- * See COPYING file in the top-level directory.
- */
- #define HOST_REGS 32
- #define CONTEXT_REG 19
- #define RET_REG 0
- // R31 doesn't exist, it aliases either with zero or SP
- #define SP 31 // stack pointer
- #define Z0 31 // zero register
- #define LR 30 // link register
- #define FP 29 // frame pointer
- #define PR 18 // platform register
- // All operations except ptr ops use only the lower 32 bits of the A64 registers.
- // The upper 32 bits are used by ptr ops only.
- #define A64_COND_EQ 0x0
- #define A64_COND_NE 0x1
- #define A64_COND_HS 0x2
- #define A64_COND_LO 0x3
- #define A64_COND_MI 0x4
- #define A64_COND_PL 0x5
- #define A64_COND_VS 0x6
- #define A64_COND_VC 0x7
- #define A64_COND_HI 0x8
- #define A64_COND_LS 0x9
- #define A64_COND_GE 0xa
- #define A64_COND_LT 0xb
- #define A64_COND_GT 0xc
- #define A64_COND_LE 0xd
- #define A64_COND_CS A64_COND_HS
- #define A64_COND_CC A64_COND_LO
- #define A64_COND_AL 0xe
- #define A64_COND_NV 0xf
- /* unified conditions */
- #define DCOND_EQ A64_COND_EQ
- #define DCOND_NE A64_COND_NE
- #define DCOND_MI A64_COND_MI
- #define DCOND_PL A64_COND_PL
- #define DCOND_HI A64_COND_HI
- #define DCOND_HS A64_COND_HS
- #define DCOND_LO A64_COND_LO
- #define DCOND_GE A64_COND_GE
- #define DCOND_GT A64_COND_GT
- #define DCOND_LT A64_COND_LT
- #define DCOND_LS A64_COND_LS
- #define DCOND_LE A64_COND_LE
- #define DCOND_VS A64_COND_VS
- #define DCOND_VC A64_COND_VC
- #define DCOND_CS A64_COND_HS
- #define DCOND_CC A64_COND_LO
- // unified insn
- #define A64_INSN(op, b29, b22, b21, b16, b12, b10, b5, b0) \
- (((op)<<25)|((b29)<<29)|((b22)<<22)|((b21)<<21)|((b16)<<16)|((b12)<<12)|((b10)<<10)|((b5)<<5)|((b0)<<0))
- #define _ 0 // marker for "field unused"
- #define A64_NOP \
- A64_INSN(0xa,0x6,0x4,_,0x3,0x2,_,0,0x1f) // 0xd503201f
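- // e.g. the fields of A64_NOP above combine as
- // (0x6<<29)|(0xa<<25)|(0x4<<22)|(0x3<<16)|(0x2<<12)|0x1f = 0xd503201f,
- // the architectural NOP (HINT #0) opcode.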
- // arithmetic/logical
- enum { OP_AND, OP_OR, OP_EOR, OP_ANDS, OP_ADD, OP_ADDS, OP_SUB, OP_SUBS };
- enum { ST_LSL, ST_LSR, ST_ASR, ST_ROR };
- enum { XT_UXTW=0x4, XT_UXTX=0x6, XT_LSL=0x7, XT_SXTW=0xc, XT_SXTX=0xe };
- #define OP_SZ64 (1 << 31) // bit for 64 bit op selection
- #define OP_N64 (1 << 22) // N-bit for 64 bit logical immediate ops
- #define A64_OP_REG(op, n, rd, rn, rm, stype, simm) /* arith+logical, ST_ */ \
- A64_INSN(0x5,(op)&3,((op)&4)|stype,n,rm,_,simm,rn,rd)
- #define A64_OP_XREG(op, rd, rn, rm, xtopt, simm) /* arith, XT_ */ \
- A64_INSN(0x5,(op)&3,0x4,1,rm,xtopt,simm,rn,rd)
- #define A64_OP_IMM12(op, rd, rn, imm, lsl12) /* arith */ \
- A64_INSN(0x8,(op)&3,((op)&4)|lsl12,_,_,_,(imm)&0xfff,rn,rd)
- #define A64_OP_IMMBM(op, rd, rn, immr, imms) /* logical */ \
- A64_INSN(0x9,(op)&3,0x0,_,immr,_,(imms)&0x3f,rn,rd)
- // rd = rn OP (rm SHIFT simm)
- #define A64_ADD_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_ADD,0,rd,rn,rm,stype,simm)
- #define A64_ADDS_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_ADDS,0,rd,rn,rm,stype,simm)
- #define A64_SUB_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_SUB,0,rd,rn,rm,stype,simm)
- #define A64_SUBS_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_SUBS,0,rd,rn,rm,stype,simm)
- #define A64_NEG_REG(rd, rm, stype, simm) \
- A64_SUB_REG(rd,Z0,rm,stype,simm)
- #define A64_NEGS_REG(rd, rm, stype, simm) \
- A64_SUBS_REG(rd,Z0,rm,stype,simm)
- #define A64_NEGC_REG(rd, rm) \
- A64_SBC_REG(rd,Z0,rm,0)
- #define A64_NEGCS_REG(rd, rm) \
- A64_SBCS_REG(rd,Z0,rm)
- #define A64_CMP_REG(rn, rm, stype, simm) \
- A64_SUBS_REG(Z0, rn, rm, stype, simm)
- #define A64_CMN_REG(rn, rm, stype, simm) \
- A64_ADDS_REG(Z0, rn, rm, stype, simm)
- #define A64_EOR_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_EOR,0,rd,rn,rm,stype,simm)
- #define A64_OR_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_OR,0,rd,rn,rm,stype,simm)
- #define A64_ORN_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_OR,1,rd,rn,rm,stype,simm)
- #define A64_AND_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_AND,0,rd,rn,rm,stype,simm)
- #define A64_ANDS_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_ANDS,0,rd,rn,rm,stype,simm)
- #define A64_BIC_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_AND,1,rd,rn,rm,stype,simm)
- #define A64_BICS_REG(rd, rn, rm, stype, simm) \
- A64_OP_REG(OP_ANDS,1,rd,rn,rm,stype,simm)
- #define A64_TST_REG(rn, rm, stype, simm) \
- A64_ANDS_REG(Z0, rn, rm, stype, simm)
- #define A64_MOV_REG(rd, rm, stype, simm) \
- A64_OR_REG(rd, Z0, rm, stype, simm)
- #define A64_MVN_REG(rd, rm, stype, simm) \
- A64_ORN_REG(rd, Z0, rm, stype, simm)
- // rd = rn OP (rm EXTEND simm)
- #define A64_ADD_XREG(rd, rn, rm, xtopt, simm) \
- A64_OP_XREG(OP_ADD,rd,rn,rm,xtopt,simm)
- #define A64_ADDS_XREG(rd, rn, rm, xtopt, simm) \
- A64_OP_XREG(OP_ADDS,rd,rn,rm,xtopt,simm)
- #define A64_SUB_XREG(rd, rn, rm, xtopt, simm) \
- A64_OP_XREG(OP_SUB,rd,rn,rm,xtopt,simm)
- #define A64_SUBS_XREG(rd, rn, rm, xtopt, simm) \
- A64_OP_XREG(OP_SUBS,rd,rn,rm,xtopt,simm)
- // rd = rn OP rm OP carry
- #define A64_ADC_REG(rd, rn, rm) \
- A64_INSN(0xd,OP_ADD &3,0x0,_,rm,_,_,rn,rd)
- #define A64_ADCS_REG(rd, rn, rm) \
- A64_INSN(0xd,OP_ADDS&3,0x0,_,rm,_,_,rn,rd)
- #define A64_SBC_REG(rd, rn, rm, s) \
- A64_INSN(0xd,OP_SUB &3,0x0,_,rm,_,_,rn,rd)
- #define A64_SBCS_REG(rd, rn, rm) \
- A64_INSN(0xd,OP_SUBS&3,0x0,_,rm,_,_,rn,rd)
- // rd = rn SHIFT rm
- #define A64_LSL_REG(rd, rn, rm) \
- A64_INSN(0xd,0x0,0x3,_,rm,_,0x8,rn,rd)
- #define A64_LSR_REG(rd, rn, rm) \
- A64_INSN(0xd,0x0,0x3,_,rm,_,0xa,rn,rd)
- #define A64_ASR_REG(rd, rn, rm) \
- A64_INSN(0xd,0x0,0x3,_,rm,_,0x9,rn,rd)
- #define A64_ROR_REG(rd, rn, rm) \
- A64_INSN(0xd,0x0,0x3,_,rm,_,0xb,rn,rd)
- // rd = rn with bit order reversed (RBIT)
- #define A64_RBIT_REG(rd, rn) \
- A64_INSN(0xd,0x2,0x3,_,_,_,_,rn,rd)
- // rd = rn OP (imm12 << (0|12))
- #define A64_ADD_IMM(rd, rn, imm12, lsl12) \
- A64_OP_IMM12(OP_ADD, rd, rn, imm12, lsl12)
- #define A64_ADDS_IMM(rd, rn, imm12, lsl12) \
- A64_OP_IMM12(OP_ADDS, rd, rn, imm12, lsl12)
- #define A64_SUB_IMM(rd, rn, imm12, lsl12) \
- A64_OP_IMM12(OP_SUB, rd, rn, imm12, lsl12)
- #define A64_SUBS_IMM(rd, rn, imm12, lsl12) \
- A64_OP_IMM12(OP_SUBS, rd, rn, imm12, lsl12)
- #define A64_CMP_IMM(rn, imm12, lsl12) \
- A64_SUBS_IMM(Z0,rn,imm12,lsl12)
- #define A64_CMN_IMM(rn, imm12, lsl12) \
- A64_ADDS_IMM(Z0,rn,imm12,lsl12)
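- // e.g. A64_ADD_IMM(0, 1, 0x123, 0) encodes to 0x11048c20, i.e. "add w0, w1, #0x123";
- // with lsl12=1 the same immediate is applied shifted left by 12 bits.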
- // rd = rn OP immbm; immbm is a repeated special pattern of 2^n bits length
- #define A64_EOR_IMM(rd, rn, immr, imms) \
- A64_OP_IMMBM(OP_EOR,rd,rn,immr,imms)
- #define A64_OR_IMM(rd, rn, immr, imms) \
- A64_OP_IMMBM(OP_OR,rd,rn,immr,imms)
- #define A64_AND_IMM(rd, rn, immr, imms) \
- A64_OP_IMMBM(OP_AND,rd,rn,immr,imms)
- #define A64_ANDS_IMM(rd, rn, immr, imms) \
- A64_OP_IMMBM(OP_ANDS,rd,rn,immr,imms)
- #define A64_TST_IMM(rn, immr, imms) \
- A64_OP_IMMBM(OP_ANDS,Z0,rn,immr,imms)
- #define A64_MOV_IMM(rd, rn, immr, imms) \
- A64_OP_IMMBM(OP_OR,rd,Z0,immr,imms)
- // rd = (imm16 << (0|16|32|48))
- #define A64_MOVN_IMM(rd, imm16, lsl16) \
- A64_INSN(0x9,0x0,0x2,lsl16,_,_,_,(imm16)&0xffff,rd)
- #define A64_MOVZ_IMM(rd, imm16, lsl16) \
- A64_INSN(0x9,0x2,0x2,lsl16,_,_,_,(imm16)&0xffff,rd)
- #define A64_MOVK_IMM(rd, imm16, lsl16) \
- A64_INSN(0x9,0x3,0x2,lsl16,_,_,_,(imm16)&0xffff,rd)
- #define A64_MOVT_IMM(rd, imm16, lsl16) \
- A64_INSN(0x9,0x3,0x2,lsl16,_,_,_,(imm16)&0xffff,rd)
- // rd = rn SHIFT imm6
- #define A64_LSL_IMM(rd, rn, bits) /* UBFM */ \
- A64_INSN(0x9,0x2,0x4,_,32-(bits),_,31-(bits),rn,rd)
- #define A64_LSR_IMM(rd, rn, bits) /* UBFM */ \
- A64_INSN(0x9,0x2,0x4,_,bits,_,31,rn,rd)
- #define A64_ASR_IMM(rd, rn, bits) /* SBFM */ \
- A64_INSN(0x9,0x0,0x4,_,bits,_,31,rn,rd)
- #define A64_ROR_IMM(rd, rn, bits) /* EXTR */ \
- A64_INSN(0x9,0x0,0x6,_,rn,_,bits,rn,rd)
- #define A64_SXT_IMM(rd, rn, bits) \
- A64_INSN(0x9,0x0,0x4,0,0,_,bits-1,rn,rd)
- #define A64_UXT_IMM(rd, rn, bits) \
- A64_INSN(0x9,0x2,0x4,0,0,_,bits-1,rn,rd)
- // multiplication
- #define A64_SMULL(rd, rn, rm) /* Xd = Wn*Wm (+ Xa) */ \
- A64_INSN(0xd,0x4,0x4,1,rm,_,Z0,rn,rd)
- #define A64_SMADDL(rd, rn, rm, ra) \
- A64_INSN(0xd,0x4,0x4,1,rm,_,ra,rn,rd)
- #define A64_UMULL(rd, rn, rm) \
- A64_INSN(0xd,0x4,0x6,1,rm,_,Z0,rn,rd)
- #define A64_UMADDL(rd, rn, rm, ra) \
- A64_INSN(0xd,0x4,0x6,1,rm,_,ra,rn,rd)
- #define A64_MUL(rd, rn, rm) /* Wd = Wn*Wm (+ Wa) */ \
- A64_INSN(0xd,0x0,0x4,0,rm,_,Z0,rn,rd)
- #define A64_MADD(rd, rn, rm, ra) \
- A64_INSN(0xd,0x0,0x4,0,rm,_,ra,rn,rd)
- // branching
- #define A64_B(offs26) \
- A64_INSN(0xa,0x0,_,_,_,_,_,_,(offs26) >> 2)
- #define A64_BL(offs26) \
- A64_INSN(0xa,0x4,_,_,_,_,_,_,(offs26) >> 2)
- #define A64_BR(rn) \
- A64_INSN(0xb,0x6,_,_,0x1f,_,_,rn,_)
- #define A64_BLR(rn) \
- A64_INSN(0xb,0x6,_,_,0x3f,_,_,rn,_)
- #define A64_RET(rn) /* same as BR, but hint for cpu */ \
- A64_INSN(0xb,0x6,_,_,0x5f,_,_,rn,_)
- #define A64_BCOND(cond, offs19) \
- A64_INSN(0xa,0x2,_,_,_,_,_,(offs19) >> 2,(cond))
- // load pc-relative
- #define A64_LDRLIT_IMM(rd, offs19) \
- A64_INSN(0xc,0x0,0x0,_,_,_,_,(offs19) >> 2,rd)
- #define A64_LDRXLIT_IMM(rd, offs19) \
- A64_INSN(0xc,0x2,0x0,_,_,_,_,(offs19) >> 2,rd)
- #define A64_ADRXLIT_IMM(rd, offs21) \
- A64_INSN(0x8,(offs21)&3,0x0,_,_,_,_,(offs21) >> 2,rd)
- // load/store indexed base. Only the signed unscaled variant is used here.
- enum { LT_ST, LT_LD, LT_LDSX, LT_LDS };
- enum { AM_B=0x1, AM_H=0x3, AM_W=0x5, AM_X=0x7 };
- enum { AM_IDX, AM_IDXPOST, AM_IDXREG, AM_IDXPRE };
- #define A64_LDST_AM(ir,rm,optimm) (((ir)<<9)|((rm)<<4)|((optimm)&0x1ff))
- #define A64_OP_LDST(sz, op, am, mode, rm, rd) \
- A64_INSN(0xc,sz,op,_,_,am,mode,rm,rd)
- #define A64_LDSTX_IMM(rd, rn, offs9, ld, mode) \
- A64_OP_LDST(AM_X,ld,A64_LDST_AM(0,_,offs9),mode,rn,rd)
- #define A64_LDST_IMM(rd, rn, offs9, ld, mode) \
- A64_OP_LDST(AM_W,ld,A64_LDST_AM(0,_,offs9),mode,rn,rd)
- #define A64_LDSTH_IMM(rd, rn, offs9, ld, mode) \
- A64_OP_LDST(AM_H,ld,A64_LDST_AM(0,_,offs9),mode,rn,rd)
- #define A64_LDSTB_IMM(rd, rn, offs9, ld, mode) \
- A64_OP_LDST(AM_B,ld,A64_LDST_AM(0,_,offs9),mode,rn,rd)
- // NB: pre/postindex isn't available with register offset
- #define A64_LDSTX_REG(rd, rn, rm, ld, opt) \
- A64_OP_LDST(AM_X,ld,A64_LDST_AM(1,rm,opt),AM_IDXREG,rn,rd)
- #define A64_LDST_REG(rd, rn, rm, ld, opt) \
- A64_OP_LDST(AM_W,ld,A64_LDST_AM(1,rm,opt),AM_IDXREG,rn,rd)
- #define A64_LDSTH_REG(rd, rn, rm, ld, opt) \
- A64_OP_LDST(AM_H,ld,A64_LDST_AM(1,rm,opt),AM_IDXREG,rn,rd)
- #define A64_LDSTB_REG(rd, rn, rm, ld, opt) \
- A64_OP_LDST(AM_B,ld,A64_LDST_AM(1,rm,opt),AM_IDXREG,rn,rd)
- #define A64_LDSTPX_IMM(rn, r1, r2, offs7, ld, mode) \
- A64_INSN(0x4,0x5,(mode<<1)|ld,_,_,(offs7)&0x3f8,r2,rn,r1)
- // 64 bit stuff for pointer handling
- #define A64_ADDX_XREG(rd, rn, rm, xtopt, simm) \
- OP_SZ64|A64_OP_XREG(OP_ADD,rd,rn,rm,xtopt,simm)
- #define A64_ADDX_REG(rd, rn, rm, stype, simm) \
- OP_SZ64|A64_ADD_REG(rd, rn, rm, stype, simm)
- #define A64_ADDXS_REG(rd, rn, rm, stype, simm) \
- OP_SZ64|A64_ADDS_REG(rd, rn, rm, stype, simm)
- #define A64_ORX_REG(rd, rn, rm, stype, simm) \
- OP_SZ64|A64_OR_REG(rd, rn, rm, stype, simm)
- #define A64_TSTX_REG(rn, rm, stype, simm) \
- OP_SZ64|A64_TST_REG(rn, rm, stype, simm)
- #define A64_MOVX_REG(rd, rm, stype, simm) \
- OP_SZ64|A64_MOV_REG(rd, rm, stype, simm)
- #define A64_ADDX_IMM(rd, rn, imm12) \
- OP_SZ64|A64_ADD_IMM(rd, rn, imm12, 0)
- #define A64_EORX_IMM(rd, rn, immr, imms) \
- OP_SZ64|OP_N64|A64_EOR_IMM(rd, rn, immr, imms)
- #define A64_UXTX_IMM(rd, rn, bits) \
- OP_SZ64|OP_N64|A64_UXT_IMM(rd, rn, bits)
- #define A64_LSRX_IMM(rd, rn, bits) \
- OP_SZ64|OP_N64|A64_LSR_IMM(rd, rn, bits)|(63<<10)
- // XXX: tcache_ptr type for SVP and SH2 compilers differs..
- #define EMIT_PTR(ptr, x) \
- do { \
- *(u32 *)(ptr) = x; \
- ptr = (void *)((u8 *)(ptr) + sizeof(u32)); \
- } while (0)
- #define EMIT(op) \
- do { \
- EMIT_PTR(tcache_ptr, op); \
- COUNT_OP; \
- } while (0)
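- // usage sketch: each A64_* macro evaluates to one u32 opcode and EMIT appends it
- // to the translation cache, e.g. EMIT(A64_ADD_REG(0, 1, 2, ST_LSL, 0)) stores
- // 0x0b020020 ("add w0, w1, w2") at tcache_ptr and advances it by 4 bytes.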
- // if-then-else conditional execution helpers
- #define JMP_POS(ptr) \
- ptr = tcache_ptr; \
- EMIT(A64_B(0));
- #define JMP_EMIT(cond, ptr) { \
- u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr); \
- EMIT_PTR(ptr, A64_BCOND(cond, val_ & 0x001fffff)); \
- }
- #define JMP_EMIT_NC(ptr) { \
- u32 val_ = (u8 *)tcache_ptr - (u8 *)(ptr); \
- EMIT_PTR(ptr, A64_B(val_ & 0x0fffffff)); \
- }
- #define EMITH_JMP_START(cond) { \
- u8 *cond_ptr; \
- JMP_POS(cond_ptr)
- #define EMITH_JMP_END(cond) \
- JMP_EMIT(cond, cond_ptr); \
- }
- #define EMITH_JMP3_START(cond) { \
- u8 *cond_ptr, *else_ptr; \
- JMP_POS(cond_ptr)
- #define EMITH_JMP3_MID(cond) \
- JMP_POS(else_ptr); \
- JMP_EMIT(cond, cond_ptr);
- #define EMITH_JMP3_END() \
- JMP_EMIT_NC(else_ptr); \
- }
- // "simple" jump (no more then a few insns)
- // ARM32 will use conditional instructions here
- #define EMITH_SJMP_START EMITH_JMP_START
- #define EMITH_SJMP_END EMITH_JMP_END
- #define EMITH_SJMP3_START EMITH_JMP3_START
- #define EMITH_SJMP3_MID EMITH_JMP3_MID
- #define EMITH_SJMP3_END EMITH_JMP3_END
- #define EMITH_SJMP2_START(cond) \
- EMITH_SJMP3_START(cond)
- #define EMITH_SJMP2_MID(cond) \
- EMITH_SJMP3_MID(cond)
- #define EMITH_SJMP2_END(cond) \
- EMITH_SJMP3_END()
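- // usage sketch for the if/else helpers: the code emitted between START and MID
- // runs when cond does NOT hold at runtime, the code between MID and END when it
- // does, e.g.
- //   emith_cmp_r_imm(r, 0);
- //   EMITH_SJMP3_START(DCOND_EQ);
- //   ... // emitted code, taken if r != 0
- //   EMITH_SJMP3_MID(DCOND_EQ);
- //   ... // emitted code, taken if r == 0
- //   EMITH_SJMP3_END();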
- // data processing, register
- #define emith_move_r_r_ptr(d, s) \
- EMIT(A64_MOVX_REG(d, s, ST_LSL, 0))
- #define emith_move_r_r_ptr_c(cond, d, s) \
- emith_move_r_r_ptr(d, s)
- #define emith_move_r_r(d, s) \
- EMIT(A64_MOV_REG(d, s, ST_LSL, 0))
- #define emith_move_r_r_c(cond, d, s) \
- emith_move_r_r(d, s)
- #define emith_mvn_r_r(d, s) \
- EMIT(A64_MVN_REG(d, s, ST_LSL, 0))
- #define emith_add_r_r_r_lsl_ptr(d, s1, s2, simm) do { \
- if (simm < 4) EMIT(A64_ADDX_XREG(d, s1, s2, XT_SXTW, simm)); \
- else EMIT(A64_ADDX_REG(d, s1, s2, ST_LSL, simm)); \
- } while (0)
- #define emith_add_r_r_r_lsl(d, s1, s2, simm) \
- EMIT(A64_ADD_REG(d, s1, s2, ST_LSL, simm))
- #define emith_addf_r_r_r_lsl(d, s1, s2, simm) \
- EMIT(A64_ADDS_REG(d, s1, s2, ST_LSL, simm))
- #define emith_addf_r_r_r_lsr(d, s1, s2, simm) \
- EMIT(A64_ADDS_REG(d, s1, s2, ST_LSR, simm))
- #define emith_sub_r_r_r_lsl(d, s1, s2, simm) \
- EMIT(A64_SUB_REG(d, s1, s2, ST_LSL, simm))
- #define emith_subf_r_r_r_lsl(d, s1, s2, simm) \
- EMIT(A64_SUBS_REG(d, s1, s2, ST_LSL, simm))
- #define emith_or_r_r_r_lsl(d, s1, s2, simm) \
- EMIT(A64_OR_REG(d, s1, s2, ST_LSL, simm))
- #define emith_eor_r_r_r_lsl(d, s1, s2, simm) \
- EMIT(A64_EOR_REG(d, s1, s2, ST_LSL, simm))
- #define emith_eor_r_r_r_lsr(d, s1, s2, simm) \
- EMIT(A64_EOR_REG(d, s1, s2, ST_LSR, simm))
- #define emith_and_r_r_r_lsl(d, s1, s2, simm) \
- EMIT(A64_AND_REG(d, s1, s2, ST_LSL, simm))
- #define emith_or_r_r_lsl(d, s, lslimm) \
- emith_or_r_r_r_lsl(d, d, s, lslimm)
- #define emith_eor_r_r_lsr(d, s, lsrimm) \
- emith_eor_r_r_r_lsr(d, d, s, lsrimm)
- #define emith_add_r_r_r(d, s1, s2) \
- emith_add_r_r_r_lsl(d, s1, s2, 0)
- #define emith_addf_r_r_r(d, s1, s2) \
- emith_addf_r_r_r_lsl(d, s1, s2, 0)
- #define emith_sub_r_r_r(d, s1, s2) \
- emith_sub_r_r_r_lsl(d, s1, s2, 0)
- #define emith_subf_r_r_r(d, s1, s2) \
- emith_subf_r_r_r_lsl(d, s1, s2, 0)
- #define emith_or_r_r_r(d, s1, s2) \
- emith_or_r_r_r_lsl(d, s1, s2, 0)
- #define emith_eor_r_r_r(d, s1, s2) \
- emith_eor_r_r_r_lsl(d, s1, s2, 0)
- #define emith_and_r_r_r(d, s1, s2) \
- emith_and_r_r_r_lsl(d, s1, s2, 0)
- #define emith_add_r_r_ptr(d, s) \
- emith_add_r_r_r_lsl_ptr(d, d, s, 0)
- #define emith_add_r_r(d, s) \
- emith_add_r_r_r(d, d, s)
- #define emith_sub_r_r(d, s) \
- emith_sub_r_r_r(d, d, s)
- #define emith_neg_r_r(d, s) \
- EMIT(A64_NEG_REG(d, s, ST_LSL, 0))
- #define emith_adc_r_r_r(d, s1, s2) \
- EMIT(A64_ADC_REG(d, s1, s2))
- #define emith_adc_r_r(d, s) \
- EMIT(A64_ADC_REG(d, d, s))
- #define emith_adcf_r_r_r(d, s1, s2) \
- EMIT(A64_ADCS_REG(d, s1, s2))
- #define emith_sbcf_r_r_r(d, s1, s2) \
- EMIT(A64_SBCS_REG(d, s1, s2))
- #define emith_and_r_r(d, s) \
- emith_and_r_r_r(d, d, s)
- #define emith_and_r_r_c(cond, d, s) \
- emith_and_r_r(d, s)
- #define emith_or_r_r(d, s) \
- emith_or_r_r_r(d, d, s)
- #define emith_eor_r_r(d, s) \
- emith_eor_r_r_r(d, d, s)
- #define emith_tst_r_r_ptr(d, s) \
- EMIT(A64_TSTX_REG(d, s, ST_LSL, 0))
- #define emith_tst_r_r(d, s) \
- EMIT(A64_TST_REG(d, s, ST_LSL, 0))
- #define emith_teq_r_r(d, s) do { \
- int _t = rcache_get_tmp(); \
- emith_eor_r_r_r(_t, d, s); \
- emith_cmp_r_imm(_t, 0); \
- rcache_free_tmp(_t); \
- } while (0)
- #define emith_cmp_r_r(d, s) \
- EMIT(A64_CMP_REG(d, s, ST_LSL, 0))
- #define emith_addf_r_r(d, s) \
- emith_addf_r_r_r(d, d, s)
- #define emith_subf_r_r(d, s) \
- emith_subf_r_r_r(d, d, s)
- #define emith_adcf_r_r(d, s) \
- emith_adcf_r_r_r(d, d, s)
- #define emith_sbcf_r_r(d, s) \
- emith_sbcf_r_r_r(d, d, s)
- #define emith_negcf_r_r(d, s) \
- emith_sbcf_r_r_r(d, Z0, s)
- // move immediate
- static void emith_move_imm64(int r, int wx, int64_t imm)
- {
- int sz64 = wx ? OP_SZ64:0;
- int c, s;
- if (!imm) {
- EMIT(sz64|A64_MOVZ_IMM(r, imm, 0));
- return;
- }
- if (imm && -imm == (u16)-imm) {
- EMIT(sz64|A64_MOVN_IMM(r, ~imm, 0));
- return;
- }
- for (c = s = 0; s < (wx ? 4:2) && imm; s++, imm >>= 16)
- if ((u16)(imm)) {
- if (c++) EMIT(sz64|A64_MOVK_IMM(r, imm, s));
- else EMIT(sz64|A64_MOVZ_IMM(r, imm, s));
- }
- }
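- // e.g. emith_move_r_imm(0, 0x12345678) emits "movz w0, #0x5678" followed by
- // "movk w0, #0x1234, lsl #16"; values in [-0xffff,-1] take the single MOVN path
- // and 0 the single MOVZ path above.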
- #define emith_move_r_ptr_imm(r, imm) \
- emith_move_imm64(r, 1, (intptr_t)(imm))
- #define emith_move_r_imm(r, imm) \
- emith_move_imm64(r, 0, (s32)(imm))
- #define emith_move_r_imm_c(cond, r, imm) \
- emith_move_r_imm(r, imm)
- // arithmetic, immediate
- static void emith_arith_imm(int op, int wx, int rd, int rn, s32 imm)
- {
- u32 sz64 = wx ? OP_SZ64:0;
- if (imm < 0) {
- op ^= (OP_ADD ^ OP_SUB);
- imm = -imm;
- }
- if (imm == 0) {
- // value 0, must emit if op is *S or source isn't dest
- if ((op & 1) || rd != rn)
- EMIT(sz64|A64_OP_IMM12(op, rd, rn, 0, 0));
- } else if (imm >> 24) {
- // value too large
- int _t = rcache_get_tmp();
- emith_move_r_imm(_t, imm);
- EMIT(sz64|A64_OP_REG(op, 0, rd, rn, _t, ST_LSL, 0));
- rcache_free_tmp(_t);
- } else {
- int rs = rn;
- if ((imm) & 0x000fff) {
- EMIT(sz64|A64_OP_IMM12(op, rd, rs, imm, 0)); rs = rd;
- }
- if ((imm) & 0xfff000) {
- EMIT(sz64|A64_OP_IMM12(op, rd, rs, imm >>12, 1));
- }
- }
- }
- #define emith_add_r_imm(r, imm) \
- emith_arith_imm(OP_ADD, 0, r, r, imm)
- #define emith_add_r_imm_c(cond, r, imm) \
- emith_add_r_imm(r, imm)
- #define emith_addf_r_imm(r, imm) \
- emith_arith_imm(OP_ADDS, 0, r, r, imm)
- #define emith_sub_r_imm(r, imm) \
- emith_arith_imm(OP_SUB, 0, r, r, imm)
- #define emith_sub_r_imm_c(cond, r, imm) \
- emith_sub_r_imm(r, imm)
- #define emith_subf_r_imm(r, imm) \
- emith_arith_imm(OP_SUBS, 0, r, r, imm)
- #define emith_adc_r_imm(r, imm) do { \
- int _t = rcache_get_tmp(); \
- emith_move_r_imm(_t, imm); \
- emith_adc_r_r(r, _t); \
- rcache_free_tmp(_t); \
- } while (0)
- #define emith_adcf_r_imm(r, imm) do { \
- int _t = rcache_get_tmp(); \
- emith_move_r_imm(_t, imm); \
- emith_adcf_r_r(r, _t); \
- rcache_free_tmp(_t); \
- } while (0)
- #define emith_cmp_r_imm(r, imm) do { \
- u32 op_ = OP_SUBS, imm_ = (u8)imm; \
- if ((s8)imm_ < 0) { \
- imm_ = (u8)-imm_; \
- op_ = OP_ADDS; \
- } \
- EMIT(A64_OP_IMM12(op_, Z0, r, imm_, 0)); \
- } while (0)
- #define emith_add_r_r_ptr_imm(d, s, imm) \
- emith_arith_imm(OP_ADD, 1, d, s, imm)
- #define emith_add_r_r_imm(d, s, imm) \
- emith_arith_imm(OP_ADD, 0, d, s, imm)
- #define emith_sub_r_r_imm(d, s, imm) \
- emith_arith_imm(OP_SUB, 0, d, s, imm)
- #define emith_sub_r_r_imm_c(cond, d, s, imm) \
- emith_sub_r_r_imm(d, s, imm)
- #define emith_subf_r_r_imm(d, s, imm) \
- emith_arith_imm(OP_SUBS, 0, d, s, imm)
- // logical, immediate; the value describes a bitmask, see ARMv8 ArchRefMan
- // NB: deal only with simple masks 0{n}1{m}0{o} or 1{n}0{m}1{o}, 0<m<32 n+m+o=32
- static int emith_log_isbm(u32 imm, int *n, int *m, int *invert)
- {
- *invert = (s32)imm < 0; // topmost bit set?
- if (*invert)
- imm = ~imm;
- if (imm) {
- *n = __builtin_clz(imm); imm = ~(imm << *n); // insert 1's
- *m = __builtin_clz(imm); imm = ~ imm << *m; // insert 0's
- return !imm;
- } else {
- *n = *m = 0;
- return 0;
- }
- }
- static void emith_log_imm(int op, int wx, int rd, int rn, u32 imm)
- {
- int n, m, invert;
- u32 sz64 = wx ? OP_SZ64:0;
- if (emith_log_isbm(imm, &n, &m, &invert) && (!wx || !invert)) {
- n += (wx ? 32:0); // extend pattern if 64 bit regs are used
- if (invert) EMIT(sz64|A64_OP_IMMBM(op, rd, rn, n, 32-m-1));
- else EMIT(sz64|A64_OP_IMMBM(op, rd, rn, n+m, m-1));
- } else {
- // imm too complex
- int _t = rcache_get_tmp();
- if (count_bits(imm) > 16) {
- emith_move_r_imm(_t, ~imm);
- EMIT(sz64|A64_OP_REG(op, 1, rd, rn, _t, ST_LSL, 0));
- } else {
- emith_move_r_imm(_t, imm);
- EMIT(sz64|A64_OP_REG(op, 0, rd, rn, _t, ST_LSL, 0));
- }
- rcache_free_tmp(_t);
- }
- }
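- // worked example: 0x00ff0000 is the simple pattern 0{8}1{8}0{16}, so
- // emith_log_isbm() yields n=8, m=8, invert=0 and emith_and_r_imm(r, 0x00ff0000)
- // becomes a single "and wR, wR, #0x00ff0000" (immr=n+m=16, imms=m-1=7), while
- // e.g. 0x00ff00ff fails the simple-mask check and goes through a temp register.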
- #define emith_and_r_imm(r, imm) \
- emith_log_imm(OP_AND, 0, r, r, imm)
- #define emith_or_r_imm(r, imm) \
- emith_log_imm(OP_OR, 0, r, r, imm)
- #define emith_or_r_imm_c(cond, r, imm) \
- emith_or_r_imm(r, imm)
- #define emith_eor_r_imm_ptr(r, imm) \
- emith_log_imm(OP_EOR, 1, r, r, imm)
- #define emith_eor_r_imm_ptr_c(cond, r, imm) \
- emith_eor_r_imm_ptr(r, imm)
- #define emith_eor_r_imm(r, imm) \
- emith_log_imm(OP_EOR, 0, r, r, imm)
- #define emith_eor_r_imm_c(cond, r, imm) \
- emith_eor_r_imm(r, imm)
- /* NB: BIC #imm not available in A64; use AND #~imm instead */
- #define emith_bic_r_imm(r, imm) \
- emith_log_imm(OP_AND, 0, r, r, ~(imm))
- #define emith_bic_r_imm_c(cond, r, imm) \
- emith_bic_r_imm(r, imm)
- #define emith_tst_r_imm(r, imm) \
- emith_log_imm(OP_ANDS, 0, Z0, r, imm)
- #define emith_tst_r_imm_c(cond, r, imm) \
- emith_tst_r_imm(r, imm)
- #define emith_and_r_r_imm(d, s, imm) \
- emith_log_imm(OP_AND, 0, d, s, imm)
- #define emith_or_r_r_imm(d, s, imm) \
- emith_log_imm(OP_OR, 0, d, s, imm)
- #define emith_eor_r_r_imm(d, s, imm) \
- emith_log_imm(OP_EOR, 0, d, s, imm)
- // shift
- #define emith_lsl(d, s, cnt) \
- EMIT(A64_LSL_IMM(d, s, cnt))
- #define emith_lsr(d, s, cnt) \
- EMIT(A64_LSR_IMM(d, s, cnt))
- #define emith_asr(d, s, cnt) \
- EMIT(A64_ASR_IMM(d, s, cnt))
- #define emith_ror(d, s, cnt) \
- EMIT(A64_ROR_IMM(d, s, cnt))
- #define emith_ror_c(cond, d, s, cnt) \
- emith_ror(d, s, cnt)
- #define emith_rol(d, s, cnt) \
- EMIT(A64_ROR_IMM(d, s, 32-(cnt)))
- // NB: shift with carry not directly supported in A64 :-|.
- #define emith_lslf(d, s, cnt) do { \
- if ((cnt) > 1) { \
- emith_lsl(d, s, cnt-1); \
- emith_addf_r_r_r(d, d, d); \
- } else if ((cnt) > 0) \
- emith_addf_r_r_r(d, s, s); \
- } while (0)
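- // the trick above: "adds d, d, d" doubles the value and leaves the bit shifted
- // out of position 31 in the C flag, so a plain lsl by cnt-1 followed by one
- // flag-setting add reproduces the carry-out of a flag-setting shift by cnt.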
- #define emith_lsrf(d, s, cnt) do { \
- EMIT(A64_RBIT_REG(d, s)); \
- emith_lslf(d, d, cnt); \
- EMIT(A64_RBIT_REG(d, d)); \
- } while (0)
- #define emith_asrf(d, s, cnt) do { \
- int _s = s; \
- if ((cnt) > 1) { \
- emith_asr(d, s, cnt-1); \
- _s = d; \
- } \
- if ((cnt) > 0) { \
- emith_addf_r_r_r(Z0, _s, _s); \
- EMIT(A64_RBIT_REG(d, _s)); \
- emith_adcf_r_r_r(d, d, d); \
- EMIT(A64_RBIT_REG(d, d)); \
- } \
- } while (0)
- #define emith_rolf(d, s, cnt) do { \
- int _s = s; \
- if ((cnt) > 1) { \
- emith_rol(d, s, cnt-1); \
- _s = d; \
- } \
- if ((cnt) > 0) { \
- emith_addf_r_r_r(d, _s, _s); \
- emith_adc_r_r_r(d, d, Z0); \
- } \
- } while (0)
- #define emith_rorf(d, s, cnt) do { \
- if ((cnt) > 0) { \
- emith_ror(d, s, cnt); \
- emith_addf_r_r_r(Z0, d, d); \
- } \
- } while (0)
- #define emith_rolcf(d) \
- emith_adcf_r_r(d, d)
- #define emith_rorcf(d) do { \
- EMIT(A64_RBIT_REG(d, d)); \
- emith_adcf_r_r(d, d); \
- EMIT(A64_RBIT_REG(d, d)); \
- } while (0)
- // signed/unsigned extend
- #define emith_clear_msb(d, s, count) /* bits to clear */ \
- EMIT(A64_UXT_IMM(d, s, 32-(count)))
- #define emith_clear_msb_c(cond, d, s, count) \
- emith_clear_msb(d, s, count)
- #define emith_sext(d, s, count) /* bits to keep */ \
- EMIT(A64_SXT_IMM(d, s, count))
- // multiply Rd = Rn*Rm (+ Ra)
- #define emith_mul(d, s1, s2) \
- EMIT(A64_MUL(d, s1, s2))
- // NB: must combine/split Xd from/into 2 Wd's; play safe and clear upper bits
- #define emith_combine64(dlo, dhi) \
- EMIT(A64_UXTX_IMM(dlo, dlo, 32)); \
- EMIT(A64_ORX_REG(dlo, dlo, dhi, ST_LSL, 32));
- #define emith_split64(dlo, dhi) \
- EMIT(A64_LSRX_IMM(dhi, dlo, 32)); \
- EMIT(A64_UXTX_IMM(dlo, dlo, 32));
- #define emith_mul_u64(dlo, dhi, s1, s2) do { \
- EMIT(A64_UMULL(dlo, s1, s2)); \
- emith_split64(dlo, dhi); \
- } while (0)
- #define emith_mul_s64(dlo, dhi, s1, s2) do { \
- EMIT(A64_SMULL(dlo, s1, s2)); \
- emith_split64(dlo, dhi); \
- } while (0)
- #define emith_mula_s64(dlo, dhi, s1, s2) do { \
- emith_combine64(dlo, dhi); \
- EMIT(A64_SMADDL(dlo, s1, s2, dlo)); \
- emith_split64(dlo, dhi); \
- } while (0)
- #define emith_mula_s64_c(cond, dlo, dhi, s1, s2) \
- emith_mula_s64(dlo, dhi, s1, s2)
- // load/store. offs has 9 bits signed, hence larger offs may use a temp
- static void emith_ldst_offs(int sz, int rd, int rn, int o9, int ld, int mode)
- {
- if (o9 >= -256 && o9 < 256) {
- EMIT(A64_OP_LDST(sz, ld, A64_LDST_AM(0,_,o9), mode, rn, rd));
- } else if (mode == AM_IDXPRE) {
- emith_add_r_r_ptr_imm(rn, rn, o9);
- EMIT(A64_OP_LDST(sz, ld, A64_LDST_AM(0,_,0), AM_IDX, rn, rd));
- } else if (mode == AM_IDXPOST) {
- EMIT(A64_OP_LDST(sz, ld, A64_LDST_AM(0,_,0), AM_IDX, rn, rd));
- emith_add_r_r_ptr_imm(rn, rn, o9);
- } else {
- int _t = rcache_get_tmp();
- emith_add_r_r_ptr_imm(_t, rn, o9);
- EMIT(A64_OP_LDST(sz, ld, A64_LDST_AM(0,_,0), AM_IDX, _t, rd));
- rcache_free_tmp(_t);
- }
- }
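- // e.g. emith_read_r_r_offs(0, CONTEXT_REG, 0x40) fits the signed 9 bit range and
- // emits a single unscaled load, while an offset like 0x400 is first added into a
- // temp register (or folded into the base for the pre/post-indexed modes).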
- #define emith_read_r_r_offs_ptr(r, rs, offs) \
- emith_ldst_offs(AM_X, r, rs, offs, LT_LD, AM_IDX)
- #define emith_read_r_r_offs_ptr_c(cond, r, rs, offs) \
- emith_read_r_r_offs_ptr(r, rs, offs)
- #define emith_read_r_r_offs(r, rs, offs) \
- emith_ldst_offs(AM_W, r, rs, offs, LT_LD, AM_IDX)
- #define emith_read_r_r_offs_c(cond, r, rs, offs) \
- emith_read_r_r_offs(r, rs, offs)
-
- #define emith_read_r_r_r_ptr(r, rs, rm) \
- EMIT(A64_LDSTX_REG(r, rs, rm, LT_LD, XT_SXTW))
- #define emith_read_r_r_r(r, rs, rm) \
- EMIT(A64_LDST_REG(r, rs, rm, LT_LD, XT_SXTW))
- #define emith_read_r_r_r_c(cond, r, rs, rm) \
- emith_read_r_r_r(r, rs, rm)
- #define emith_read_r_r_r_ptr_wb(r, rs, rm) do { \
- emith_read_r_r_r_ptr(r, rs, rm); \
- emith_add_r_r_ptr(rs, rm); \
- } while (0)
- #define emith_read_r_r_r_wb(r, rs, rm) do { \
- emith_read_r_r_r(r, rs, rm); \
- emith_add_r_r_ptr(rs, rm); \
- } while (0)
- #define emith_read8_r_r_offs(r, rs, offs) \
- emith_ldst_offs(AM_B, r, rs, offs, LT_LD, AM_IDX)
- #define emith_read8_r_r_offs_c(cond, r, rs, offs) \
- emith_read8_r_r_offs(r, rs, offs)
- #define emith_read8_r_r_r(r, rs, rm) \
- EMIT(A64_LDSTB_REG(r, rs, rm, LT_LD, XT_SXTW))
- #define emith_read8_r_r_r_c(cond, r, rs, rm) \
- emith_read8_r_r_r(r, rs, rm)
- #define emith_read16_r_r_offs(r, rs, offs) \
- emith_ldst_offs(AM_H, r, rs, offs, LT_LD, AM_IDX)
- #define emith_read16_r_r_offs_c(cond, r, rs, offs) \
- emith_read16_r_r_offs(r, rs, offs)
- #define emith_read16_r_r_r(r, rs, rm) \
- EMIT(A64_LDSTH_REG(r, rs, rm, LT_LD, XT_SXTW))
- #define emith_read16_r_r_r_c(cond, r, rs, rm) \
- emith_read16_r_r_r(r, rs, rm)
- #define emith_read8s_r_r_offs(r, rs, offs) \
- emith_ldst_offs(AM_B, r, rs, offs, LT_LDS, AM_IDX)
- #define emith_read8s_r_r_offs_c(cond, r, rs, offs) \
- emith_read8s_r_r_offs(r, rs, offs)
- #define emith_read8s_r_r_r(r, rs, rm) \
- EMIT(A64_LDSTB_REG(r, rs, rm, LT_LDS, XT_SXTW))
- #define emith_read8s_r_r_r_c(cond, r, rs, rm) \
- emith_read8s_r_r_r(r, rs, rm)
- #define emith_read16s_r_r_offs(r, rs, offs) \
- emith_ldst_offs(AM_H, r, rs, offs, LT_LDS, AM_IDX)
- #define emith_read16s_r_r_offs_c(cond, r, rs, offs) \
- emith_read16s_r_r_offs(r, rs, offs)
- #define emith_read16s_r_r_r(r, rs, rm) \
- EMIT(A64_LDSTH_REG(r, rs, rm, LT_LDS, XT_SXTW))
- #define emith_read16s_r_r_r_c(cond, r, rs, rm) \
- emith_read16s_r_r_r(r, rs, rm)
- #define emith_write_r_r_offs_ptr(r, rs, offs) \
- emith_ldst_offs(AM_X, r, rs, offs, LT_ST, AM_IDX)
- #define emith_write_r_r_offs_ptr_c(cond, r, rs, offs) \
- emith_write_r_r_offs_ptr(r, rs, offs)
- #define emith_write_r_r_r_ptr(r, rs, rm) \
- EMIT(A64_LDSTX_REG(r, rs, rm, LT_ST, XT_SXTW))
- #define emith_write_r_r_r_ptr_c(cond, r, rs, rm) \
- emith_write_r_r_r_ptr(r, rs, rm)
- #define emith_write_r_r_offs(r, rs, offs) \
- emith_ldst_offs(AM_W, r, rs, offs, LT_ST, AM_IDX)
- #define emith_write_r_r_offs_c(cond, r, rs, offs) \
- emith_write_r_r_offs(r, rs, offs)
- #define emith_write_r_r_r(r, rs, rm) \
- EMIT(A64_LDST_REG(r, rs, rm, LT_ST, XT_SXTW))
- #define emith_write_r_r_r_c(cond, r, rs, rm) \
- emith_write_r_r_r(r, rs, rm)
- #define emith_write_r_r_r_ptr_wb(r, rs, rm) do { \
- emith_write_r_r_r_ptr(r, rs, rm); \
- emith_add_r_r_ptr(rs, rm); \
- } while (0)
- #define emith_write_r_r_r_wb(r, rs, rm) do { \
- emith_write_r_r_r(r, rs, rm); \
- emith_add_r_r_ptr(rs, rm); \
- } while (0)
- #define emith_ctx_read_ptr(r, offs) \
- emith_read_r_r_offs_ptr(r, CONTEXT_REG, offs)
- #define emith_ctx_read(r, offs) \
- emith_read_r_r_offs(r, CONTEXT_REG, offs)
- #define emith_ctx_read_c(cond, r, offs) \
- emith_ctx_read(r, offs)
- #define emith_ctx_write_ptr(r, offs) \
- emith_write_r_r_offs_ptr(r, CONTEXT_REG, offs)
- #define emith_ctx_write(r, offs) \
- emith_write_r_r_offs(r, CONTEXT_REG, offs)
- #define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
- int r_ = r, offs_ = offs, cnt_ = cnt; \
- for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
- emith_ctx_read(r_, offs_); \
- } while (0)
- #define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
- int r_ = r, offs_ = offs, cnt_ = cnt; \
- for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
- emith_ctx_write(r_, offs_); \
- } while (0)
- // push pairs; NB: SP must be 16 byte aligned (HW requirement!)
- #define emith_push2(r1, r2) \
- EMIT(A64_LDSTPX_IMM(SP, r1, r2, -2*8, LT_ST, AM_IDXPRE))
- #define emith_pop2(r1, r2) \
- EMIT(A64_LDSTPX_IMM(SP, r1, r2, 2*8, LT_LD, AM_IDXPOST))
- // function call handling
- #define emith_save_caller_regs(mask) do { \
- int _c, _r1, _r2; u32 _m = mask & 0x3ffff; \
- if (__builtin_parity(_m) == 1) _m |= 0x40000; /* hardware align */ \
- for (_c = HOST_REGS-1, _r1 = -1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
- if (_m & (1 << _c)) { \
- _r2 = _r1, _r1 = _c; \
- if (_r2 != -1) { \
- emith_push2(_r1, _r2); \
- _r1 = -1; \
- } \
- } \
- } while (0)
- #define emith_restore_caller_regs(mask) do { \
- int _c, _r1, _r2; u32 _m = mask & 0x3ffff; \
- if (__builtin_parity(_m) == 1) _m |= 0x40000; /* hardware align */ \
- for (_c = 0, _r1 = -1; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
- if (_m & (1 << _c)) { \
- _r2 = _r1, _r1 = _c; \
- if (_r2 != -1) { \
- emith_pop2(_r2, _r1); \
- _r1 = -1; \
- } \
- } \
- } while (0)
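- // e.g. a caller-saved mask of 0x000f is pushed as the pairs (2,3) and (0,1) and
- // popped again in reverse order; for an odd number of registers x18 (PR) is
- // included as padding so SP stays 16 byte aligned.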
- #define host_arg2reg(rd, arg) \
- rd = arg
- #define emith_pass_arg_r(arg, reg) \
- emith_move_r_r(arg, reg)
- #define emith_pass_arg_imm(arg, imm) \
- emith_move_r_imm(arg, imm)
- // branching; NB: A64 B.cond has only +/- 1MB range
- #define emith_bcond(ptr, patch, cond, target) do { \
- u32 disp_ = (u8 *)target - (u8 *)ptr; \
- if (disp_ >= 0xfff00000 || disp_ <= 0x000fffff) { /* can use near B.c */ \
- EMIT_PTR(ptr, A64_BCOND(cond, disp_ & 0x001fffff)); \
- if (patch) EMIT_PTR(ptr, A64_NOP); /* reserve space for far B */ \
- } else { /* far branch if near branch isn't possible */ \
- EMIT_PTR(ptr, A64_BCOND(emith_invert_cond(cond), 8)); \
- EMIT_PTR(ptr, A64_B((disp_ - 4) & 0x0fffffff)); \
- } \
- } while (0)
- #define emith_jump(target) do {\
- u32 disp_ = (u8 *)target - (u8 *)tcache_ptr; \
- EMIT(A64_B(disp_ & 0x0fffffff)); \
- } while (0)
- #define emith_jump_patchable(target) \
- emith_jump(target)
- #define emith_jump_cond(cond, target) \
- emith_bcond(tcache_ptr, 0, cond, target)
- #define emith_jump_cond_patchable(cond, target) \
- emith_bcond(tcache_ptr, 1, cond, target)
- #define emith_jump_patch(ptr, target) ({ \
- u32 *ptr_ = (u32 *)ptr; \
- u32 disp_ = (u8 *)(target) - (u8 *)(ptr_); \
- int cond_ = ptr_[0] & 0xf; \
- if ((ptr_[0] & 0xff000000) == 0x54000000) { /* B.cond */ \
- if (ptr_[1] != A64_NOP) cond_ = emith_invert_cond(cond_); \
- emith_bcond(ptr_, 1, cond_, target); \
- } else if (ptr_[0] & 0x80000000) \
- EMIT_PTR(ptr_, A64_BL((disp_) & 0x0fffffff)); \
- else EMIT_PTR(ptr_, A64_B((disp_) & 0x0fffffff)); \
- (u8 *)ptr; \
- })
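- // note: a patchable conditional jump always occupies 8 bytes, either
- // "b.cond target; nop" for near targets or "b.!cond +8; b target" for far ones,
- // which is what emith_jump_patch_size() below accounts for.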
- #define emith_jump_reg(r) \
- EMIT(A64_BR(r))
- #define emith_jump_reg_c(cond, r) \
- emith_jump_reg(r)
- #define emith_jump_ctx(offs) do { \
- int _t = rcache_get_tmp(); \
- emith_ctx_read_ptr(_t, offs); \
- emith_jump_reg(_t); \
- rcache_free_tmp(_t); \
- } while (0)
- #define emith_jump_ctx_c(cond, offs) \
- emith_jump_ctx(offs)
- #define emith_call(target) do { \
- u32 disp_ = (u8 *)target - (u8 *)tcache_ptr; \
- EMIT(A64_BL(disp_ & 0x0fffffff)); \
- } while (0)
- #define emith_call_cond(cond, target) \
- emith_call(target)
- #define emith_call_reg(r) \
- EMIT(A64_BLR(r))
- #define emith_call_ctx(offs) do { \
- int _t = rcache_get_tmp(); \
- emith_ctx_read_ptr(_t, offs); \
- emith_call_reg(_t); \
- rcache_free_tmp(_t); \
- } while (0)
- #define emith_call_link(r, target) do { \
- EMIT(A64_ADRXLIT_IMM(r, 8)); \
- emith_jump(target); \
- } while (0)
- #define emith_call_cleanup() /**/
- #define emith_ret() \
- EMIT(A64_RET(LR))
- #define emith_ret_c(cond) \
- emith_ret()
- #define emith_ret_to_ctx(offs) \
- emith_ctx_write_ptr(LR, offs)
- // NB: pushes r or r18 for SP hardware alignment
- #define emith_push_ret(r) do { \
- int r_ = (r >= 0 ? r : 18); \
- emith_push2(r_, LR); \
- } while (0)
- #define emith_pop_and_ret(r) do { \
- int r_ = (r >= 0 ? r : 18); \
- emith_pop2(r_, LR); \
- emith_ret(); \
- } while (0)
- // emitter ABI stuff
- #define emith_pool_check() /**/
- #define emith_pool_commit(j) /**/
- #define emith_insn_ptr() ((u8 *)tcache_ptr)
- #define emith_flush() /**/
- #define host_instructions_updated(base, end) __builtin___clear_cache(base, end)
- #define emith_jump_patch_size() 8
- #define emith_rw_offs_max() 0xff
- // SH2 drc specific
- #define emith_sh2_drc_entry() do { \
- emith_push2(LR, FP); \
- emith_push2(28, 27); \
- emith_push2(26, 25); \
- emith_push2(24, 23); \
- emith_push2(22, 21); \
- emith_push2(20, 19); \
- } while (0)
- #define emith_sh2_drc_exit() do { \
- emith_pop2(20, 19); \
- emith_pop2(22, 21); \
- emith_pop2(24, 23); \
- emith_pop2(26, 25); \
- emith_pop2(28, 27); \
- emith_pop2(LR, FP); \
- emith_ret(); \
- } while (0)
- // NB: assumes a is in arg0, tab, func and mask are temp
- #define emith_sh2_rcall(a, tab, func, mask) do { \
- emith_lsr(mask, a, SH2_READ_SHIFT); \
- EMIT(A64_ADDX_REG(tab, tab, mask, ST_LSL, 4)); \
- emith_read_r_r_offs_ptr(func, tab, 0); \
- emith_read_r_r_offs(mask, tab, 8); \
- EMIT(A64_ADDXS_REG(func, func, func, ST_LSL, 0)); \
- } while (0)
- // NB: assumes a, val are in arg0 and arg1, tab and func are temp
- #define emith_sh2_wcall(a, val, tab, func) do { \
- emith_lsr(func, a, SH2_WRITE_SHIFT); \
- emith_lsl(func, func, 3); \
- emith_read_r_r_r_ptr(func, tab, func); \
- emith_move_r_r_ptr(2, CONTEXT_REG); /* arg2 */ \
- emith_jump_reg(func); \
- } while (0)
- #define emith_sh2_delay_loop(cycles, reg) do { \
- int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL); \
- int t1 = rcache_get_tmp(); \
- int t2 = rcache_get_tmp(); \
- int t3 = rcache_get_tmp(); \
- /* if (sr < 0) return */ \
- emith_asrf(t2, sr, 12); \
- EMITH_JMP_START(DCOND_LE); \
- /* turns = sr.cycles / cycles */ \
- emith_move_r_imm(t3, (u32)((1ULL<<32) / (cycles)) + 1); \
- emith_mul_u64(t1, t2, t2, t3); /* multiply by 1/x */ \
- rcache_free_tmp(t3); \
- if (reg >= 0) { \
- /* if (reg <= turns) turns = reg-1 */ \
- t3 = rcache_get_reg(reg, RC_GR_RMW, NULL); \
- emith_cmp_r_r(t3, t2); \
- EMITH_SJMP_START(DCOND_HI); \
- emith_sub_r_r_imm_c(DCOND_LS, t2, t3, 1); \
- EMITH_SJMP_END(DCOND_HI); \
- /* if (reg <= 1) turns = 0 */ \
- emith_cmp_r_imm(t3, 1); \
- EMITH_SJMP_START(DCOND_HI); \
- emith_move_r_imm_c(DCOND_LS, t2, 0); \
- EMITH_SJMP_END(DCOND_HI); \
- /* reg -= turns */ \
- emith_sub_r_r(t3, t2); \
- } \
- /* sr.cycles -= turns * cycles; */ \
- emith_move_r_imm(t1, cycles); \
- emith_mul(t1, t2, t1); \
- emith_sub_r_r_r_lsl(sr, sr, t1, 12); \
- EMITH_JMP_END(DCOND_LE); \
- rcache_free_tmp(t1); \
- rcache_free_tmp(t2); \
- } while (0)
- /*
- * if Q
- * t = carry(Rn += Rm)
- * else
- * t = carry(Rn -= Rm)
- * T ^= t
- */
- #define emith_sh2_div1_step(rn, rm, sr) do { \
- int tmp_ = rcache_get_tmp(); \
- emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
- EMITH_SJMP3_START(DCOND_EQ); \
- emith_addf_r_r(rn, rm); \
- emith_adc_r_r_r(tmp_, Z0, Z0); \
- EMITH_SJMP3_MID(DCOND_EQ); \
- emith_subf_r_r(rn, rm); \
- emith_adc_r_r_r(tmp_, Z0, Z0); \
- emith_eor_r_imm(tmp_, 1); \
- EMITH_SJMP3_END(); \
- emith_eor_r_r(sr, tmp_); \
- rcache_free_tmp(tmp_); \
- } while (0)
- /* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
- #define emith_sh2_macl(ml, mh, rn, rm, sr) do { \
- emith_tst_r_imm(sr, S); \
- EMITH_SJMP_START(DCOND_EQ); \
- /* MACH top 16 bits unused if saturated. sign ext for overfl detect */ \
- emith_sext(mh, mh, 16); \
- EMITH_SJMP_END(DCOND_EQ); \
- emith_mula_s64(ml, mh, rn, rm); \
- emith_tst_r_imm(sr, S); \
- EMITH_SJMP_START(DCOND_EQ); \
- /* overflow if top 17 bits of MACH aren't all 1 or 0 */ \
- /* to check: add MACH[15] to MACH[31:16]. this is 0 if no overflow */ \
- emith_asrf(rn, mh, 16); /* sum = (MACH>>16) + ((MACH>>15)&1) */ \
- emith_adcf_r_imm(rn, 0); /* (MACH>>15) is in carry after shift */ \
- EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> ov */ \
- emith_move_r_imm_c(DCOND_NE, ml, 0x0000); /* -overflow */ \
- emith_move_r_imm_c(DCOND_NE, mh, 0x8000); \
- EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> +ovl */ \
- emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0xffffffff */ \
- emith_sub_r_imm_c(DCOND_GT, mh, 1); /* 0x00007fff */ \
- EMITH_SJMP_END(DCOND_LE); \
- EMITH_SJMP_END(DCOND_EQ); \
- EMITH_SJMP_END(DCOND_EQ); \
- } while (0)
- /* mh:ml += rn*rm, does saturation if required by S bit. rn, rm must be TEMP */
- #define emith_sh2_macw(ml, mh, rn, rm, sr) do { \
- emith_tst_r_imm(sr, S); \
- EMITH_SJMP_START(DCOND_EQ); \
- /* XXX: MACH should be untouched when S is set? */ \
- emith_asr(mh, ml, 31); /* sign ext MACL to MACH for ovrfl check */ \
- EMITH_SJMP_END(DCOND_EQ); \
- emith_mula_s64(ml, mh, rn, rm); \
- emith_tst_r_imm(sr, S); \
- EMITH_SJMP_START(DCOND_EQ); \
- /* overflow if top 33 bits of MACH:MACL aren't all 1 or 0 */ \
- /* to check: add MACL[31] to MACH. this is 0 if no overflow */ \
- emith_lsr(rn, ml, 31); \
- emith_addf_r_r(rn, mh); /* sum = MACH + ((MACL>>31)&1) */ \
- EMITH_SJMP_START(DCOND_EQ); /* sum != 0 -> overflow */ \
- /* XXX: LSB signalling only in SH1, or in SH2 too? */ \
- emith_move_r_imm_c(DCOND_NE, mh, 0x00000001); /* LSB of MACH */ \
- emith_move_r_imm_c(DCOND_NE, ml, 0x80000000); /* negative ovrfl */ \
- EMITH_SJMP_START(DCOND_LE); /* sum > 0 -> positive ovrfl */ \
- emith_sub_r_imm_c(DCOND_GT, ml, 1); /* 0x7fffffff */ \
- EMITH_SJMP_END(DCOND_LE); \
- EMITH_SJMP_END(DCOND_EQ); \
- EMITH_SJMP_END(DCOND_EQ); \
- } while (0)
- #define emith_write_sr(sr, srcr) do { \
- emith_lsr(sr, sr, 10); \
- emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
- emith_ror(sr, sr, 22); \
- } while (0)
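- // bit juggling above: the lsr drops the 10 SR flag bits, the or places the low
- // 10 bits of srcr into bits 31..22, and the ror by 22 rotates both parts back
- // into place, i.e. SR = (SR & ~0x3ff) | (srcr & 0x3ff).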
- #define emith_carry_to_t(sr, is_sub) do { \
- emith_lsr(sr, sr, 1); \
- emith_adc_r_r(sr, sr); \
- if (is_sub) /* SUB has inverted C on ARM */ \
- emith_eor_r_imm(sr, 1); \
- } while (0)
- #define emith_tpop_carry(sr, is_sub) do { \
- if (is_sub) \
- emith_eor_r_imm(sr, 1); \
- emith_lsrf(sr, sr, 1); \
- } while (0)
- #define emith_tpush_carry(sr, is_sub) do { \
- emith_adc_r_r(sr, sr); \
- if (is_sub) \
- emith_eor_r_imm(sr, 1); \
- } while (0)
- #ifdef T
- // T bit handling
- #define emith_invert_cond(cond) \
- ((cond) ^ 1)
- static void emith_clr_t_cond(int sr)
- {
- emith_bic_r_imm(sr, T);
- }
- static void emith_set_t_cond(int sr, int cond)
- {
- EMITH_SJMP_START(emith_invert_cond(cond));
- emith_or_r_imm_c(cond, sr, T);
- EMITH_SJMP_END(emith_invert_cond(cond));
- }
- #define emith_get_t_cond() -1
- #define emith_sync_t(sr) ((void)sr)
- #define emith_invalidate_t()
- static void emith_set_t(int sr, int val)
- {
- if (val)
- emith_or_r_imm(sr, T);
- else
- emith_bic_r_imm(sr, T);
- }
- static int emith_tst_t(int sr, int tf)
- {
- emith_tst_r_imm(sr, T);
- return tf ? DCOND_NE: DCOND_EQ;
- }
- #endif