compiler.c

  1. /*
  2. * vim:shiftwidth=2:expandtab
  3. *
  4. * notes:
  5. * - tcache, block descriptor, link buffer overflows result in sh2_translate()
  6. * failure, followed by full tcache invalidation for that region
  7. * - jumps between blocks are tracked for SMC handling (in block_links[]),
  8. * except jumps between different tcaches
  9. *
  10. * implemented:
  11. * - static register allocation
  12. * - remaining register caching and tracking in temporaries
  13. * - block-local branch linking
  14. * - block linking (except between tcaches)
  15. * - some constant propagation
  16. *
  17. * TODO:
  18. * - proper SMC handling
  19. * - better constant propagation
  20. * - stack caching?
  21. * - bug fixing
  22. */
  23. #include <stddef.h>
  24. #include <stdio.h>
  25. #include <stdlib.h>
  26. #include <assert.h>
  27. #include "../../pico/pico_int.h"
  28. #include "sh2.h"
  29. #include "compiler.h"
  30. #include "../drc/cmn.h"
  31. #include "../debug.h"
  32. // features
  33. #define PROPAGATE_CONSTANTS 1
  34. #define LINK_BRANCHES 1
  35. // max literal offset from the block end
  36. #define MAX_LITERAL_OFFSET 32*2
  37. // debug stuff {
  38. #ifndef DRC_DEBUG
  39. #define DRC_DEBUG 0
  40. #endif
  41. #if DRC_DEBUG
  42. #define dbg(l,...) { \
  43. if ((l) & DRC_DEBUG) \
  44. elprintf(EL_STATUS, ##__VA_ARGS__); \
  45. }
  46. #include "mame/sh2dasm.h"
  47. #include <platform/linux/host_dasm.h>
  48. static int insns_compiled, hash_collisions, host_insn_count;
  49. #define COUNT_OP \
  50. host_insn_count++
  51. #else // !DRC_DEBUG
  52. #define COUNT_OP
  53. #define dbg(...)
  54. #endif
  55. #if (DRC_DEBUG & 2)
  56. static u8 *tcache_dsm_ptrs[3];
  57. static char sh2dasm_buff[64];
  58. #define do_host_disasm(tcid) \
  59. host_dasm(tcache_dsm_ptrs[tcid], tcache_ptr - tcache_dsm_ptrs[tcid]); \
  60. tcache_dsm_ptrs[tcid] = tcache_ptr
  61. #else
  62. #define do_host_disasm(x)
  63. #endif
  64. #if (DRC_DEBUG & 4) || defined(PDB)
  65. static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
  66. {
  67. if (block != NULL) {
  68. dbg(4, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
  69. sh2->pc, block, (signed int)sr >> 12);
  70. pdb_step(sh2, sh2->pc);
  71. }
  72. return block;
  73. }
  74. #endif
  75. // } debug
  76. #define BLOCK_CYCLE_LIMIT 100
  77. #define MAX_BLOCK_SIZE (BLOCK_CYCLE_LIMIT * 6 * 6)
  78. #define TCACHE_BUFFERS 3
  79. // we have 3 translation cache buffers, split from one drc/cmn buffer.
  80. // BIOS shares tcache with data array because it's only used for init
  81. // and can be discarded early
  82. // XXX: need to tune sizes
  83. static const int tcache_sizes[TCACHE_BUFFERS] = {
  84. DRC_TCACHE_SIZE * 6 / 8, // ROM, DRAM
  85. DRC_TCACHE_SIZE / 8, // BIOS, data array in master sh2
  86. DRC_TCACHE_SIZE / 8, // ... slave
  87. };
  88. static u8 *tcache_bases[TCACHE_BUFFERS];
  89. static u8 *tcache_ptrs[TCACHE_BUFFERS];
  90. // ptr for code emitters
  91. static u8 *tcache_ptr;
  92. typedef struct block_desc_ {
  93. u32 addr; // SH2 PC address
  94. u32 end_addr; // TODO rm?
  95. void *tcache_ptr; // translated block for above PC
  96. struct block_desc_ *next; // next block with the same PC hash
  97. #if (DRC_DEBUG & 1)
  98. int refcount;
  99. #endif
  100. } block_desc;
  101. typedef struct block_link_ {
  102. u32 target_pc;
  103. void *jump;
  104. // struct block_link_ *next;
  105. } block_link;
  106. static const int block_max_counts[TCACHE_BUFFERS] = {
  107. 4*1024,
  108. 256,
  109. 256,
  110. };
  111. static block_desc *block_tables[TCACHE_BUFFERS];
  112. static block_link *block_links[TCACHE_BUFFERS];
  113. static int block_counts[TCACHE_BUFFERS];
  114. static int block_link_counts[TCACHE_BUFFERS];
  115. // host register tracking
  116. enum {
  117. HR_FREE,
  118. HR_CACHED, // 'greg' holds the sh2_reg_e
  119. // HR_CONST, // 'val' has a constant
  120. HR_TEMP, // reg used for temp storage
  121. };
  122. enum {
  123. HRF_DIRTY = 1 << 0, // reg has "dirty" value to be written to ctx
  124. HRF_LOCKED = 1 << 1, // HR_CACHED can't be evicted
  125. };
  126. typedef struct {
  127. u32 hreg:5; // "host" reg
  128. u32 greg:5; // "guest" reg
  129. u32 type:3;
  130. u32 flags:3;
  131. u32 stamp:16; // kind of a timestamp
  132. } temp_reg_t;
  133. // note: reg_temp[] must have at least as many registers as
  134. // the handlers use in the worst case (currently 4)
  135. #ifdef ARM
  136. #include "../drc/emit_arm.c"
  137. static const int reg_map_g2h[] = {
  138. 4, 5, 6, 7,
  139. 8, -1, -1, -1,
  140. -1, -1, -1, -1,
  141. -1, -1, -1, 9,
  142. -1, -1, -1, 10,
  143. -1, -1, -1, -1,
  144. };
  145. static temp_reg_t reg_temp[] = {
  146. { 0, },
  147. { 1, },
  148. { 12, },
  149. { 14, },
  150. { 2, },
  151. { 3, },
  152. };
  153. #elif defined(__i386__)
  154. #include "../drc/emit_x86.c"
  155. static const int reg_map_g2h[] = {
  156. xSI,-1, -1, -1,
  157. -1, -1, -1, -1,
  158. -1, -1, -1, -1,
  159. -1, -1, -1, -1,
  160. -1, -1, -1, xDI,
  161. -1, -1, -1, -1,
  162. };
  163. // ax, cx, dx are usually temporaries by convention
  164. static temp_reg_t reg_temp[] = {
  165. { xAX, },
  166. { xBX, },
  167. { xCX, },
  168. { xDX, },
  169. };
  170. #else
  171. #error unsupported arch
  172. #endif
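/* rough sketch of how a guest reg is resolved: reg_map_g2h[] is indexed by
 * guest reg (sh2_reg_e) and gives the statically allocated host reg, or -1
 * when the reg is instead tracked dynamically in reg_temp[]. rcache_get_reg_()
 * below does approximately:
 *
 *   if (reg_map_g2h[r] != -1)
 *     return reg_map_g2h[r];               // statically mapped
 *   // else reuse a matching HR_CACHED entry, or claim a free/evicted
 *   // temp reg, load it with emith_ctx_read() and hand it out
 */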
  173. #define T 0x00000001
  174. #define S 0x00000002
  175. #define I 0x000000f0
  176. #define Q 0x00000100
  177. #define M 0x00000200
  178. #define T_save 0x00000800
  179. #define I_SHIFT 4
  180. #define Q_SHIFT 8
  181. #define M_SHIFT 9
  182. // ROM hash table
  183. #define MAX_HASH_ENTRIES 1024
  184. #define HASH_MASK (MAX_HASH_ENTRIES - 1)
  185. static void **hash_table;
  186. #define HASH_FUNC(hash_tab, addr) \
  187. ((block_desc **)(hash_tab))[(addr) & HASH_MASK]
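/* example: a ROM block at PC 0x02000612 goes into
 * HASH_FUNC(hash_table, 0x02000612), i.e. hash_table[0x212]; blocks that
 * collide on the same bucket are chained through block_desc.next
 * (see dr_add_block()/dr_find_block() below) */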
  188. static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
  189. static void (*sh2_drc_dispatcher)(void);
  190. static void (*sh2_drc_exit)(void);
  191. static void (*sh2_drc_test_irq)(void);
  192. static u32 REGPARM(2) (*sh2_drc_read8)(u32 a, SH2 *sh2);
  193. static u32 REGPARM(2) (*sh2_drc_read16)(u32 a, SH2 *sh2);
  194. static u32 REGPARM(2) (*sh2_drc_read32)(u32 a, SH2 *sh2);
  195. static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
  196. static void REGPARM(2) (*sh2_drc_write8_slot)(u32 a, u32 d);
  197. static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
  198. static void REGPARM(2) (*sh2_drc_write16_slot)(u32 a, u32 d);
  199. static int REGPARM(3) (*sh2_drc_write32)(u32 a, u32 d, SH2 *sh2);
  200. extern void REGPARM(2) sh2_do_op(SH2 *sh2, int opcode);
  201. static void flush_tcache(int tcid)
  202. {
  203. dbg(1, "tcache #%d flush! (%d/%d, bds %d/%d)", tcid,
  204. tcache_ptrs[tcid] - tcache_bases[tcid], tcache_sizes[tcid],
  205. block_counts[tcid], block_max_counts[tcid]);
  206. block_counts[tcid] = 0;
  207. tcache_ptrs[tcid] = tcache_bases[tcid];
  208. if (tcid == 0) { // ROM, RAM
  209. memset(hash_table, 0, sizeof(hash_table[0]) * MAX_HASH_ENTRIES);
  210. memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
  211. }
  212. else
  213. memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[0]));
  214. #if (DRC_DEBUG & 2)
  215. tcache_dsm_ptrs[tcid] = tcache_bases[tcid];
  216. #endif
  217. }
  218. #if LINK_BRANCHES
  219. // add block links (tracked branches)
  220. static int dr_add_block_link(u32 target_pc, void *jump, int tcache_id)
  221. {
  222. block_link *bl = block_links[tcache_id];
  223. int cnt = block_link_counts[tcache_id];
  224. if (cnt >= block_max_counts[tcache_id] * 2) {
  225. printf("bl overflow for tcache %d\n", tcache_id);
  226. return -1;
  227. }
  228. bl[cnt].target_pc = target_pc;
  229. bl[cnt].jump = jump;
  230. block_link_counts[tcache_id]++;
  231. return 0;
  232. }
  233. #endif
  234. static void *dr_find_block(block_desc *tab, u32 addr)
  235. {
  236. for (tab = tab->next; tab != NULL; tab = tab->next)
  237. if (tab->addr == addr)
  238. break;
  239. if (tab != NULL)
  240. return tab->tcache_ptr;
  241. printf("block miss for %08x\n", addr);
  242. return NULL;
  243. }
  244. static block_desc *dr_add_block(u32 addr, int tcache_id, int *blk_id)
  245. {
  246. int *bcount = &block_counts[tcache_id];
  247. block_desc *bd;
  248. if (*bcount >= block_max_counts[tcache_id]) {
  249. printf("bd overflow for tcache %d\n", tcache_id);
  250. return NULL;
  251. }
  252. bd = &block_tables[tcache_id][*bcount];
  253. bd->addr = addr;
  254. bd->tcache_ptr = tcache_ptr;
  255. *blk_id = *bcount;
  256. (*bcount)++;
  257. if ((addr & 0xc6000000) == 0x02000000) { // ROM
  258. bd->next = HASH_FUNC(hash_table, addr);
  259. HASH_FUNC(hash_table, addr) = bd;
  260. #if (DRC_DEBUG & 1)
  261. if (bd->next != NULL) {
  262. printf(" hash collision with %08x\n", bd->next->addr);
  263. hash_collisions++;
  264. }
  265. #endif
  266. }
  267. return bd;
  268. }
  269. #define ADD_TO_ARRAY(array, count, item, failcode) \
  270. array[count++] = item; \
  271. if (count >= ARRAY_SIZE(array)) { \
  272. printf("warning: " #array " overflow\n"); \
  273. failcode; \
  274. }
  275. int find_in_array(u32 *array, size_t size, u32 what)
  276. {
  277. size_t i;
  278. for (i = 0; i < size; i++)
  279. if (what == array[i])
  280. return i;
  281. return -1;
  282. }
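/* usage sketch: the 1st translation pass below collects local branch targets
 * with ADD_TO_ARRAY(), and the 2nd pass finds them again with find_in_array():
 *
 *   ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc + offs + 2,);
 *   ...
 *   i = find_in_array(branch_target_pc, branch_target_count, pc);
 *   if (i >= 0)
 *     branch_target_ptr[i] = tcache_ptr;   // current pc is a branch target
 */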
  283. // ---------------------------------------------------------------
  284. typedef enum {
  285. RC_GR_READ,
  286. RC_GR_WRITE,
  287. RC_GR_RMW,
  288. } rc_gr_mode;
  289. static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking);
  290. // guest regs with constants
  291. static u32 dr_gcregs[24];
  292. // bitmasks: which guest regs hold constants, which of those are dirty
  293. static u32 dr_gcregs_mask;
  294. static u32 dr_gcregs_dirty;
  295. static void gconst_new(sh2_reg_e r, u32 val)
  296. {
  297. #if PROPAGATE_CONSTANTS
  298. int i;
  299. dr_gcregs_mask |= 1 << r;
  300. dr_gcregs_dirty |= 1 << r;
  301. dr_gcregs[r] = val;
  302. // throw away old r that we might have cached
  303. for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
  304. if ((reg_temp[i].type == HR_CACHED) &&
  305. reg_temp[i].greg == r) {
  306. reg_temp[i].type = HR_FREE;
  307. reg_temp[i].flags = 0;
  308. }
  309. }
  310. #endif
  311. }
  312. static int gconst_get(sh2_reg_e r, u32 *val)
  313. {
  314. if (dr_gcregs_mask & (1 << r)) {
  315. *val = dr_gcregs[r];
  316. return 1;
  317. }
  318. return 0;
  319. }
  320. static int gconst_check(sh2_reg_e r)
  321. {
  322. if ((dr_gcregs_mask | dr_gcregs_dirty) & (1 << r))
  323. return 1;
  324. return 0;
  325. }
  326. // update hr if dirty, else do nothing
  327. static int gconst_try_read(int hr, sh2_reg_e r)
  328. {
  329. if (dr_gcregs_dirty & (1 << r)) {
  330. emith_move_r_imm(hr, dr_gcregs[r]);
  331. dr_gcregs_dirty &= ~(1 << r);
  332. return 1;
  333. }
  334. return 0;
  335. }
  336. static void gconst_check_evict(sh2_reg_e r)
  337. {
  338. if (dr_gcregs_mask & (1 << r))
  339. // no longer cached in reg, make dirty again
  340. dr_gcregs_dirty |= 1 << r;
  341. }
  342. static void gconst_kill(sh2_reg_e r)
  343. {
  344. dr_gcregs_mask &= ~(1 << r);
  345. dr_gcregs_dirty &= ~(1 << r);
  346. }
  347. static void gconst_clean(void)
  348. {
  349. int i;
  350. for (i = 0; i < ARRAY_SIZE(dr_gcregs); i++)
  351. if (dr_gcregs_dirty & (1 << i)) {
  352. // using RC_GR_READ here: it will call gconst_try_read,
  353. // cache the reg and mark it dirty.
  354. rcache_get_reg_(i, RC_GR_READ, 0);
  355. }
  356. }
  357. static void gconst_invalidate(void)
  358. {
  359. dr_gcregs_mask = dr_gcregs_dirty = 0;
  360. }
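/* rough example of how the gconst_* helpers enable constant propagation:
 * emit_move_r_imm32() just records the constant with gconst_new() (no code
 * emitted), and a later memory access with a known base can be inlined:
 *
 *   gconst_new(SHR_R0, 0x06000000);               // hypothetical MOV result
 *   ...
 *   if (gconst_get(SHR_R0, &val))                 // base known at translate time
 *     hr = emit_get_rbase_and_offs(val + offs, &offs2);  // direct host access
 *
 * dirty constants are materialized/written back via gconst_try_read() and
 * gconst_clean() once the real register value is needed.
 */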
  361. // register cache
  362. static u16 rcache_counter;
  363. static temp_reg_t *rcache_evict(void)
  364. {
  365. // evict reg with oldest stamp
  366. int i, oldest = -1;
  367. u16 min_stamp = (u16)-1;
  368. for (i = 0; i < ARRAY_SIZE(reg_temp); i++) {
  369. if (reg_temp[i].type == HR_CACHED && !(reg_temp[i].flags & HRF_LOCKED) &&
  370. reg_temp[i].stamp <= min_stamp) {
  371. min_stamp = reg_temp[i].stamp;
  372. oldest = i;
  373. }
  374. }
  375. if (oldest == -1) {
  376. printf("no registers to evict, aborting\n");
  377. exit(1);
  378. }
  379. i = oldest;
  380. if (reg_temp[i].type == HR_CACHED) {
  381. if (reg_temp[i].flags & HRF_DIRTY)
  382. // writeback
  383. emith_ctx_write(reg_temp[i].hreg, reg_temp[i].greg * 4);
  384. gconst_check_evict(reg_temp[i].greg);
  385. }
  386. reg_temp[i].type = HR_FREE;
  387. reg_temp[i].flags = 0;
  388. return &reg_temp[i];
  389. }
  390. static int get_reg_static(sh2_reg_e r, rc_gr_mode mode)
  391. {
  392. int i = reg_map_g2h[r];
  393. if (i != -1) {
  394. if (mode != RC_GR_WRITE)
  395. gconst_try_read(i, r);
  396. }
  397. return i;
  398. }
  399. // note: must not be called when doing conditional code
  400. static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking)
  401. {
  402. temp_reg_t *tr;
  403. int i, ret;
  404. // maybe statically mapped?
  405. ret = get_reg_static(r, mode);
  406. if (ret != -1)
  407. goto end;
  408. rcache_counter++;
  409. // maybe already cached?
  410. // if so, prefer it over gconst (they must be in sync)
  411. for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
  412. if (reg_temp[i].type == HR_CACHED && reg_temp[i].greg == r) {
  413. reg_temp[i].stamp = rcache_counter;
  414. if (mode != RC_GR_READ)
  415. reg_temp[i].flags |= HRF_DIRTY;
  416. ret = reg_temp[i].hreg;
  417. goto end;
  418. }
  419. }
  420. // use any free reg
  421. for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
  422. if (reg_temp[i].type == HR_FREE) {
  423. tr = &reg_temp[i];
  424. goto do_alloc;
  425. }
  426. }
  427. tr = rcache_evict();
  428. do_alloc:
  429. tr->type = HR_CACHED;
  430. if (do_locking)
  431. tr->flags |= HRF_LOCKED;
  432. if (mode != RC_GR_READ)
  433. tr->flags |= HRF_DIRTY;
  434. tr->greg = r;
  435. tr->stamp = rcache_counter;
  436. ret = tr->hreg;
  437. if (mode != RC_GR_WRITE) {
  438. if (gconst_check(r)) {
  439. if (gconst_try_read(ret, r))
  440. tr->flags |= HRF_DIRTY;
  441. }
  442. else
  443. emith_ctx_read(tr->hreg, r * 4);
  444. }
  445. end:
  446. if (mode != RC_GR_READ)
  447. gconst_kill(r);
  448. return ret;
  449. }
  450. static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode)
  451. {
  452. return rcache_get_reg_(r, mode, 1);
  453. }
  454. static int rcache_get_tmp(void)
  455. {
  456. temp_reg_t *tr;
  457. int i;
  458. for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
  459. if (reg_temp[i].type == HR_FREE) {
  460. tr = &reg_temp[i];
  461. goto do_alloc;
  462. }
  463. tr = rcache_evict();
  464. do_alloc:
  465. tr->type = HR_TEMP;
  466. return tr->hreg;
  467. }
  468. static int rcache_get_arg_id(int arg)
  469. {
  470. int i, r = 0;
  471. host_arg2reg(r, arg);
  472. for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
  473. if (reg_temp[i].hreg == r)
  474. break;
  475. if (i == ARRAY_SIZE(reg_temp))
  476. // let's just say it's untracked arg reg
  477. return r;
  478. if (reg_temp[i].type == HR_CACHED) {
  479. // writeback
  480. if (reg_temp[i].flags & HRF_DIRTY)
  481. emith_ctx_write(reg_temp[i].hreg, reg_temp[i].greg * 4);
  482. gconst_check_evict(reg_temp[i].greg);
  483. }
  484. else if (reg_temp[i].type == HR_TEMP) {
  485. printf("arg %d reg %d already used, aborting\n", arg, r);
  486. exit(1);
  487. }
  488. reg_temp[i].type = HR_FREE;
  489. reg_temp[i].flags = 0;
  490. return i;
  491. }
  492. // get a reg to be used as function arg
  493. static int rcache_get_tmp_arg(int arg)
  494. {
  495. int id = rcache_get_arg_id(arg);
  496. reg_temp[id].type = HR_TEMP;
  497. return reg_temp[id].hreg;
  498. }
  499. // same but caches a reg. RC_GR_READ only.
  500. static int rcache_get_reg_arg(int arg, sh2_reg_e r)
  501. {
  502. int i, srcr, dstr, dstid;
  503. int dirty = 0;
  504. dstid = rcache_get_arg_id(arg);
  505. dstr = reg_temp[dstid].hreg;
  506. // maybe already statically mapped?
  507. srcr = get_reg_static(r, RC_GR_READ);
  508. if (srcr != -1)
  509. goto do_cache;
  510. // maybe already cached?
  511. for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
  512. if ((reg_temp[i].type == HR_CACHED) &&
  513. reg_temp[i].greg == r)
  514. {
  515. srcr = reg_temp[i].hreg;
  516. goto do_cache;
  517. }
  518. }
  519. // must read
  520. srcr = dstr;
  521. if (gconst_check(r)) {
  522. if (gconst_try_read(srcr, r))
  523. dirty = 1;
  524. }
  525. else
  526. emith_ctx_read(srcr, r * 4);
  527. do_cache:
  528. if (dstr != srcr)
  529. emith_move_r_r(dstr, srcr);
  530. reg_temp[dstid].stamp = ++rcache_counter;
  531. reg_temp[dstid].type = HR_CACHED;
  532. reg_temp[dstid].greg = r;
  533. reg_temp[dstid].flags |= HRF_LOCKED;
  534. if (dirty)
  535. reg_temp[dstid].flags |= HRF_DIRTY;
  536. return dstr;
  537. }
  538. static void rcache_free_tmp(int hr)
  539. {
  540. int i;
  541. for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
  542. if (reg_temp[i].hreg == hr)
  543. break;
  544. if (i == ARRAY_SIZE(reg_temp) || reg_temp[i].type != HR_TEMP) {
  545. printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, reg_temp[i].type);
  546. return;
  547. }
  548. reg_temp[i].type = HR_FREE;
  549. reg_temp[i].flags = 0;
  550. }
  551. static void rcache_unlock(int hr)
  552. {
  553. int i;
  554. for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
  555. if (reg_temp[i].type == HR_CACHED && reg_temp[i].hreg == hr)
  556. reg_temp[i].flags &= ~HRF_LOCKED;
  557. }
  558. static void rcache_unlock_all(void)
  559. {
  560. int i;
  561. for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
  562. reg_temp[i].flags &= ~HRF_LOCKED;
  563. }
  564. static void rcache_clean(void)
  565. {
  566. int i;
  567. gconst_clean();
  568. for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
  569. if (reg_temp[i].type == HR_CACHED && (reg_temp[i].flags & HRF_DIRTY)) {
  570. // writeback
  571. emith_ctx_write(reg_temp[i].hreg, reg_temp[i].greg * 4);
  572. reg_temp[i].flags &= ~HRF_DIRTY;
  573. }
  574. }
  575. static void rcache_invalidate(void)
  576. {
  577. int i;
  578. for (i = 0; i < ARRAY_SIZE(reg_temp); i++) {
  579. reg_temp[i].type = HR_FREE;
  580. reg_temp[i].flags = 0;
  581. }
  582. rcache_counter = 0;
  583. gconst_invalidate();
  584. }
  585. static void rcache_flush(void)
  586. {
  587. rcache_clean();
  588. rcache_invalidate();
  589. }
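/* typical call pattern around a memory handler (see emit_memhandler_write()
 * and the MOV.x store cases below); the C handler may clobber any host reg,
 * so the cache is cleaned before and invalidated after the call:
 *
 *   rcache_clean();                       // write dirty regs back to ctx
 *   rcache_get_reg_arg(0, rn);            // address -> arg0  (rn/rm: guest regs)
 *   rcache_get_reg_arg(1, rm);            // data    -> arg1
 *   emith_call(sh2_drc_write16);          // via emit_memhandler_write()
 *   rcache_invalidate();                  // cached host regs no longer valid
 */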
  590. // ---------------------------------------------------------------
  591. // address space stuff
  592. static void *dr_get_pc_base(u32 pc, int is_slave)
  593. {
  594. void *ret = NULL;
  595. u32 mask = 0;
  596. if ((pc & ~0x7ff) == 0) {
  597. // BIOS
  598. ret = is_slave ? Pico32xMem->sh2_rom_s : Pico32xMem->sh2_rom_m;
  599. mask = 0x7ff;
  600. }
  601. else if ((pc & 0xfffff000) == 0xc0000000) {
  602. // data array
  603. ret = Pico32xMem->data_array[is_slave];
  604. mask = 0xfff;
  605. }
  606. else if ((pc & 0xc6000000) == 0x06000000) {
  607. // SDRAM
  608. ret = Pico32xMem->sdram;
  609. mask = 0x03ffff;
  610. }
  611. else if ((pc & 0xc6000000) == 0x02000000) {
  612. // ROM
  613. ret = Pico.rom;
  614. mask = 0x3fffff;
  615. }
  616. if (ret == NULL)
  617. return (void *)-1; // NULL is valid value
  618. return (char *)ret - (pc & ~mask);
  619. }
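/* usage sketch: sh2_translate() below fetches guest opcodes through this
 * base pointer with plain host loads:
 *
 *   dr_pc_base = dr_get_pc_base(base_pc, sh2->is_slave);
 *   if (dr_pc_base == (void *)-1)
 *     ...                                  // PC not in a translatable region
 *   op = dr_pc_base[pc / 2];               // what FETCH_OP(pc) expands to
 */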
  620. static int emit_get_rbase_and_offs(u32 a, u32 *offs)
  621. {
  622. int poffs = -1;
  623. u32 mask = 0;
  624. int hr;
  625. if ((a & ~0x7ff) == 0) {
  626. // BIOS
  627. poffs = offsetof(SH2, p_bios);
  628. mask = 0x7ff;
  629. }
  630. else if ((a & 0xfffff000) == 0xc0000000) {
  631. // data array
  632. poffs = offsetof(SH2, p_da);
  633. mask = 0xfff;
  634. }
  635. else if ((a & 0xc6000000) == 0x06000000) {
  636. // SDRAM
  637. poffs = offsetof(SH2, p_sdram);
  638. mask = 0x03ffff;
  639. }
  640. else if ((a & 0xc6000000) == 0x02000000) {
  641. // ROM
  642. poffs = offsetof(SH2, p_rom);
  643. mask = 0x3fffff;
  644. }
  645. if (poffs == -1)
  646. return -1;
  647. // XXX: could use related reg
  648. hr = rcache_get_tmp();
  649. emith_ctx_read(hr, poffs);
  650. emith_add_r_imm(hr, a & mask & ~0xff);
  651. *offs = a & 0xff; // XXX: ARM oriented..
  652. return hr;
  653. }
  654. static void REGPARM(3) *lookup_block(u32 pc, int is_slave, int *tcache_id)
  655. {
  656. block_desc *bd = NULL;
  657. void *block = NULL;
  658. *tcache_id = 0;
  659. // we have full block id tables for data_array and RAM
  660. // BIOS goes to data_array table too
  661. if ((pc & 0xe0000000) == 0xc0000000 || (pc & ~0xfff) == 0) {
  662. int blkid = Pico32xMem->drcblk_da[is_slave][(pc & 0xfff) >> SH2_DRCBLK_DA_SHIFT];
  663. *tcache_id = 1 + is_slave;
  664. if (blkid & 1) {
  665. bd = &block_tables[*tcache_id][blkid >> 1];
  666. block = bd->tcache_ptr;
  667. }
  668. }
  669. // RAM
  670. else if ((pc & 0xc6000000) == 0x06000000) {
  671. int blkid = Pico32xMem->drcblk_ram[(pc & 0x3ffff) >> SH2_DRCBLK_RAM_SHIFT];
  672. if (blkid & 1) {
  673. bd = &block_tables[0][blkid >> 1];
  674. block = bd->tcache_ptr;
  675. }
  676. }
  677. // ROM
  678. else if ((pc & 0xc6000000) == 0x02000000) {
  679. bd = HASH_FUNC(hash_table, pc);
  680. if (bd != NULL) {
  681. if (bd->addr == pc)
  682. block = bd->tcache_ptr;
  683. else
  684. block = dr_find_block(bd, pc);
  685. }
  686. }
  687. #if (DRC_DEBUG & 1)
  688. if (bd != NULL)
  689. bd->refcount++;
  690. #endif
  691. return block;
  692. }
  693. static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
  694. {
  695. #if PROPAGATE_CONSTANTS
  696. gconst_new(dst, imm);
  697. #else
  698. int hr = rcache_get_reg(dst, RC_GR_WRITE);
  699. emith_move_r_imm(hr, imm);
  700. #endif
  701. }
  702. static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
  703. {
  704. int hr_d = rcache_get_reg(dst, RC_GR_WRITE);
  705. int hr_s = rcache_get_reg(src, RC_GR_READ);
  706. emith_move_r_r(hr_d, hr_s);
  707. }
  708. // T must be clear, and comparison done just before this
  709. static void emit_or_t_if_eq(int srr)
  710. {
  711. EMITH_SJMP_START(DCOND_NE);
  712. emith_or_r_imm_c(DCOND_EQ, srr, T);
  713. EMITH_SJMP_END(DCOND_NE);
  714. }
  715. // arguments must be ready
  716. // reg cache must be clean before call
  717. static int emit_memhandler_read_(int size, int ram_check)
  718. {
  719. int arg0, arg1;
  720. host_arg2reg(arg0, 0);
  721. rcache_clean();
  722. // must writeback cycles for poll detection stuff
  723. // FIXME: rm
  724. if (reg_map_g2h[SHR_SR] != -1)
  725. emith_ctx_write(reg_map_g2h[SHR_SR], SHR_SR * 4);
  726. arg1 = rcache_get_tmp_arg(1);
  727. emith_move_r_r(arg1, CONTEXT_REG);
  728. #ifndef PDB_NET
  729. if (ram_check && Pico.rom == (void *)0x02000000 && Pico32xMem->sdram == (void *)0x06000000) {
  730. int tmp = rcache_get_tmp();
  731. emith_and_r_r_imm(tmp, arg0, 0xfb000000);
  732. emith_cmp_r_imm(tmp, 0x02000000);
  733. switch (size) {
  734. case 0: // 8
  735. EMITH_SJMP3_START(DCOND_NE);
  736. emith_eor_r_imm_c(DCOND_EQ, arg0, 1);
  737. emith_read8_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
  738. EMITH_SJMP3_MID(DCOND_NE);
  739. emith_call_cond(DCOND_NE, sh2_drc_read8);
  740. EMITH_SJMP3_END();
  741. break;
  742. case 1: // 16
  743. EMITH_SJMP3_START(DCOND_NE);
  744. emith_read16_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
  745. EMITH_SJMP3_MID(DCOND_NE);
  746. emith_call_cond(DCOND_NE, sh2_drc_read16);
  747. EMITH_SJMP3_END();
  748. break;
  749. case 2: // 32
  750. EMITH_SJMP3_START(DCOND_NE);
  751. emith_read_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
  752. emith_ror_c(DCOND_EQ, arg0, arg0, 16);
  753. EMITH_SJMP3_MID(DCOND_NE);
  754. emith_call_cond(DCOND_NE, sh2_drc_read32);
  755. EMITH_SJMP3_END();
  756. break;
  757. }
  758. }
  759. else
  760. #endif
  761. {
  762. switch (size) {
  763. case 0: // 8
  764. emith_call(sh2_drc_read8);
  765. break;
  766. case 1: // 16
  767. emith_call(sh2_drc_read16);
  768. break;
  769. case 2: // 32
  770. emith_call(sh2_drc_read32);
  771. break;
  772. }
  773. }
  774. rcache_invalidate();
  775. // assuming arg0 and retval regs match
  776. return rcache_get_tmp_arg(0);
  777. }
  778. static int emit_memhandler_read(int size)
  779. {
  780. return emit_memhandler_read_(size, 1);
  781. }
  782. static int emit_memhandler_read_rr(sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
  783. {
  784. int hr, hr2, ram_check = 1;
  785. u32 val, offs2;
  786. if (gconst_get(rs, &val)) {
  787. hr = emit_get_rbase_and_offs(val + offs, &offs2);
  788. if (hr != -1) {
  789. hr2 = rcache_get_reg(rd, RC_GR_WRITE);
  790. switch (size) {
  791. case 0: // 8
  792. emith_read8_r_r_offs(hr2, hr, offs2 ^ 1);
  793. emith_sext(hr2, hr2, 8);
  794. break;
  795. case 1: // 16
  796. emith_read16_r_r_offs(hr2, hr, offs2);
  797. emith_sext(hr2, hr2, 16);
  798. break;
  799. case 2: // 32
  800. emith_read_r_r_offs(hr2, hr, offs2);
  801. emith_ror(hr2, hr2, 16);
  802. break;
  803. }
  804. rcache_free_tmp(hr);
  805. return hr2;
  806. }
  807. ram_check = 0;
  808. }
  809. hr = rcache_get_reg_arg(0, rs);
  810. if (offs != 0)
  811. emith_add_r_imm(hr, offs);
  812. hr = emit_memhandler_read_(size, ram_check);
  813. hr2 = rcache_get_reg(rd, RC_GR_WRITE);
  814. if (size != 2) {
  815. emith_sext(hr2, hr, (size == 1) ? 16 : 8);
  816. } else
  817. emith_move_r_r(hr2, hr);
  818. rcache_free_tmp(hr);
  819. return hr2;
  820. }
  821. static void emit_memhandler_write(int size, u32 pc, int delay)
  822. {
  823. int ctxr;
  824. host_arg2reg(ctxr, 2);
  825. switch (size) {
  826. case 0: // 8
  827. // XXX: consider inlining sh2_drc_write8
  828. if (delay) {
  829. emith_call(sh2_drc_write8_slot);
  830. } else {
  831. emit_move_r_imm32(SHR_PC, pc);
  832. rcache_clean();
  833. emith_call(sh2_drc_write8);
  834. }
  835. break;
  836. case 1: // 16
  837. if (delay) {
  838. emith_call(sh2_drc_write16_slot);
  839. } else {
  840. emit_move_r_imm32(SHR_PC, pc);
  841. rcache_clean();
  842. emith_call(sh2_drc_write16);
  843. }
  844. break;
  845. case 2: // 32
  846. emith_move_r_r(ctxr, CONTEXT_REG);
  847. emith_call(sh2_drc_write32);
  848. break;
  849. }
  850. rcache_invalidate();
  851. }
  852. // @(Rx,Ry)
  853. static int emit_indirect_indexed_read(int rx, int ry, int size)
  854. {
  855. int a0, t;
  856. a0 = rcache_get_reg_arg(0, rx);
  857. t = rcache_get_reg(ry, RC_GR_READ);
  858. emith_add_r_r(a0, t);
  859. return emit_memhandler_read(size);
  860. }
  861. // read @Rn, @Rm
  862. static void emit_indirect_read_double(u32 *rnr, u32 *rmr, int rn, int rm, int size)
  863. {
  864. int tmp;
  865. rcache_get_reg_arg(0, rn);
  866. tmp = emit_memhandler_read(size);
  867. emith_ctx_write(tmp, offsetof(SH2, drc_tmp));
  868. rcache_free_tmp(tmp);
  869. tmp = rcache_get_reg(rn, RC_GR_RMW);
  870. emith_add_r_imm(tmp, 1 << size);
  871. rcache_unlock(tmp);
  872. rcache_get_reg_arg(0, rm);
  873. *rmr = emit_memhandler_read(size);
  874. *rnr = rcache_get_tmp();
  875. emith_ctx_read(*rnr, offsetof(SH2, drc_tmp));
  876. tmp = rcache_get_reg(rm, RC_GR_RMW);
  877. emith_add_r_imm(tmp, 1 << size);
  878. rcache_unlock(tmp);
  879. }
  880. static void emit_do_static_regs(int is_write, int tmpr)
  881. {
  882. int i, r, count;
  883. for (i = 0; i < ARRAY_SIZE(reg_map_g2h); i++) {
  884. r = reg_map_g2h[i];
  885. if (r == -1)
  886. continue;
  887. for (count = 1; i < ARRAY_SIZE(reg_map_g2h) - 1; i++, r++) {
  888. if (reg_map_g2h[i + 1] != r + 1)
  889. break;
  890. count++;
  891. }
  892. if (count > 1) {
  893. // i, r point to last item
  894. if (is_write)
  895. emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
  896. else
  897. emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
  898. } else {
  899. if (is_write)
  900. emith_ctx_write(r, i * 4);
  901. else
  902. emith_ctx_read(r, i * 4);
  903. }
  904. }
  905. }
  906. static void emit_block_entry(void)
  907. {
  908. int arg0, arg1, arg2;
  909. host_arg2reg(arg0, 0);
  910. host_arg2reg(arg1, 1);
  911. host_arg2reg(arg2, 2);
  912. #if (DRC_DEBUG & 4) || defined(PDB)
  913. emit_do_static_regs(1, arg2);
  914. emith_move_r_r(arg1, CONTEXT_REG);
  915. emith_move_r_r(arg2, rcache_get_reg(SHR_SR, RC_GR_READ));
  916. emith_call(sh2_drc_log_entry);
  917. rcache_invalidate();
  918. #endif
  919. emith_tst_r_r(arg0, arg0);
  920. EMITH_SJMP_START(DCOND_EQ);
  921. emith_jump_reg_c(DCOND_NE, arg0);
  922. EMITH_SJMP_END(DCOND_EQ);
  923. }
  924. void dr_link_blocks(void *target, u32 pc, int tcache_id)
  925. {
  926. #if LINK_BRANCHES
  927. block_link *bl = block_links[tcache_id];
  928. int cnt = block_link_counts[tcache_id];
  929. int i;
  930. for (i = 0; i < cnt; i++) {
  931. if (bl[i].target_pc == pc) {
  932. dbg(1, "- link from %p", bl[i].jump);
  933. emith_jump_patch(bl[i].jump, target);
  934. // XXX: sync ARM caches (old jump should be fine)?
  935. }
  936. }
  937. #endif
  938. }
  939. void *dr_prepare_ext_branch(u32 pc, SH2 *sh2, int tcache_id)
  940. {
  941. #if LINK_BRANCHES
  942. int target_tcache_id;
  943. void *target;
  944. int ret;
  945. target = lookup_block(pc, sh2->is_slave, &target_tcache_id);
  946. if (target_tcache_id == tcache_id) {
  947. // allow linking blocks only from local cache
  948. ret = dr_add_block_link(pc, tcache_ptr, tcache_id);
  949. if (ret < 0)
  950. return NULL;
  951. }
  952. if (target == NULL || target_tcache_id != tcache_id)
  953. target = sh2_drc_dispatcher;
  954. return target;
  955. #else
  956. return sh2_drc_dispatcher;
  957. #endif
  958. }
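/* rough flow of cross-block linking: when a block ends in a branch to some
 * target_pc, dr_prepare_ext_branch() is asked for the jump destination; if the
 * target isn't compiled yet it records tcache_ptr as a patch site (block_link)
 * and returns sh2_drc_dispatcher, so the jump initially goes through the
 * dispatcher. once target_pc does get translated, dr_link_blocks(tcache_ptr,
 * target_pc, tcache_id) patches the recorded jump to go to the new code
 * directly (local tcache only, as noted in the header). */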
  959. #define DELAYED_OP \
  960. drcf.delayed_op = 2
  961. #define DELAY_SAVE_T(sr) { \
  962. emith_bic_r_imm(sr, T_save); \
  963. emith_tst_r_imm(sr, T); \
  964. EMITH_SJMP_START(DCOND_EQ); \
  965. emith_or_r_imm_c(DCOND_NE, sr, T_save); \
  966. EMITH_SJMP_END(DCOND_EQ); \
  967. drcf.use_saved_t = 1; \
  968. }
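// note on the T_save mechanism: DELAY_SAVE_T() is used when the insn in a
// delay slot modifies T - the pre-slot T value is copied into the spare
// T_save bit of SR, and drcf.use_saved_t makes the delayed branch test
// T_save instead of T, so the branch still sees the old T value.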
  969. #define FLUSH_CYCLES(sr) \
  970. if (cycles > 0) { \
  971. emith_sub_r_imm(sr, cycles << 12); \
  972. cycles = 0; \
  973. }
  974. #define CHECK_UNHANDLED_BITS(mask) { \
  975. if ((op & (mask)) != 0) \
  976. goto default_; \
  977. }
  978. #define FETCH_OP(pc) \
  979. dr_pc_base[(pc) / 2]
  980. #define FETCH32(a) \
  981. ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
  982. #define GET_Fx() \
  983. ((op >> 4) & 0x0f)
  984. #define GET_Rm GET_Fx
  985. #define GET_Rn() \
  986. ((op >> 8) & 0x0f)
  987. #define CHECK_FX_LT(n) \
  988. if (GET_Fx() >= n) \
  989. goto default_
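/* decode example: for the opcode 0x310c (ADD R0,R1 - 0011nnnnmmmm1100):
 *   GET_Rn() == (0x310c >> 8) & 0x0f == 1    // Rn = R1
 *   GET_Rm() == (0x310c >> 4) & 0x0f == 0    // Rm = R0
 * the opcode itself comes from FETCH_OP(pc), i.e. a direct host read through
 * dr_pc_base once dr_get_pc_base() has validated the region */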
  990. #define MAX_LOCAL_BRANCHES 32
  991. // op_flags: data from 1st pass
  992. #define OP_FLAGS(pc) op_flags[((pc) - base_pc) / 2]
  993. #define OF_DELAY_OP (1 << 0)
  994. static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
  995. {
  996. // XXX: maybe use structs instead?
  997. void *branch_target_ptr[MAX_LOCAL_BRANCHES];
  998. u32 branch_target_pc[MAX_LOCAL_BRANCHES];
  999. int branch_target_count = 0;
  1000. void *branch_patch_ptr[MAX_LOCAL_BRANCHES];
  1001. u32 branch_patch_pc[MAX_LOCAL_BRANCHES];
  1002. int branch_patch_count = 0;
  1003. int pending_branch_cond = -1;
  1004. int pending_branch_pc = 0;
  1005. u8 op_flags[BLOCK_CYCLE_LIMIT + 1];
  1006. struct {
  1007. u32 delayed_op:2;
  1008. u32 test_irq:1;
  1009. u32 use_saved_t:1; // delayed op modifies T
  1010. } drcf = { 0, };
  1011. // PC of current, first, last, last_target_blk SH2 insn
  1012. u32 pc, base_pc, end_pc, out_pc;
  1013. u32 last_inlined_literal = 0;
  1014. void *block_entry;
  1015. block_desc *this_block;
  1016. u16 *dr_pc_base;
  1017. int blkid_main = 0;
  1018. int skip_op = 0;
  1019. u32 tmp, tmp2;
  1020. int cycles;
  1021. int op;
  1022. int i;
  1023. base_pc = sh2->pc;
  1024. // get base/validate PC
  1025. dr_pc_base = dr_get_pc_base(base_pc, sh2->is_slave);
  1026. if (dr_pc_base == (void *)-1) {
  1027. printf("invalid PC, aborting: %08x\n", base_pc);
  1028. // FIXME: be less destructive
  1029. exit(1);
  1030. }
  1031. tcache_ptr = tcache_ptrs[tcache_id];
  1032. this_block = dr_add_block(base_pc, tcache_id, &blkid_main);
  1033. if (this_block == NULL)
  1034. return NULL;
  1035. // predict tcache overflow
  1036. tmp = tcache_ptr - tcache_bases[tcache_id];
  1037. if (tmp > tcache_sizes[tcache_id] - MAX_BLOCK_SIZE) {
  1038. printf("tcache %d overflow\n", tcache_id);
  1039. return NULL;
  1040. }
  1041. block_entry = tcache_ptr;
  1042. dbg(1, "== %csh2 block #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
  1043. tcache_id, blkid_main, base_pc, block_entry);
  1044. dr_link_blocks(tcache_ptr, base_pc, tcache_id);
  1045. // 1st pass: scan forward for local branches
  1046. memset(op_flags, 0, sizeof(op_flags));
  1047. for (cycles = 0, pc = base_pc; cycles < BLOCK_CYCLE_LIMIT; cycles++, pc += 2) {
  1048. op = FETCH_OP(pc);
  1049. if ((op & 0xf000) == 0xa000 || (op & 0xf000) == 0xb000) { // BRA, BSR
  1050. signed int offs = ((signed int)(op << 20) >> 19);
  1051. pc += 2;
  1052. OP_FLAGS(pc) |= OF_DELAY_OP;
  1053. ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc + offs + 2,);
  1054. break;
  1055. }
  1056. if ((op & 0xf000) == 0) {
  1057. op &= 0xff;
  1058. if (op == 0x1b) // SLEEP
  1059. break;
  1060. if (op == 0x23 || op == 0x03 || op == 0x0b || op == 0x2b) { // BRAF, BSRF, RTS, RTE
  1061. pc += 2;
  1062. OP_FLAGS(pc) |= OF_DELAY_OP;
  1063. break;
  1064. }
  1065. continue;
  1066. }
  1067. if ((op & 0xf0df) == 0x400b) { // JMP, JSR
  1068. pc += 2;
  1069. OP_FLAGS(pc) |= OF_DELAY_OP;
  1070. break;
  1071. }
  1072. if ((op & 0xf900) == 0x8900) { // BT(S), BF(S)
  1073. signed int offs = ((signed int)(op << 24) >> 23);
  1074. if (op & 0x0400)
  1075. OP_FLAGS(pc + 2) |= OF_DELAY_OP;
  1076. ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc + offs + 4, break);
  1077. }
  1078. if ((op & 0xff00) == 0xc300) // TRAPA
  1079. break;
  1080. }
  1081. end_pc = pc;
  1082. // clean branch_targets that are not really local,
  1083. // and that land on delay slots
  1084. for (i = 0, tmp = 0; i < branch_target_count; i++) {
  1085. pc = branch_target_pc[i];
  1086. if (base_pc <= pc && pc <= end_pc && !(OP_FLAGS(pc) & OF_DELAY_OP))
  1087. branch_target_pc[tmp++] = branch_target_pc[i];
  1088. }
  1089. branch_target_count = tmp;
  1090. memset(branch_target_ptr, 0, sizeof(branch_target_ptr[0]) * branch_target_count);
  1091. // -------------------------------------------------
  1092. // 2nd pass: actual compilation
  1093. out_pc = 0;
  1094. pc = base_pc;
  1095. for (cycles = 0; pc <= end_pc || drcf.delayed_op; )
  1096. {
  1097. u32 tmp3, tmp4, sr;
  1098. if (drcf.delayed_op > 0)
  1099. drcf.delayed_op--;
  1100. op = FETCH_OP(pc);
  1101. i = find_in_array(branch_target_pc, branch_target_count, pc);
  1102. if (i >= 0)
  1103. {
  1104. if (pc != sh2->pc)
  1105. {
  1106. /* make "subblock" - just a mid-block entry */
  1107. block_desc *subblock;
  1108. u16 *drcblk;
  1109. int blkid;
  1110. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1111. FLUSH_CYCLES(sr);
  1112. // decide whether to flush rcache
  1113. if ((op & 0xf0ff) == 0x4010 && FETCH_OP(pc + 2) == 0x8bfd) // DT; BF #-2
  1114. rcache_clean();
  1115. else
  1116. rcache_flush();
  1117. do_host_disasm(tcache_id);
  1118. subblock = dr_add_block(pc, tcache_id, &blkid);
  1119. if (subblock == NULL)
  1120. return NULL;
  1121. subblock->end_addr = pc;
  1122. if (tcache_id != 0) { // data array, BIOS
  1123. drcblk = Pico32xMem->drcblk_da[sh2->is_slave];
  1124. drcblk += (pc & 0x00fff) >> SH2_DRCBLK_DA_SHIFT;
  1125. *drcblk = (blkid << 1) | 1;
  1126. } else if ((this_block->addr & 0xc7fc0000) == 0x06000000) { // DRAM
  1127. drcblk = Pico32xMem->drcblk_ram;
  1128. drcblk += (pc & 0x3ffff) >> SH2_DRCBLK_RAM_SHIFT;
  1129. *drcblk = (blkid << 1) | 1;
  1130. }
  1131. dbg(1, "-- %csh2 subblock #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
  1132. tcache_id, blkid, pc, tcache_ptr);
  1133. // since we made a block entry, link any other blocks that jump to current pc
  1134. dr_link_blocks(tcache_ptr, pc, tcache_id);
  1135. }
  1136. branch_target_ptr[i] = tcache_ptr;
  1137. // must update PC
  1138. emit_move_r_imm32(SHR_PC, pc);
  1139. rcache_clean();
  1140. // check cycles
  1141. sr = rcache_get_reg(SHR_SR, RC_GR_READ);
  1142. emith_cmp_r_imm(sr, 0);
  1143. emith_jump_cond(DCOND_LE, sh2_drc_exit);
  1144. do_host_disasm(tcache_id);
  1145. }
  1146. #if (DRC_DEBUG & 3)
  1147. insns_compiled++;
  1148. #if (DRC_DEBUG & 2)
  1149. DasmSH2(sh2dasm_buff, pc, op);
  1150. printf("%08x %04x %s\n", pc, op, sh2dasm_buff);
  1151. #endif
  1152. #endif
  1153. pc += 2;
  1154. cycles++;
  1155. if (skip_op > 0) {
  1156. skip_op--;
  1157. continue;
  1158. }
  1159. switch ((op >> 12) & 0x0f)
  1160. {
  1161. /////////////////////////////////////////////
  1162. case 0x00:
  1163. switch (op & 0x0f)
  1164. {
  1165. case 0x02:
  1166. tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
  1167. switch (GET_Fx())
  1168. {
  1169. case 0: // STC SR,Rn 0000nnnn00000010
  1170. tmp2 = SHR_SR;
  1171. break;
  1172. case 1: // STC GBR,Rn 0000nnnn00010010
  1173. tmp2 = SHR_GBR;
  1174. break;
  1175. case 2: // STC VBR,Rn 0000nnnn00100010
  1176. tmp2 = SHR_VBR;
  1177. break;
  1178. default:
  1179. goto default_;
  1180. }
  1181. tmp3 = rcache_get_reg(tmp2, RC_GR_READ);
  1182. emith_move_r_r(tmp, tmp3);
  1183. if (tmp2 == SHR_SR)
  1184. emith_clear_msb(tmp, tmp, 22); // reserved bits defined by ISA as 0
  1185. goto end_op;
  1186. case 0x03:
  1187. CHECK_UNHANDLED_BITS(0xd0);
  1188. // BRAF Rm 0000mmmm00100011
  1189. // BSRF Rm 0000mmmm00000011
  1190. DELAYED_OP;
  1191. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE);
  1192. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1193. emith_move_r_r(tmp, tmp2);
  1194. if (op & 0x20)
  1195. emith_add_r_imm(tmp, pc + 2);
  1196. else { // BSRF
  1197. tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE);
  1198. emith_move_r_imm(tmp3, pc + 2);
  1199. emith_add_r_r(tmp, tmp3);
  1200. }
  1201. out_pc = (u32)-1;
  1202. cycles++;
  1203. goto end_op;
  1204. case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
  1205. case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
  1206. case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
  1207. rcache_clean();
  1208. tmp = rcache_get_reg_arg(1, GET_Rm());
  1209. tmp2 = rcache_get_reg_arg(0, SHR_R0);
  1210. tmp3 = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1211. emith_add_r_r(tmp2, tmp3);
  1212. emit_memhandler_write(op & 3, pc, drcf.delayed_op);
  1213. goto end_op;
  1214. case 0x07:
  1215. // MUL.L Rm,Rn 0000nnnnmmmm0111
  1216. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1217. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1218. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
  1219. emith_mul(tmp3, tmp2, tmp);
  1220. cycles++;
  1221. goto end_op;
  1222. case 0x08:
  1223. CHECK_UNHANDLED_BITS(0xf00);
  1224. switch (GET_Fx())
  1225. {
  1226. case 0: // CLRT 0000000000001000
  1227. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1228. if (drcf.delayed_op)
  1229. DELAY_SAVE_T(sr);
  1230. emith_bic_r_imm(sr, T);
  1231. break;
  1232. case 1: // SETT 0000000000011000
  1233. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1234. if (drcf.delayed_op)
  1235. DELAY_SAVE_T(sr);
  1236. emith_or_r_imm(sr, T);
  1237. break;
  1238. case 2: // CLRMAC 0000000000101000
  1239. emit_move_r_imm32(SHR_MACL, 0);
  1240. emit_move_r_imm32(SHR_MACH, 0);
  1241. break;
  1242. default:
  1243. goto default_;
  1244. }
  1245. goto end_op;
  1246. case 0x09:
  1247. switch (GET_Fx())
  1248. {
  1249. case 0: // NOP 0000000000001001
  1250. CHECK_UNHANDLED_BITS(0xf00);
  1251. break;
  1252. case 1: // DIV0U 0000000000011001
  1253. CHECK_UNHANDLED_BITS(0xf00);
  1254. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1255. if (drcf.delayed_op)
  1256. DELAY_SAVE_T(sr);
  1257. emith_bic_r_imm(sr, M|Q|T);
  1258. break;
  1259. case 2: // MOVT Rn 0000nnnn00101001
  1260. sr = rcache_get_reg(SHR_SR, RC_GR_READ);
  1261. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
  1262. emith_clear_msb(tmp2, sr, 31);
  1263. break;
  1264. default:
  1265. goto default_;
  1266. }
  1267. goto end_op;
  1268. case 0x0a:
  1269. tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
  1270. switch (GET_Fx())
  1271. {
  1272. case 0: // STS MACH,Rn 0000nnnn00001010
  1273. tmp2 = SHR_MACH;
  1274. break;
  1275. case 1: // STS MACL,Rn 0000nnnn00011010
  1276. tmp2 = SHR_MACL;
  1277. break;
  1278. case 2: // STS PR,Rn 0000nnnn00101010
  1279. tmp2 = SHR_PR;
  1280. break;
  1281. default:
  1282. goto default_;
  1283. }
  1284. tmp2 = rcache_get_reg(tmp2, RC_GR_READ);
  1285. emith_move_r_r(tmp, tmp2);
  1286. goto end_op;
  1287. case 0x0b:
  1288. CHECK_UNHANDLED_BITS(0xf00);
  1289. switch (GET_Fx())
  1290. {
  1291. case 0: // RTS 0000000000001011
  1292. DELAYED_OP;
  1293. emit_move_r_r(SHR_PC, SHR_PR);
  1294. out_pc = (u32)-1;
  1295. cycles++;
  1296. break;
  1297. case 1: // SLEEP 0000000000011011
  1298. tmp = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1299. emith_clear_msb(tmp, tmp, 20); // clear cycles
  1300. out_pc = out_pc - 2;
  1301. cycles = 1;
  1302. goto end_op;
  1303. case 2: // RTE 0000000000101011
  1304. DELAYED_OP;
  1305. // pop PC
  1306. emit_memhandler_read_rr(SHR_PC, SHR_SP, 0, 2);
  1307. // pop SR
  1308. tmp = rcache_get_reg_arg(0, SHR_SP);
  1309. emith_add_r_imm(tmp, 4);
  1310. tmp = emit_memhandler_read(2);
  1311. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1312. emith_write_sr(sr, tmp);
  1313. rcache_free_tmp(tmp);
  1314. tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
  1315. emith_add_r_imm(tmp, 4*2);
  1316. drcf.test_irq = 1;
  1317. out_pc = (u32)-1;
  1318. cycles += 3;
  1319. break;
  1320. default:
  1321. goto default_;
  1322. }
  1323. goto end_op;
  1324. case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
  1325. case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
  1326. case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
  1327. tmp = emit_indirect_indexed_read(SHR_R0, GET_Rm(), op & 3);
  1328. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
  1329. if ((op & 3) != 2) {
  1330. emith_sext(tmp2, tmp, (op & 1) ? 16 : 8);
  1331. } else
  1332. emith_move_r_r(tmp2, tmp);
  1333. rcache_free_tmp(tmp);
  1334. goto end_op;
  1335. case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
  1336. emit_indirect_read_double(&tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
  1337. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW);
  1338. /* MS 16 MAC bits unused if saturated */
  1339. sr = rcache_get_reg(SHR_SR, RC_GR_READ);
  1340. emith_tst_r_imm(sr, S);
  1341. EMITH_SJMP_START(DCOND_EQ);
  1342. emith_clear_msb_c(DCOND_NE, tmp4, tmp4, 16);
  1343. EMITH_SJMP_END(DCOND_EQ);
  1344. rcache_unlock(sr);
  1345. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW); // might evict SR
  1346. emith_mula_s64(tmp3, tmp4, tmp, tmp2);
  1347. rcache_free_tmp(tmp2);
  1348. sr = rcache_get_reg(SHR_SR, RC_GR_READ); // reget just in case
  1349. emith_tst_r_imm(sr, S);
  1350. EMITH_JMP_START(DCOND_EQ);
  1351. emith_asr(tmp, tmp4, 15);
  1352. emith_cmp_r_imm(tmp, -1); // negative overflow (0x80000000..0xffff7fff)
  1353. EMITH_SJMP_START(DCOND_GE);
  1354. emith_move_r_imm_c(DCOND_LT, tmp4, 0x8000);
  1355. emith_move_r_imm_c(DCOND_LT, tmp3, 0x0000);
  1356. EMITH_SJMP_END(DCOND_GE);
  1357. emith_cmp_r_imm(tmp, 0); // positive overflow (0x00008000..0x7fffffff)
  1358. EMITH_SJMP_START(DCOND_LE);
  1359. emith_move_r_imm_c(DCOND_GT, tmp4, 0x00007fff);
  1360. emith_move_r_imm_c(DCOND_GT, tmp3, 0xffffffff);
  1361. EMITH_SJMP_END(DCOND_LE);
  1362. EMITH_JMP_END(DCOND_EQ);
  1363. rcache_free_tmp(tmp);
  1364. cycles += 3;
  1365. goto end_op;
  1366. }
  1367. goto default_;
  1368. /////////////////////////////////////////////
  1369. case 0x01:
  1370. // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
  1371. rcache_clean();
  1372. tmp = rcache_get_reg_arg(0, GET_Rn());
  1373. tmp2 = rcache_get_reg_arg(1, GET_Rm());
  1374. if (op & 0x0f)
  1375. emith_add_r_imm(tmp, (op & 0x0f) * 4);
  1376. emit_memhandler_write(2, pc, drcf.delayed_op);
  1377. goto end_op;
  1378. case 0x02:
  1379. switch (op & 0x0f)
  1380. {
  1381. case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
  1382. case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
  1383. case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
  1384. rcache_clean();
  1385. rcache_get_reg_arg(0, GET_Rn());
  1386. rcache_get_reg_arg(1, GET_Rm());
  1387. emit_memhandler_write(op & 3, pc, drcf.delayed_op);
  1388. goto end_op;
  1389. case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
  1390. case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
  1391. case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
  1392. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1393. emith_sub_r_imm(tmp, (1 << (op & 3)));
  1394. rcache_clean();
  1395. rcache_get_reg_arg(0, GET_Rn());
  1396. rcache_get_reg_arg(1, GET_Rm());
  1397. emit_memhandler_write(op & 3, pc, drcf.delayed_op);
  1398. goto end_op;
  1399. case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
  1400. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1401. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1402. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1403. if (drcf.delayed_op)
  1404. DELAY_SAVE_T(sr);
  1405. emith_bic_r_imm(sr, M|Q|T);
  1406. emith_tst_r_imm(tmp2, (1<<31));
  1407. EMITH_SJMP_START(DCOND_EQ);
  1408. emith_or_r_imm_c(DCOND_NE, sr, Q);
  1409. EMITH_SJMP_END(DCOND_EQ);
  1410. emith_tst_r_imm(tmp3, (1<<31));
  1411. EMITH_SJMP_START(DCOND_EQ);
  1412. emith_or_r_imm_c(DCOND_NE, sr, M);
  1413. EMITH_SJMP_END(DCOND_EQ);
  1414. emith_teq_r_r(tmp2, tmp3);
  1415. EMITH_SJMP_START(DCOND_PL);
  1416. emith_or_r_imm_c(DCOND_MI, sr, T);
  1417. EMITH_SJMP_END(DCOND_PL);
  1418. goto end_op;
  1419. case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
  1420. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1421. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1422. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1423. if (drcf.delayed_op)
  1424. DELAY_SAVE_T(sr);
  1425. emith_bic_r_imm(sr, T);
  1426. emith_tst_r_r(tmp2, tmp3);
  1427. emit_or_t_if_eq(sr);
  1428. goto end_op;
  1429. case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
  1430. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1431. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1432. emith_and_r_r(tmp, tmp2);
  1433. goto end_op;
  1434. case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
  1435. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1436. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1437. emith_eor_r_r(tmp, tmp2);
  1438. goto end_op;
  1439. case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
  1440. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1441. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1442. emith_or_r_r(tmp, tmp2);
  1443. goto end_op;
  1444. case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
  1445. tmp = rcache_get_tmp();
  1446. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1447. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1448. emith_eor_r_r_r(tmp, tmp2, tmp3);
  1449. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1450. if (drcf.delayed_op)
  1451. DELAY_SAVE_T(sr);
  1452. emith_bic_r_imm(sr, T);
1453. emith_tst_r_imm(tmp, 0x000000ff);
1454. emit_or_t_if_eq(sr);
1455. emith_tst_r_imm(tmp, 0x0000ff00);
1456. emit_or_t_if_eq(sr);
1457. emith_tst_r_imm(tmp, 0x00ff0000);
1458. emit_or_t_if_eq(sr);
1459. emith_tst_r_imm(tmp, 0xff000000);
1460. emit_or_t_if_eq(sr);
  1461. rcache_free_tmp(tmp);
  1462. goto end_op;
  1463. case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
  1464. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1465. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1466. emith_lsr(tmp, tmp, 16);
  1467. emith_or_r_r_lsl(tmp, tmp2, 16);
  1468. goto end_op;
  1469. case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
  1470. case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
  1471. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1472. tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
  1473. if (op & 1) {
  1474. emith_sext(tmp, tmp2, 16);
  1475. } else
  1476. emith_clear_msb(tmp, tmp2, 16);
  1477. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1478. tmp2 = rcache_get_tmp();
  1479. if (op & 1) {
  1480. emith_sext(tmp2, tmp3, 16);
  1481. } else
  1482. emith_clear_msb(tmp2, tmp3, 16);
  1483. emith_mul(tmp, tmp, tmp2);
  1484. rcache_free_tmp(tmp2);
  1485. // FIXME: causes timing issues in Doom?
  1486. // cycles++;
  1487. goto end_op;
  1488. }
  1489. goto default_;
  1490. /////////////////////////////////////////////
  1491. case 0x03:
  1492. switch (op & 0x0f)
  1493. {
  1494. case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
  1495. case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
  1496. case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
  1497. case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
  1498. case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
  1499. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1500. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1501. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1502. if (drcf.delayed_op)
  1503. DELAY_SAVE_T(sr);
  1504. emith_bic_r_imm(sr, T);
  1505. emith_cmp_r_r(tmp2, tmp3);
  1506. switch (op & 0x07)
  1507. {
  1508. case 0x00: // CMP/EQ
  1509. emit_or_t_if_eq(sr);
  1510. break;
  1511. case 0x02: // CMP/HS
  1512. EMITH_SJMP_START(DCOND_LO);
  1513. emith_or_r_imm_c(DCOND_HS, sr, T);
  1514. EMITH_SJMP_END(DCOND_LO);
  1515. break;
  1516. case 0x03: // CMP/GE
  1517. EMITH_SJMP_START(DCOND_LT);
  1518. emith_or_r_imm_c(DCOND_GE, sr, T);
  1519. EMITH_SJMP_END(DCOND_LT);
  1520. break;
  1521. case 0x06: // CMP/HI
  1522. EMITH_SJMP_START(DCOND_LS);
  1523. emith_or_r_imm_c(DCOND_HI, sr, T);
  1524. EMITH_SJMP_END(DCOND_LS);
  1525. break;
  1526. case 0x07: // CMP/GT
  1527. EMITH_SJMP_START(DCOND_LE);
  1528. emith_or_r_imm_c(DCOND_GT, sr, T);
  1529. EMITH_SJMP_END(DCOND_LE);
  1530. break;
  1531. }
  1532. goto end_op;
  1533. case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
  1534. // Q1 = carry(Rn = (Rn << 1) | T)
  1535. // if Q ^ M
  1536. // Q2 = carry(Rn += Rm)
  1537. // else
  1538. // Q2 = carry(Rn -= Rm)
  1539. // Q = M ^ Q1 ^ Q2
  1540. // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
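// guest code normally issues DIV0S/DIV0U once and then one DIV1 per quotient
// bit (usually interleaved with ROTCL of the low dividend word), so this
// step typically runs 16 or 32 times per division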
  1541. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1542. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1543. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1544. if (drcf.delayed_op)
  1545. DELAY_SAVE_T(sr);
  1546. emith_tpop_carry(sr, 0);
  1547. emith_adcf_r_r(tmp2, tmp2);
  1548. emith_tpush_carry(sr, 0); // keep Q1 in T for now
  1549. tmp4 = rcache_get_tmp();
  1550. emith_and_r_r_imm(tmp4, sr, M);
  1551. emith_eor_r_r_lsr(sr, tmp4, M_SHIFT - Q_SHIFT); // Q ^= M
  1552. rcache_free_tmp(tmp4);
  1553. // add or sub, invert T if carry to get Q1 ^ Q2
  1554. // in: (Q ^ M) passed in Q, Q1 in T
  1555. emith_sh2_div1_step(tmp2, tmp3, sr);
  1556. emith_bic_r_imm(sr, Q);
  1557. emith_tst_r_imm(sr, M);
  1558. EMITH_SJMP_START(DCOND_EQ);
  1559. emith_or_r_imm_c(DCOND_NE, sr, Q); // Q = M
  1560. EMITH_SJMP_END(DCOND_EQ);
  1561. emith_tst_r_imm(sr, T);
  1562. EMITH_SJMP_START(DCOND_EQ);
  1563. emith_eor_r_imm_c(DCOND_NE, sr, Q); // Q = M ^ Q1 ^ Q2
  1564. EMITH_SJMP_END(DCOND_EQ);
  1565. emith_eor_r_imm(sr, T); // T = !(Q1 ^ Q2)
  1566. goto end_op;
  1567. case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
  1568. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1569. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1570. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
  1571. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
  1572. emith_mul_u64(tmp3, tmp4, tmp, tmp2);
  1573. goto end_op;
  1574. case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
  1575. case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
  1576. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1577. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1578. if (op & 4) {
  1579. emith_add_r_r(tmp, tmp2);
  1580. } else
  1581. emith_sub_r_r(tmp, tmp2);
  1582. goto end_op;
  1583. case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
  1584. case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
  1585. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1586. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1587. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1588. if (drcf.delayed_op)
  1589. DELAY_SAVE_T(sr);
  1590. if (op & 4) { // adc
  1591. emith_tpop_carry(sr, 0);
  1592. emith_adcf_r_r(tmp, tmp2);
  1593. emith_tpush_carry(sr, 0);
  1594. } else {
  1595. emith_tpop_carry(sr, 1);
  1596. emith_sbcf_r_r(tmp, tmp2);
  1597. emith_tpush_carry(sr, 1);
  1598. }
  1599. goto end_op;
  1600. case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
  1601. case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
  1602. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1603. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1604. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1605. if (drcf.delayed_op)
  1606. DELAY_SAVE_T(sr);
  1607. emith_bic_r_imm(sr, T);
  1608. if (op & 4) {
  1609. emith_addf_r_r(tmp, tmp2);
  1610. } else
  1611. emith_subf_r_r(tmp, tmp2);
  1612. EMITH_SJMP_START(DCOND_VC);
  1613. emith_or_r_imm_c(DCOND_VS, sr, T);
  1614. EMITH_SJMP_END(DCOND_VC);
  1615. goto end_op;
  1616. case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
  1617. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1618. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1619. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
  1620. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
  1621. emith_mul_s64(tmp3, tmp4, tmp, tmp2);
  1622. goto end_op;
  1623. }
  1624. goto default_;
  1625. /////////////////////////////////////////////
  1626. case 0x04:
  1627. switch (op & 0x0f)
  1628. {
  1629. case 0x00:
  1630. switch (GET_Fx())
  1631. {
  1632. case 0: // SHLL Rn 0100nnnn00000000
  1633. case 2: // SHAL Rn 0100nnnn00100000
  1634. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1635. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1636. if (drcf.delayed_op)
  1637. DELAY_SAVE_T(sr);
  1638. emith_tpop_carry(sr, 0); // dummy
  1639. emith_lslf(tmp, tmp, 1);
  1640. emith_tpush_carry(sr, 0);
  1641. goto end_op;
  1642. case 1: // DT Rn 0100nnnn00010000
  1643. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1644. if (drcf.delayed_op)
  1645. DELAY_SAVE_T(sr);
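// detect the common busy-wait idiom "1: dt Rn; bf 1b" (0x8bfd = bf back to
// the dt): when Rn is a known constant the loop is folded away: Rn becomes
// 0, T is set, the loop's cycles are charged up front and the bf is skipped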
  1646. if (FETCH_OP(pc) == 0x8bfd) { // BF #-2
  1647. if (gconst_get(GET_Rn(), &tmp)) {
  1648. // XXX: limit burned cycles
  1649. emit_move_r_imm32(GET_Rn(), 0);
  1650. emith_or_r_imm(sr, T);
  1651. cycles += tmp * 4;
  1652. skip_op = 1;
  1653. }
  1654. else
  1655. emith_sh2_dtbf_loop();
  1656. goto end_op;
  1657. }
  1658. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1659. emith_bic_r_imm(sr, T);
  1660. emith_subf_r_imm(tmp, 1);
  1661. emit_or_t_if_eq(sr);
  1662. goto end_op;
  1663. }
  1664. goto default_;
  1665. case 0x01:
  1666. switch (GET_Fx())
  1667. {
  1668. case 0: // SHLR Rn 0100nnnn00000001
  1669. case 2: // SHAR Rn 0100nnnn00100001
  1670. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1671. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1672. if (drcf.delayed_op)
  1673. DELAY_SAVE_T(sr);
  1674. emith_tpop_carry(sr, 0); // dummy
  1675. if (op & 0x20) {
  1676. emith_asrf(tmp, tmp, 1);
  1677. } else
  1678. emith_lsrf(tmp, tmp, 1);
  1679. emith_tpush_carry(sr, 0);
  1680. goto end_op;
  1681. case 1: // CMP/PZ Rn 0100nnnn00010001
  1682. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1683. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1684. if (drcf.delayed_op)
  1685. DELAY_SAVE_T(sr);
  1686. emith_bic_r_imm(sr, T);
  1687. emith_cmp_r_imm(tmp, 0);
  1688. EMITH_SJMP_START(DCOND_LT);
  1689. emith_or_r_imm_c(DCOND_GE, sr, T);
  1690. EMITH_SJMP_END(DCOND_LT);
  1691. goto end_op;
  1692. }
  1693. goto default_;
  1694. case 0x02:
  1695. case 0x03:
  1696. switch (op & 0x3f)
  1697. {
1698. case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
1699. tmp = SHR_MACH;
1700. break;
1701. case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
1702. tmp = SHR_MACL;
1703. break;
1704. case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
1705. tmp = SHR_PR;
1706. break;
1707. case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
1708. tmp = SHR_SR;
1709. break;
1710. case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
1711. tmp = SHR_GBR;
1712. break;
1713. case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
  1714. tmp = SHR_VBR;
  1715. break;
  1716. default:
  1717. goto default_;
  1718. }
  1719. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1720. emith_sub_r_imm(tmp2, 4);
  1721. rcache_clean();
  1722. rcache_get_reg_arg(0, GET_Rn());
  1723. tmp3 = rcache_get_reg_arg(1, tmp);
  1724. if (tmp == SHR_SR)
  1725. emith_clear_msb(tmp3, tmp3, 22); // reserved bits defined by ISA as 0
  1726. emit_memhandler_write(2, pc, drcf.delayed_op);
  1727. goto end_op;
  1728. case 0x04:
  1729. case 0x05:
  1730. switch (op & 0x3f)
  1731. {
  1732. case 0x04: // ROTL Rn 0100nnnn00000100
  1733. case 0x05: // ROTR Rn 0100nnnn00000101
  1734. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1735. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1736. if (drcf.delayed_op)
  1737. DELAY_SAVE_T(sr);
  1738. emith_tpop_carry(sr, 0); // dummy
  1739. if (op & 1) {
  1740. emith_rorf(tmp, tmp, 1);
  1741. } else
  1742. emith_rolf(tmp, tmp, 1);
  1743. emith_tpush_carry(sr, 0);
  1744. goto end_op;
  1745. case 0x24: // ROTCL Rn 0100nnnn00100100
  1746. case 0x25: // ROTCR Rn 0100nnnn00100101
  1747. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1748. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1749. if (drcf.delayed_op)
  1750. DELAY_SAVE_T(sr);
  1751. emith_tpop_carry(sr, 0);
  1752. if (op & 1) {
  1753. emith_rorcf(tmp);
  1754. } else
  1755. emith_rolcf(tmp);
  1756. emith_tpush_carry(sr, 0);
  1757. goto end_op;
  1758. case 0x15: // CMP/PL Rn 0100nnnn00010101
  1759. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1760. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1761. if (drcf.delayed_op)
  1762. DELAY_SAVE_T(sr);
  1763. emith_bic_r_imm(sr, T);
  1764. emith_cmp_r_imm(tmp, 0);
  1765. EMITH_SJMP_START(DCOND_LE);
  1766. emith_or_r_imm_c(DCOND_GT, sr, T);
  1767. EMITH_SJMP_END(DCOND_LE);
  1768. goto end_op;
  1769. }
  1770. goto default_;
  1771. case 0x06:
  1772. case 0x07:
  1773. switch (op & 0x3f)
  1774. {
  1775. case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
  1776. tmp = SHR_MACH;
  1777. break;
  1778. case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
  1779. tmp = SHR_MACL;
  1780. break;
  1781. case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
  1782. tmp = SHR_PR;
  1783. break;
  1784. case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
  1785. tmp = SHR_SR;
  1786. break;
  1787. case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
  1788. tmp = SHR_GBR;
  1789. break;
  1790. case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
  1791. tmp = SHR_VBR;
  1792. break;
  1793. default:
  1794. goto default_;
  1795. }
  1796. rcache_get_reg_arg(0, GET_Rn());
  1797. tmp2 = emit_memhandler_read(2);
  1798. if (tmp == SHR_SR) {
  1799. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1800. if (drcf.delayed_op)
  1801. DELAY_SAVE_T(sr);
  1802. emith_write_sr(sr, tmp2);
  1803. drcf.test_irq = 1;
  1804. } else {
  1805. tmp = rcache_get_reg(tmp, RC_GR_WRITE);
  1806. emith_move_r_r(tmp, tmp2);
  1807. }
  1808. rcache_free_tmp(tmp2);
  1809. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1810. emith_add_r_imm(tmp, 4);
  1811. goto end_op;
  1812. case 0x08:
  1813. case 0x09:
  1814. switch (GET_Fx())
  1815. {
  1816. case 0:
  1817. // SHLL2 Rn 0100nnnn00001000
  1818. // SHLR2 Rn 0100nnnn00001001
  1819. tmp = 2;
  1820. break;
  1821. case 1:
  1822. // SHLL8 Rn 0100nnnn00011000
  1823. // SHLR8 Rn 0100nnnn00011001
  1824. tmp = 8;
  1825. break;
  1826. case 2:
  1827. // SHLL16 Rn 0100nnnn00101000
  1828. // SHLR16 Rn 0100nnnn00101001
  1829. tmp = 16;
  1830. break;
  1831. default:
  1832. goto default_;
  1833. }
  1834. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  1835. if (op & 1) {
  1836. emith_lsr(tmp2, tmp2, tmp);
  1837. } else
  1838. emith_lsl(tmp2, tmp2, tmp);
  1839. goto end_op;
  1840. case 0x0a:
  1841. switch (GET_Fx())
  1842. {
  1843. case 0: // LDS Rm,MACH 0100mmmm00001010
  1844. tmp2 = SHR_MACH;
  1845. break;
  1846. case 1: // LDS Rm,MACL 0100mmmm00011010
  1847. tmp2 = SHR_MACL;
  1848. break;
  1849. case 2: // LDS Rm,PR 0100mmmm00101010
  1850. tmp2 = SHR_PR;
  1851. break;
  1852. default:
  1853. goto default_;
  1854. }
  1855. emit_move_r_r(tmp2, GET_Rn());
  1856. goto end_op;
  1857. case 0x0b:
  1858. switch (GET_Fx())
  1859. {
  1860. case 0: // JSR @Rm 0100mmmm00001011
  1861. case 2: // JMP @Rm 0100mmmm00101011
  1862. DELAYED_OP;
  1863. if (!(op & 0x20))
  1864. emit_move_r_imm32(SHR_PR, pc + 2);
  1865. emit_move_r_r(SHR_PC, (op >> 8) & 0x0f);
  1866. out_pc = (u32)-1;
  1867. cycles++;
  1868. break;
  1869. case 1: // TAS.B @Rn 0100nnnn00011011
  1870. // XXX: is TAS working on 32X?
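// on hardware TAS.B is a locked read-modify-write bus cycle; it is emulated
// here as a separate read and write, so atomicity against the other SH2 is
// not guaranteed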
  1871. rcache_get_reg_arg(0, GET_Rn());
  1872. tmp = emit_memhandler_read(0);
  1873. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1874. if (drcf.delayed_op)
  1875. DELAY_SAVE_T(sr);
  1876. emith_bic_r_imm(sr, T);
  1877. emith_cmp_r_imm(tmp, 0);
  1878. emit_or_t_if_eq(sr);
  1879. rcache_clean();
  1880. emith_or_r_imm(tmp, 0x80);
1881. tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
  1882. emith_move_r_r(tmp2, tmp);
  1883. rcache_free_tmp(tmp);
  1884. rcache_get_reg_arg(0, GET_Rn());
  1885. emit_memhandler_write(0, pc, drcf.delayed_op);
  1886. cycles += 3;
  1887. break;
  1888. default:
  1889. goto default_;
  1890. }
  1891. goto end_op;
  1892. case 0x0e:
  1893. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
  1894. switch (GET_Fx())
  1895. {
  1896. case 0: // LDC Rm,SR 0100mmmm00001110
  1897. tmp2 = SHR_SR;
  1898. break;
  1899. case 1: // LDC Rm,GBR 0100mmmm00011110
  1900. tmp2 = SHR_GBR;
  1901. break;
  1902. case 2: // LDC Rm,VBR 0100mmmm00101110
  1903. tmp2 = SHR_VBR;
  1904. break;
  1905. default:
  1906. goto default_;
  1907. }
  1908. if (tmp2 == SHR_SR) {
  1909. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1910. if (drcf.delayed_op)
  1911. DELAY_SAVE_T(sr);
  1912. emith_write_sr(sr, tmp);
  1913. drcf.test_irq = 1;
  1914. } else {
  1915. tmp2 = rcache_get_reg(tmp2, RC_GR_WRITE);
  1916. emith_move_r_r(tmp2, tmp);
  1917. }
  1918. goto end_op;
  1919. case 0x0f:
  1920. // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
  1921. emit_indirect_read_double(&tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
  1922. emith_sext(tmp, tmp, 16);
  1923. emith_sext(tmp2, tmp2, 16);
  1924. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW);
  1925. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW);
  1926. emith_mula_s64(tmp3, tmp4, tmp, tmp2);
  1927. rcache_free_tmp(tmp2);
  1928. // XXX: MACH should be untouched when S is set?
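// per the SH7604 manual, MAC.W with S=1 saturates the result to 32 bits in
// MACL; MACH is left unchanged except that its LSB is set to 1 on overflow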
  1929. sr = rcache_get_reg(SHR_SR, RC_GR_READ);
  1930. emith_tst_r_imm(sr, S);
  1931. EMITH_JMP_START(DCOND_EQ);
  1932. emith_asr(tmp, tmp3, 31);
  1933. emith_eorf_r_r(tmp, tmp4); // tmp = ((signed)macl >> 31) ^ mach
  1934. EMITH_JMP_START(DCOND_EQ);
  1935. emith_move_r_imm(tmp3, 0x80000000);
  1936. emith_tst_r_r(tmp4, tmp4);
  1937. EMITH_SJMP_START(DCOND_MI);
  1938. emith_sub_r_imm_c(DCOND_PL, tmp3, 1); // positive
  1939. EMITH_SJMP_END(DCOND_MI);
  1940. EMITH_JMP_END(DCOND_EQ);
  1941. EMITH_JMP_END(DCOND_EQ);
  1942. rcache_free_tmp(tmp);
  1943. cycles += 2;
  1944. goto end_op;
  1945. }
  1946. goto default_;
  1947. /////////////////////////////////////////////
  1948. case 0x05:
  1949. // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
  1950. emit_memhandler_read_rr(GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2);
  1951. goto end_op;
  1952. /////////////////////////////////////////////
  1953. case 0x06:
  1954. switch (op & 0x0f)
  1955. {
  1956. case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
  1957. case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
  1958. case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
  1959. case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
  1960. case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
  1961. case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
  1962. emit_memhandler_read_rr(GET_Rn(), GET_Rm(), 0, op & 3);
  1963. if ((op & 7) >= 4 && GET_Rn() != GET_Rm()) {
  1964. tmp = rcache_get_reg(GET_Rm(), RC_GR_RMW);
  1965. emith_add_r_imm(tmp, (1 << (op & 3)));
  1966. }
  1967. goto end_op;
  1968. case 0x03:
  1969. case 0x07 ... 0x0f:
  1970. tmp = rcache_get_reg(GET_Rm(), RC_GR_READ);
  1971. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
  1972. switch (op & 0x0f)
  1973. {
  1974. case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
  1975. emith_move_r_r(tmp2, tmp);
  1976. break;
  1977. case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
  1978. emith_mvn_r_r(tmp2, tmp);
  1979. break;
  1980. case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
  1981. tmp3 = tmp2;
  1982. if (tmp == tmp2)
  1983. tmp3 = rcache_get_tmp();
  1984. tmp4 = rcache_get_tmp();
  1985. emith_lsr(tmp3, tmp, 16);
  1986. emith_or_r_r_lsl(tmp3, tmp, 24);
  1987. emith_and_r_r_imm(tmp4, tmp, 0xff00);
  1988. emith_or_r_r_lsl(tmp3, tmp4, 8);
  1989. emith_rol(tmp2, tmp3, 16);
  1990. rcache_free_tmp(tmp4);
  1991. if (tmp == tmp2)
  1992. rcache_free_tmp(tmp3);
  1993. break;
  1994. case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
  1995. emith_rol(tmp2, tmp, 16);
  1996. break;
  1997. case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
  1998. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  1999. if (drcf.delayed_op)
  2000. DELAY_SAVE_T(sr);
  2001. emith_tpop_carry(sr, 1);
  2002. emith_negcf_r_r(tmp2, tmp);
  2003. emith_tpush_carry(sr, 1);
  2004. break;
  2005. case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
  2006. emith_neg_r_r(tmp2, tmp);
  2007. break;
  2008. case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
  2009. emith_clear_msb(tmp2, tmp, 24);
  2010. break;
  2011. case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
  2012. emith_clear_msb(tmp2, tmp, 16);
  2013. break;
  2014. case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
  2015. emith_sext(tmp2, tmp, 8);
  2016. break;
  2017. case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
  2018. emith_sext(tmp2, tmp, 16);
  2019. break;
  2020. }
  2021. goto end_op;
  2022. }
  2023. goto default_;
  2024. /////////////////////////////////////////////
  2025. case 0x07:
  2026. // ADD #imm,Rn 0111nnnniiiiiiii
  2027. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
  2028. if (op & 0x80) { // adding negative
  2029. emith_sub_r_imm(tmp, -op & 0xff);
  2030. } else
  2031. emith_add_r_imm(tmp, op & 0xff);
  2032. goto end_op;
  2033. /////////////////////////////////////////////
  2034. case 0x08:
  2035. switch (op & 0x0f00)
  2036. {
  2037. case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
  2038. case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
  2039. rcache_clean();
  2040. tmp = rcache_get_reg_arg(0, GET_Rm());
  2041. tmp2 = rcache_get_reg_arg(1, SHR_R0);
  2042. tmp3 = (op & 0x100) >> 8;
  2043. if (op & 0x0f)
  2044. emith_add_r_imm(tmp, (op & 0x0f) << tmp3);
  2045. emit_memhandler_write(tmp3, pc, drcf.delayed_op);
  2046. goto end_op;
  2047. case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
  2048. case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
  2049. tmp = (op & 0x100) >> 8;
  2050. emit_memhandler_read_rr(SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
  2051. goto end_op;
  2052. case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
  2053. // XXX: could use cmn
  2054. tmp = rcache_get_tmp();
  2055. tmp2 = rcache_get_reg(0, RC_GR_READ);
  2056. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  2057. if (drcf.delayed_op)
  2058. DELAY_SAVE_T(sr);
  2059. emith_move_r_imm_s8(tmp, op & 0xff);
  2060. emith_bic_r_imm(sr, T);
  2061. emith_cmp_r_r(tmp2, tmp);
  2062. emit_or_t_if_eq(sr);
  2063. rcache_free_tmp(tmp);
  2064. goto end_op;
  2065. case 0x0d00: // BT/S label 10001101dddddddd
  2066. case 0x0f00: // BF/S label 10001111dddddddd
  2067. DELAYED_OP;
  2068. cycles--;
  2069. // fallthrough
  2070. case 0x0900: // BT label 10001001dddddddd
  2071. case 0x0b00: // BF label 10001011dddddddd
  2072. // will handle conditional branches later
  2073. pending_branch_cond = (op & 0x0200) ? DCOND_EQ : DCOND_NE;
  2074. i = ((signed int)(op << 24) >> 23);
  2075. pending_branch_pc = pc + i + 2;
  2076. cycles += 2;
  2077. goto end_op;
  2078. }
  2079. goto default_;
  2080. /////////////////////////////////////////////
  2081. case 0x09:
  2082. // MOV.W @(disp,PC),Rn 1001nnnndddddddd
  2083. tmp = pc + (op & 0xff) * 2 + 2;
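// PC-relative literals close enough to this block are fetched at translation
// time and propagated as constants; last_inlined_literal is tracked so the
// block's end_addr later also covers the literal pool for SMC detection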
  2084. #if PROPAGATE_CONSTANTS
  2085. if (tmp < end_pc + MAX_LITERAL_OFFSET) {
  2086. gconst_new(GET_Rn(), (u32)(int)(signed short)FETCH_OP(tmp));
  2087. if (last_inlined_literal < tmp)
  2088. last_inlined_literal = tmp;
  2089. }
  2090. else
  2091. #endif
  2092. {
  2093. tmp2 = rcache_get_tmp_arg(0);
  2094. emith_move_r_imm(tmp2, tmp);
  2095. tmp2 = emit_memhandler_read(1);
  2096. tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
  2097. emith_sext(tmp3, tmp2, 16);
  2098. rcache_free_tmp(tmp2);
  2099. }
  2100. goto end_op;
  2101. /////////////////////////////////////////////
  2102. case 0x0a:
  2103. // BRA label 1010dddddddddddd
  2104. DELAYED_OP;
  2105. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  2106. tmp = ((signed int)(op << 20) >> 19);
  2107. out_pc = pc + tmp + 2;
  2108. if (tmp == (u32)-4)
  2109. emith_clear_msb(sr, sr, 20); // burn cycles
  2110. cycles++;
  2111. break;
  2112. /////////////////////////////////////////////
  2113. case 0x0b:
  2114. // BSR label 1011dddddddddddd
  2115. DELAYED_OP;
  2116. emit_move_r_imm32(SHR_PR, pc + 2);
  2117. tmp = ((signed int)(op << 20) >> 19);
  2118. out_pc = pc + tmp + 2;
  2119. cycles++;
  2120. break;
  2121. /////////////////////////////////////////////
  2122. case 0x0c:
  2123. switch (op & 0x0f00)
  2124. {
  2125. case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
  2126. case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
  2127. case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
  2128. rcache_clean();
  2129. tmp = rcache_get_reg_arg(0, SHR_GBR);
  2130. tmp2 = rcache_get_reg_arg(1, SHR_R0);
  2131. tmp3 = (op & 0x300) >> 8;
  2132. emith_add_r_imm(tmp, (op & 0xff) << tmp3);
  2133. emit_memhandler_write(tmp3, pc, drcf.delayed_op);
  2134. goto end_op;
  2135. case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
  2136. case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
  2137. case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
  2138. tmp = (op & 0x300) >> 8;
  2139. emit_memhandler_read_rr(SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
  2140. goto end_op;
  2141. case 0x0300: // TRAPA #imm 11000011iiiiiiii
  2142. tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
  2143. emith_sub_r_imm(tmp, 4*2);
  2144. // push SR
  2145. tmp = rcache_get_reg_arg(0, SHR_SP);
  2146. emith_add_r_imm(tmp, 4);
  2147. tmp = rcache_get_reg_arg(1, SHR_SR);
  2148. emith_clear_msb(tmp, tmp, 22);
  2149. emit_memhandler_write(2, pc, drcf.delayed_op);
  2150. // push PC
  2151. rcache_get_reg_arg(0, SHR_SP);
  2152. tmp = rcache_get_tmp_arg(1);
  2153. emith_move_r_imm(tmp, pc);
  2154. emit_memhandler_write(2, pc, drcf.delayed_op);
  2155. // obtain new PC
  2156. emit_memhandler_read_rr(SHR_PC, SHR_VBR, (op & 0xff) * 4, 2);
  2157. out_pc = (u32)-1;
  2158. cycles += 7;
  2159. goto end_op;
  2160. case 0x0700: // MOVA @(disp,PC),R0 11000111dddddddd
  2161. emit_move_r_imm32(SHR_R0, (pc + (op & 0xff) * 4 + 2) & ~3);
  2162. goto end_op;
  2163. case 0x0800: // TST #imm,R0 11001000iiiiiiii
  2164. tmp = rcache_get_reg(SHR_R0, RC_GR_READ);
  2165. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  2166. if (drcf.delayed_op)
  2167. DELAY_SAVE_T(sr);
  2168. emith_bic_r_imm(sr, T);
  2169. emith_tst_r_imm(tmp, op & 0xff);
  2170. emit_or_t_if_eq(sr);
  2171. goto end_op;
  2172. case 0x0900: // AND #imm,R0 11001001iiiiiiii
  2173. tmp = rcache_get_reg(SHR_R0, RC_GR_RMW);
  2174. emith_and_r_imm(tmp, op & 0xff);
  2175. goto end_op;
  2176. case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
  2177. tmp = rcache_get_reg(SHR_R0, RC_GR_RMW);
  2178. emith_eor_r_imm(tmp, op & 0xff);
  2179. goto end_op;
  2180. case 0x0b00: // OR #imm,R0 11001011iiiiiiii
  2181. tmp = rcache_get_reg(SHR_R0, RC_GR_RMW);
  2182. emith_or_r_imm(tmp, op & 0xff);
  2183. goto end_op;
  2184. case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
  2185. tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
  2186. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  2187. if (drcf.delayed_op)
  2188. DELAY_SAVE_T(sr);
  2189. emith_bic_r_imm(sr, T);
  2190. emith_tst_r_imm(tmp, op & 0xff);
  2191. emit_or_t_if_eq(sr);
  2192. rcache_free_tmp(tmp);
  2193. cycles += 2;
  2194. goto end_op;
  2195. case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
  2196. tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
  2197. emith_and_r_imm(tmp, op & 0xff);
  2198. goto end_rmw_op;
  2199. case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
  2200. tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
  2201. emith_eor_r_imm(tmp, op & 0xff);
  2202. goto end_rmw_op;
  2203. case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
  2204. tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
  2205. emith_or_r_imm(tmp, op & 0xff);
  2206. end_rmw_op:
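// common write-back tail for the AND.B/XOR.B/OR.B @(R0,GBR) cases above:
// store the modified byte back to GBR+R0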
  2207. tmp2 = rcache_get_tmp_arg(1);
  2208. emith_move_r_r(tmp2, tmp);
  2209. rcache_free_tmp(tmp);
  2210. tmp3 = rcache_get_reg_arg(0, SHR_GBR);
  2211. tmp4 = rcache_get_reg(SHR_R0, RC_GR_READ);
  2212. emith_add_r_r(tmp3, tmp4);
  2213. emit_memhandler_write(0, pc, drcf.delayed_op);
  2214. cycles += 2;
  2215. goto end_op;
  2216. }
  2217. goto default_;
  2218. /////////////////////////////////////////////
  2219. case 0x0d:
  2220. // MOV.L @(disp,PC),Rn 1101nnnndddddddd
  2221. tmp = (pc + (op & 0xff) * 4 + 2) & ~3;
  2222. #if PROPAGATE_CONSTANTS
  2223. if (tmp < end_pc + MAX_LITERAL_OFFSET) {
  2224. gconst_new(GET_Rn(), FETCH32(tmp));
  2225. if (last_inlined_literal < tmp)
  2226. last_inlined_literal = tmp;
  2227. }
  2228. else
  2229. #endif
  2230. {
  2231. tmp2 = rcache_get_tmp_arg(0);
  2232. emith_move_r_imm(tmp2, tmp);
  2233. tmp2 = emit_memhandler_read(2);
  2234. tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
  2235. emith_move_r_r(tmp3, tmp2);
  2236. rcache_free_tmp(tmp2);
  2237. }
  2238. goto end_op;
  2239. /////////////////////////////////////////////
  2240. case 0x0e:
  2241. // MOV #imm,Rn 1110nnnniiiiiiii
  2242. emit_move_r_imm32(GET_Rn(), (u32)(signed int)(signed char)op);
  2243. goto end_op;
  2244. default:
  2245. default_:
  2246. elprintf(EL_ANOMALY, "%csh2 drc: unhandled op %04x @ %08x",
  2247. sh2->is_slave ? 's' : 'm', op, pc - 2);
  2248. #ifdef DRC_DEBUG_INTERP
  2249. emit_move_r_imm32(SHR_PC, pc - 2);
  2250. rcache_flush();
  2251. emith_pass_arg_r(0, CONTEXT_REG);
  2252. emith_pass_arg_imm(1, op);
  2253. emith_call(sh2_do_op);
  2254. #endif
  2255. break;
  2256. }
  2257. end_op:
  2258. rcache_unlock_all();
  2259. // conditional branch handling (with/without delay)
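// if the target lies within the region being translated, emit a patchable
// conditional jump and record it for the local-branch linking pass below;
// otherwise emit a normal block exit via dr_prepare_ext_branch()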
  2260. if (pending_branch_cond != -1 && drcf.delayed_op != 2)
  2261. {
  2262. u32 target_pc = pending_branch_pc;
  2263. void *target;
  2264. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  2265. // handle cycles
  2266. FLUSH_CYCLES(sr);
  2267. rcache_clean();
  2268. if (drcf.use_saved_t)
  2269. emith_tst_r_imm(sr, T_save);
  2270. else
  2271. emith_tst_r_imm(sr, T);
  2272. #if LINK_BRANCHES
  2273. if (find_in_array(branch_target_pc, branch_target_count, target_pc) >= 0) {
  2274. // local branch
  2275. // XXX: jumps back can be linked already
  2276. branch_patch_pc[branch_patch_count] = target_pc;
  2277. branch_patch_ptr[branch_patch_count] = tcache_ptr;
  2278. emith_jump_cond_patchable(pending_branch_cond, tcache_ptr);
  2279. branch_patch_count++;
  2280. if (branch_patch_count == MAX_LOCAL_BRANCHES) {
  2281. printf("warning: too many local branches\n");
  2282. break;
  2283. }
  2284. }
  2285. else
  2286. #endif
  2287. {
  2288. // can't resolve branch locally, make a block exit
  2289. emit_move_r_imm32(SHR_PC, target_pc);
  2290. rcache_clean();
  2291. target = dr_prepare_ext_branch(target_pc, sh2, tcache_id);
  2292. if (target == NULL)
  2293. return NULL;
  2294. emith_jump_cond_patchable(pending_branch_cond, target);
  2295. }
  2296. drcf.use_saved_t = 0;
  2297. pending_branch_cond = -1;
  2298. }
  2299. // test irq?
  2300. // XXX: delay slots..
  2301. if (drcf.test_irq && drcf.delayed_op != 2) {
  2302. if (!drcf.delayed_op)
  2303. emit_move_r_imm32(SHR_PC, pc);
  2304. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  2305. FLUSH_CYCLES(sr);
  2306. rcache_flush();
  2307. emith_call(sh2_drc_test_irq);
  2308. drcf.test_irq = 0;
  2309. }
  2310. do_host_disasm(tcache_id);
  2311. if (out_pc != 0 && drcf.delayed_op != 2)
  2312. break;
  2313. }
  2314. tmp = rcache_get_reg(SHR_SR, RC_GR_RMW);
  2315. FLUSH_CYCLES(tmp);
  2316. rcache_flush();
  2317. if (out_pc == (u32)-1) {
  2318. // indirect jump -> back to dispatcher
  2319. emith_jump(sh2_drc_dispatcher);
  2320. } else {
  2321. void *target;
  2322. if (out_pc == 0)
  2323. out_pc = pc;
  2324. emit_move_r_imm32(SHR_PC, out_pc);
  2325. rcache_flush();
  2326. target = dr_prepare_ext_branch(out_pc, sh2, tcache_id);
  2327. if (target == NULL)
  2328. return NULL;
  2329. emith_jump_patchable(target);
  2330. }
  2331. // link local branches
  2332. for (i = 0; i < branch_patch_count; i++) {
  2333. void *target;
  2334. int t;
  2335. t = find_in_array(branch_target_pc, branch_target_count, branch_patch_pc[i]);
  2336. target = branch_target_ptr[t];
  2337. if (target == NULL) {
  2338. // flush pc and go back to dispatcher (should no longer happen)
  2339. printf("stray branch to %08x %p\n", branch_patch_pc[i], tcache_ptr);
  2340. target = tcache_ptr;
  2341. emit_move_r_imm32(SHR_PC, branch_patch_pc[i]);
  2342. rcache_flush();
  2343. emith_jump(sh2_drc_dispatcher);
  2344. }
  2345. emith_jump_patch(branch_patch_ptr[i], target);
  2346. }
  2347. this_block->end_addr = pc;
  2348. if (last_inlined_literal > pc)
  2349. this_block->end_addr = last_inlined_literal + 4;
  2350. // mark memory blocks as containing compiled code
  2351. if (tcache_id != 0) {
  2352. // data array, BIOS
  2353. u16 *drcblk = Pico32xMem->drcblk_da[sh2->is_slave];
  2354. tmp = (this_block->addr & 0xfff) >> SH2_DRCBLK_DA_SHIFT;
  2355. tmp2 = (this_block->end_addr & 0xfff) >> SH2_DRCBLK_DA_SHIFT;
  2356. drcblk[tmp] = (blkid_main << 1) | 1;
  2357. for (++tmp; tmp < tmp2; tmp++) {
  2358. if (drcblk[tmp])
2359. continue; // don't overwrite overlay block(s)
  2360. drcblk[tmp] = blkid_main << 1;
  2361. }
  2362. }
  2363. else if ((this_block->addr & 0xc7fc0000) == 0x06000000) { // DRAM
  2364. tmp = (this_block->addr & 0x3ffff) >> SH2_DRCBLK_RAM_SHIFT;
  2365. tmp2 = (this_block->end_addr & 0x3ffff) >> SH2_DRCBLK_RAM_SHIFT;
  2366. Pico32xMem->drcblk_ram[tmp] = (blkid_main << 1) | 1;
  2367. for (++tmp; tmp < tmp2; tmp++) {
  2368. if (Pico32xMem->drcblk_ram[tmp])
  2369. continue;
  2370. Pico32xMem->drcblk_ram[tmp] = blkid_main << 1;
  2371. }
  2372. }
  2373. tcache_ptrs[tcache_id] = tcache_ptr;
  2374. #ifdef ARM
  2375. cache_flush_d_inval_i(block_entry, tcache_ptr);
  2376. #endif
  2377. do_host_disasm(tcache_id);
  2378. dbg(1, " block #%d,%d tcache %d/%d, insns %d -> %d %.3f",
  2379. tcache_id, block_counts[tcache_id],
  2380. tcache_ptr - tcache_bases[tcache_id], tcache_sizes[tcache_id],
  2381. insns_compiled, host_insn_count, (double)host_insn_count / insns_compiled);
  2382. if ((sh2->pc & 0xc6000000) == 0x02000000) // ROM
  2383. dbg(1, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]);
  2384. /*
  2385. printf("~~~\n");
  2386. tcache_dsm_ptrs[tcache_id] = block_entry;
  2387. do_host_disasm(tcache_id);
  2388. printf("~~~\n");
  2389. */
  2390. #if (DRC_DEBUG & 2)
  2391. fflush(stdout);
  2392. #endif
  2393. return block_entry;
  2394. }
  2395. static void sh2_generate_utils(void)
  2396. {
  2397. int arg0, arg1, arg2, sr, tmp;
  2398. void *sh2_drc_write_end, *sh2_drc_write_slot_end;
  2399. sh2_drc_write32 = p32x_sh2_write32;
  2400. sh2_drc_read8 = p32x_sh2_read8;
  2401. sh2_drc_read16 = p32x_sh2_read16;
  2402. sh2_drc_read32 = p32x_sh2_read32;
  2403. host_arg2reg(arg0, 0);
  2404. host_arg2reg(arg1, 1);
  2405. host_arg2reg(arg2, 2);
  2406. emith_move_r_r(arg0, arg0); // nop
  2407. // sh2_drc_exit(void)
  2408. sh2_drc_exit = (void *)tcache_ptr;
  2409. emit_do_static_regs(1, arg2);
  2410. emith_sh2_drc_exit();
  2411. // sh2_drc_dispatcher(void)
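// exits to the caller (via sh2_drc_exit) once the cycle counter kept in the
// top bits of SR goes negative; otherwise looks up the block for the current
// PC, translating it on a miss, and jumps to it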
  2412. sh2_drc_dispatcher = (void *)tcache_ptr;
  2413. sr = rcache_get_reg(SHR_SR, RC_GR_READ);
  2414. emith_cmp_r_imm(sr, 0);
  2415. emith_jump_cond(DCOND_LT, sh2_drc_exit);
  2416. rcache_invalidate();
  2417. emith_ctx_read(arg0, SHR_PC * 4);
  2418. emith_ctx_read(arg1, offsetof(SH2, is_slave));
  2419. emith_add_r_r_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  2420. emith_call(lookup_block);
  2421. emit_block_entry();
  2422. // lookup failed, call sh2_translate()
  2423. emith_move_r_r(arg0, CONTEXT_REG);
  2424. emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  2425. emith_call(sh2_translate);
  2426. emit_block_entry();
  2427. // sh2_translate() failed, flush cache and retry
  2428. emith_ctx_read(arg0, offsetof(SH2, drc_tmp));
  2429. emith_call(flush_tcache);
  2430. emith_move_r_r(arg0, CONTEXT_REG);
  2431. emith_ctx_read(arg1, offsetof(SH2, drc_tmp));
  2432. emith_call(sh2_translate);
  2433. emit_block_entry();
  2434. // XXX: can't translate, fail
  2435. emith_call(exit);
  2436. // sh2_drc_test_irq(void)
  2437. // assumes it's called from main function (may jump to dispatcher)
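// if sh2->pending_level exceeds the I mask in SR: push SR and PC, raise the
// I level to the pending one, charge the (at least) 13-cycle interrupt
// latency, fetch the vector through irq_callback()/VBR and re-enter the
// dispatcher at the handler's address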
  2438. sh2_drc_test_irq = (void *)tcache_ptr;
  2439. emith_ctx_read(arg1, offsetof(SH2, pending_level));
  2440. sr = rcache_get_reg(SHR_SR, RC_GR_READ);
  2441. emith_lsr(arg0, sr, I_SHIFT);
  2442. emith_and_r_imm(arg0, 0x0f);
  2443. emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  2444. EMITH_SJMP_START(DCOND_GT);
  2445. emith_ret_c(DCOND_LE); // nope, return
  2446. EMITH_SJMP_END(DCOND_GT);
  2447. // adjust SP
  2448. tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
  2449. emith_sub_r_imm(tmp, 4*2);
  2450. rcache_clean();
  2451. // push SR
  2452. tmp = rcache_get_reg_arg(0, SHR_SP);
  2453. emith_add_r_imm(tmp, 4);
  2454. tmp = rcache_get_reg_arg(1, SHR_SR);
  2455. emith_clear_msb(tmp, tmp, 22);
  2456. emith_move_r_r(arg2, CONTEXT_REG);
  2457. emith_call(p32x_sh2_write32); // XXX: use sh2_drc_write32?
  2458. rcache_invalidate();
  2459. // push PC
  2460. rcache_get_reg_arg(0, SHR_SP);
  2461. emith_ctx_read(arg1, SHR_PC * 4);
  2462. emith_move_r_r(arg2, CONTEXT_REG);
  2463. emith_call(p32x_sh2_write32);
  2464. rcache_invalidate();
  2465. // update I, cycles, do callback
  2466. emith_ctx_read(arg1, offsetof(SH2, pending_level));
  2467. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  2468. emith_bic_r_imm(sr, I);
  2469. emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  2470. emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  2471. rcache_flush();
  2472. emith_move_r_r(arg0, CONTEXT_REG);
  2473. emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
  2474. // obtain new PC
  2475. emith_lsl(arg0, arg0, 2);
  2476. emith_ctx_read(arg1, SHR_VBR * 4);
  2477. emith_add_r_r(arg0, arg1);
  2478. emit_memhandler_read(2);
  2479. emith_ctx_write(arg0, SHR_PC * 4);
  2480. #ifdef __i386__
  2481. emith_add_r_imm(xSP, 4); // fix stack
  2482. #endif
  2483. emith_jump(sh2_drc_dispatcher);
  2484. rcache_invalidate();
  2485. // sh2_drc_entry(SH2 *sh2)
  2486. sh2_drc_entry = (void *)tcache_ptr;
  2487. emith_sh2_drc_entry();
  2488. emith_move_r_r(CONTEXT_REG, arg0); // move ctx, arg0
  2489. emit_do_static_regs(0, arg2);
  2490. emith_call(sh2_drc_test_irq);
  2491. emith_jump(sh2_drc_dispatcher);
  2492. // write-caused irq detection
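// a nonzero return value from the write handler (left in arg0) means the
// write may have raised an interrupt; in that case run sh2_drc_test_irq
// before returning to the translated code through the address in drc_tmp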
  2493. sh2_drc_write_end = tcache_ptr;
  2494. emith_tst_r_r(arg0, arg0);
  2495. EMITH_SJMP_START(DCOND_NE);
  2496. emith_jump_ctx_c(DCOND_EQ, offsetof(SH2, drc_tmp)); // return
  2497. EMITH_SJMP_END(DCOND_NE);
  2498. emith_call(sh2_drc_test_irq);
  2499. emith_jump_ctx(offsetof(SH2, drc_tmp));
  2500. // write-caused irq detection for writes in delay slot
  2501. sh2_drc_write_slot_end = tcache_ptr;
  2502. emith_tst_r_r(arg0, arg0);
  2503. EMITH_SJMP_START(DCOND_NE);
  2504. emith_jump_ctx_c(DCOND_EQ, offsetof(SH2, drc_tmp));
  2505. EMITH_SJMP_END(DCOND_NE);
  2506. // just burn cycles to get back to dispatcher after branch is handled
  2507. sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  2508. emith_ctx_write(sr, offsetof(SH2, irq_cycles));
  2509. emith_clear_msb(sr, sr, 20); // clear cycles
  2510. rcache_flush();
  2511. emith_jump_ctx(offsetof(SH2, drc_tmp));
  2512. // sh2_drc_write8(u32 a, u32 d)
  2513. sh2_drc_write8 = (void *)tcache_ptr;
  2514. emith_ret_to_ctx(offsetof(SH2, drc_tmp));
  2515. emith_ctx_read(arg2, offsetof(SH2, write8_tab));
  2516. emith_sh2_wcall(arg0, arg2, sh2_drc_write_end);
  2517. // sh2_drc_write16(u32 a, u32 d)
  2518. sh2_drc_write16 = (void *)tcache_ptr;
  2519. emith_ret_to_ctx(offsetof(SH2, drc_tmp));
  2520. emith_ctx_read(arg2, offsetof(SH2, write16_tab));
  2521. emith_sh2_wcall(arg0, arg2, sh2_drc_write_end);
  2522. // sh2_drc_write8_slot(u32 a, u32 d)
  2523. sh2_drc_write8_slot = (void *)tcache_ptr;
  2524. emith_ret_to_ctx(offsetof(SH2, drc_tmp));
  2525. emith_ctx_read(arg2, offsetof(SH2, write8_tab));
  2526. emith_sh2_wcall(arg0, arg2, sh2_drc_write_slot_end);
  2527. // sh2_drc_write16_slot(u32 a, u32 d)
  2528. sh2_drc_write16_slot = (void *)tcache_ptr;
  2529. emith_ret_to_ctx(offsetof(SH2, drc_tmp));
  2530. emith_ctx_read(arg2, offsetof(SH2, write16_tab));
  2531. emith_sh2_wcall(arg0, arg2, sh2_drc_write_slot_end);
  2532. #ifdef PDB_NET
  2533. // debug
  2534. #define MAKE_READ_WRAPPER(func) { \
  2535. void *tmp = (void *)tcache_ptr; \
  2536. emith_ret_to_ctx(offsetof(SH2, drc_tmp)); \
  2537. emith_call(func); \
  2538. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  2539. emith_addf_r_r(arg2, arg0); \
  2540. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  2541. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  2542. emith_adc_r_imm(arg2, 0x01000000); \
  2543. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  2544. emith_jump_ctx(offsetof(SH2, drc_tmp)); \
  2545. func = tmp; \
  2546. }
  2547. #define MAKE_WRITE_WRAPPER(func) { \
  2548. void *tmp = (void *)tcache_ptr; \
  2549. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  2550. emith_addf_r_r(arg2, arg1); \
  2551. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  2552. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  2553. emith_adc_r_imm(arg2, 0x01000000); \
  2554. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  2555. emith_move_r_r(arg2, CONTEXT_REG); \
  2556. emith_jump(func); \
  2557. func = tmp; \
  2558. }
  2559. MAKE_READ_WRAPPER(sh2_drc_read8);
  2560. MAKE_READ_WRAPPER(sh2_drc_read16);
  2561. MAKE_READ_WRAPPER(sh2_drc_read32);
  2562. MAKE_WRITE_WRAPPER(sh2_drc_write8);
  2563. MAKE_WRITE_WRAPPER(sh2_drc_write8_slot);
  2564. MAKE_WRITE_WRAPPER(sh2_drc_write16);
  2565. MAKE_WRITE_WRAPPER(sh2_drc_write16_slot);
  2566. MAKE_WRITE_WRAPPER(sh2_drc_write32);
  2567. #if (DRC_DEBUG & 2)
  2568. host_dasm_new_symbol(sh2_drc_read8);
  2569. host_dasm_new_symbol(sh2_drc_read16);
  2570. host_dasm_new_symbol(sh2_drc_read32);
  2571. host_dasm_new_symbol(sh2_drc_write32);
  2572. #endif
  2573. #endif
  2574. rcache_invalidate();
  2575. #if (DRC_DEBUG & 2)
  2576. host_dasm_new_symbol(sh2_drc_entry);
  2577. host_dasm_new_symbol(sh2_drc_dispatcher);
  2578. host_dasm_new_symbol(sh2_drc_exit);
  2579. host_dasm_new_symbol(sh2_drc_test_irq);
  2580. host_dasm_new_symbol(sh2_drc_write_end);
  2581. host_dasm_new_symbol(sh2_drc_write_slot_end);
  2582. host_dasm_new_symbol(sh2_drc_write8);
  2583. host_dasm_new_symbol(sh2_drc_write8_slot);
  2584. host_dasm_new_symbol(sh2_drc_write16);
  2585. host_dasm_new_symbol(sh2_drc_write16_slot);
  2586. #endif
  2587. }
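// drcblk_* entries map SH2_DRCBLK_*_SHIFT-sized chunks of RAM / data array to
// compiled blocks: each entry holds (block id << 1), with bit 0 set on the
// chunk where the block starts (see the block-marking code at the end of
// sh2_translate()). A write hitting such a chunk invalidates the block and
// any overlapping overlay block.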
  2588. static void sh2_smc_rm_block(u16 *drcblk, u16 *p, block_desc *btab, u32 a)
  2589. {
  2590. u16 id = *p >> 1;
  2591. block_desc *bd = btab + id;
  2592. // FIXME: skip subblocks; do both directions
  2593. // FIXME: collect all branches
  2594. dbg(1, " killing block %08x", bd->addr);
  2595. bd->addr = bd->end_addr = 0;
  2596. while (p > drcblk && (p[-1] >> 1) == id)
  2597. p--;
  2598. // check for possible overlay block
2599. if (p > drcblk && p[-1] != 0) {
  2600. bd = btab + (p[-1] >> 1);
  2601. if (bd->addr <= a && a < bd->end_addr)
  2602. sh2_smc_rm_block(drcblk, p - 1, btab, a);
  2603. }
  2604. do {
  2605. *p++ = 0;
  2606. }
  2607. while ((*p >> 1) == id);
  2608. }
  2609. void sh2_drc_wcheck_ram(unsigned int a, int val, int cpuid)
  2610. {
  2611. u16 *drcblk = Pico32xMem->drcblk_ram;
  2612. u16 *p = drcblk + ((a & 0x3ffff) >> SH2_DRCBLK_RAM_SHIFT);
  2613. dbg(1, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
  2614. sh2_smc_rm_block(drcblk, p, block_tables[0], a);
  2615. }
  2616. void sh2_drc_wcheck_da(unsigned int a, int val, int cpuid)
  2617. {
  2618. u16 *drcblk = Pico32xMem->drcblk_da[cpuid];
  2619. u16 *p = drcblk + ((a & 0xfff) >> SH2_DRCBLK_DA_SHIFT);
  2620. dbg(1, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
  2621. sh2_smc_rm_block(drcblk, p, block_tables[1 + cpuid], a);
  2622. }
  2623. void sh2_execute(SH2 *sh2c, int cycles)
  2624. {
  2625. int ret_cycles;
  2626. sh2 = sh2c; // XXX
  2627. sh2c->cycles_aim += cycles;
  2628. cycles = sh2c->cycles_aim - sh2c->cycles_done;
  2629. // cycles are kept in SHR_SR unused bits (upper 20)
2630. // one spare low bit (T_save) holds T saved for the delay slot
  2631. // others are usual SH2 flags
  2632. sh2c->sr &= 0x3f3;
  2633. sh2c->sr |= cycles << 12;
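// sh2_drc_entry() runs translated code, which decrements the counter in SR's
// upper bits (FLUSH_CYCLES) and drops back to the dispatcher/exit once it
// goes negative; whatever remains is read back below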
  2634. sh2_drc_entry(sh2c);
  2635. // TODO: irq cycles
  2636. ret_cycles = (signed int)sh2c->sr >> 12;
  2637. if (ret_cycles > 0)
  2638. printf("warning: drc returned with cycles: %d\n", ret_cycles);
  2639. sh2c->cycles_done += cycles - ret_cycles;
  2640. }
  2641. #if (DRC_DEBUG & 1)
  2642. void block_stats(void)
  2643. {
  2644. int c, b, i, total = 0;
  2645. printf("block stats:\n");
  2646. for (b = 0; b < ARRAY_SIZE(block_tables); b++)
  2647. for (i = 0; i < block_counts[b]; i++)
  2648. if (block_tables[b][i].addr != 0)
  2649. total += block_tables[b][i].refcount;
  2650. for (c = 0; c < 10; c++) {
  2651. block_desc *blk, *maxb = NULL;
  2652. int max = 0;
  2653. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  2654. for (i = 0; i < block_counts[b]; i++) {
  2655. blk = &block_tables[b][i];
  2656. if (blk->addr != 0 && blk->refcount > max) {
  2657. max = blk->refcount;
  2658. maxb = blk;
  2659. }
  2660. }
  2661. }
  2662. if (maxb == NULL)
  2663. break;
  2664. printf("%08x %9d %2.3f%%\n", maxb->addr, maxb->refcount,
  2665. (double)maxb->refcount / total * 100.0);
  2666. maxb->refcount = 0;
  2667. }
  2668. for (b = 0; b < ARRAY_SIZE(block_tables); b++)
  2669. for (i = 0; i < block_counts[b]; i++)
  2670. block_tables[b][i].refcount = 0;
  2671. }
  2672. #else
  2673. #define block_stats()
  2674. #endif
  2675. void sh2_drc_flush_all(void)
  2676. {
  2677. block_stats();
  2678. flush_tcache(0);
  2679. flush_tcache(1);
  2680. flush_tcache(2);
  2681. }
  2682. void sh2_drc_mem_setup(SH2 *sh2)
  2683. {
  2684. // fill the convenience pointers
  2685. sh2->p_bios = sh2->is_slave ? Pico32xMem->sh2_rom_s : Pico32xMem->sh2_rom_m;
  2686. sh2->p_da = Pico32xMem->data_array[sh2->is_slave];
  2687. sh2->p_sdram = Pico32xMem->sdram;
  2688. sh2->p_rom = Pico.rom;
  2689. }
  2690. int sh2_drc_init(SH2 *sh2)
  2691. {
  2692. int i;
  2693. if (block_tables[0] == NULL)
  2694. {
  2695. for (i = 0; i < TCACHE_BUFFERS; i++) {
  2696. block_tables[i] = calloc(block_max_counts[i], sizeof(*block_tables[0]));
  2697. if (block_tables[i] == NULL)
  2698. goto fail;
  2699. // max 2 block links (exits) per block
  2700. block_links[i] = calloc(block_max_counts[i] * 2, sizeof(*block_links[0]));
  2701. if (block_links[i] == NULL)
  2702. goto fail;
  2703. }
  2704. memset(block_counts, 0, sizeof(block_counts));
  2705. memset(block_link_counts, 0, sizeof(block_link_counts));
  2706. drc_cmn_init();
  2707. tcache_ptr = tcache;
  2708. sh2_generate_utils();
  2709. #ifdef ARM
  2710. cache_flush_d_inval_i(tcache, tcache_ptr);
  2711. #endif
  2712. tcache_bases[0] = tcache_ptrs[0] = tcache_ptr;
  2713. for (i = 1; i < ARRAY_SIZE(tcache_bases); i++)
  2714. tcache_bases[i] = tcache_ptrs[i] = tcache_bases[i - 1] + tcache_sizes[i - 1];
  2715. // tmp
  2716. PicoOpt |= POPT_DIS_VDP_FIFO;
  2717. #if (DRC_DEBUG & 2)
  2718. for (i = 0; i < ARRAY_SIZE(block_tables); i++)
  2719. tcache_dsm_ptrs[i] = tcache_bases[i];
  2720. // disasm the utils
  2721. tcache_dsm_ptrs[0] = tcache;
  2722. do_host_disasm(0);
  2723. #endif
  2724. #if (DRC_DEBUG & 1)
  2725. hash_collisions = 0;
  2726. #endif
  2727. }
  2728. if (hash_table == NULL) {
  2729. hash_table = calloc(sizeof(hash_table[0]), MAX_HASH_ENTRIES);
  2730. if (hash_table == NULL)
  2731. goto fail;
  2732. }
  2733. return 0;
  2734. fail:
  2735. sh2_drc_finish(sh2);
  2736. return -1;
  2737. }
  2738. void sh2_drc_finish(SH2 *sh2)
  2739. {
  2740. int i;
  2741. if (block_tables[0] != NULL) {
  2742. block_stats();
  2743. for (i = 0; i < TCACHE_BUFFERS; i++) {
  2744. #if (DRC_DEBUG & 2)
  2745. printf("~~~ tcache %d\n", i);
  2746. tcache_dsm_ptrs[i] = tcache_bases[i];
  2747. tcache_ptr = tcache_ptrs[i];
  2748. do_host_disasm(i);
  2749. #endif
  2750. if (block_tables[i] != NULL)
  2751. free(block_tables[i]);
  2752. block_tables[i] = NULL;
2753. if (block_links[i] != NULL)
  2754. free(block_links[i]);
  2755. block_links[i] = NULL;
  2756. }
  2757. drc_cmn_cleanup();
  2758. }
  2759. if (hash_table != NULL) {
  2760. free(hash_table);
  2761. hash_table = NULL;
  2762. }
  2763. }