/*
 * SH2 recompiler
 * (C) notaz, 2009,2010,2013
 * (C) kub, 2018,2019,2020
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 *
 * notes:
 * - tcache, block descriptor, block entry buffer overflows result in oldest
 *   blocks being deleted until enough space is available
 * - link and list element buffer overflows result in failure and exit
 * - jumps between blocks are tracked for SMC handling (in block_entry->links),
 *   except jumps from global to CPU-local tcaches
 *
 * implemented:
 * - static register allocation
 * - remaining register caching and tracking in temporaries
 * - block-local branch linking
 * - block linking
 * - some constant propagation
 * - call stack caching for host block entry address
 * - delay, poll, and idle loop detection and handling
 * - some T/M flag optimizations where the value is known or isn't used
 *
 * TODO:
 * - better constant propagation
 * - bug fixing
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "../../pico/pico_int.h"
#include "../../pico/arm_features.h"
#include "sh2.h"
#include "compiler.h"
#include "../drc/cmn.h"
#include "../debug.h"
// features
#define PROPAGATE_CONSTANTS     1
#define LINK_BRANCHES           1
#define BRANCH_CACHE            1
#define CALL_STACK              1
#define ALIAS_REGISTERS         1
#define REMAP_REGISTER          1
#define LOOP_DETECTION          1
#define LOOP_OPTIMIZER          1
#define T_OPTIMIZER             1

#define MAX_LITERAL_OFFSET      0x200   // max. MOVA, MOV @(PC) offset
#define MAX_LOCAL_TARGETS       (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES      (BLOCK_INSN_LIMIT / 2)
// debug stuff
// 01 - warnings/errors
// 02 - block info/smc
// 04 - asm
// 08 - runtime block entry log
// 10 - smc self-check
// 20 - runtime block entry counter
// 40 - rcache checking
// 80 - branch cache statistics
// 100 - write trace
// 200 - compare trace
// 400 - block entry backtrace on exit
// 800 - state dump on exit
// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0//x847
#endif

#if DRC_DEBUG
#define dbg(l,...) { \
  if ((l) & DRC_DEBUG) \
    elprintf(EL_STATUS, ##__VA_ARGS__); \
}
#include "mame/sh2dasm.h"
#include <platform/libpicofe/linux/host_dasm.h>
static int insns_compiled, hash_collisions, host_insn_count;
#define COUNT_OP \
  host_insn_count++
#else // !DRC_DEBUG
#define COUNT_OP
#define dbg(...)
#endif
///
#define FETCH_OP(pc) \
  dr_pc_base[(pc) / 2]

#define FETCH32(a) \
  ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
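// Worked example (comment only, not from the original source): with
// dr_pc_base[] = { 0x1234, 0x5678 }, FETCH_OP(0) reads 0x1234 and
// FETCH32(0) == (0x1234 << 16) | 0x5678 == 0x12345678.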
#define CHECK_UNHANDLED_BITS(mask, label) { \
  if ((op & (mask)) != 0) \
    goto label; \
}

#define GET_Fx() \
  ((op >> 4) & 0x0f)
#define GET_Rm GET_Fx
#define GET_Rn() \
  ((op >> 8) & 0x0f)

#define SHR_T   30 // separate T for not-used detection
#define SHR_MEM 31
#define SHR_TMP -1

#define T       0x00000001
#define S       0x00000002
#define I       0x000000f0
#define Q       0x00000100
#define M       0x00000200
#define T_save  0x00000800

#define I_SHIFT 4
#define Q_SHIFT 8
#define M_SHIFT 9
#define T_SHIFT 11
static struct op_data {
  u8 op;
  u8 cycles;
  u8 size;     // 0, 1, 2 - byte, word, long
  s8 rm;       // branch or load/store data reg
  u32 source;  // bitmask of src regs
  u32 dest;    // bitmask of dest regs
  u32 imm;     // immediate/io address/branch target
               // (for literal - address, not value)
} ops[BLOCK_INSN_LIMIT];
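// Illustrative decoder entry (an assumption, not taken from this file): a
// literal load like "mov.l @(disp,PC),Rn" would be recorded roughly as
//   ops[i] = (struct op_data){ .op = OP_LOAD_POOL, .size = 2, .rm = n,
//                              .imm = /* address of the literal */ 0 };
// i.e. imm holds the literal's address, never its value (see comment above).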
enum op_types {
  OP_UNHANDLED = 0,
  OP_BRANCH,
  OP_BRANCH_N,  // conditional known not to be taken
  OP_BRANCH_CT, // conditional, branch if T set
  OP_BRANCH_CF, // conditional, branch if T clear
  OP_BRANCH_R,  // indirect
  OP_BRANCH_RF, // indirect far (PC + Rm)
  OP_SETCLRT,   // T flag set/clear
  OP_MOVE,      // register move
  OP_LOAD_CONST,// load const to register
  OP_LOAD_POOL, // literal pool load, imm is address
  OP_MOVA,      // MOVA instruction
  OP_SLEEP,     // SLEEP instruction
  OP_RTE,       // RTE instruction
  OP_TRAPA,     // TRAPA instruction
  OP_LDC,       // LDC instruction
  OP_UNDEFINED,
};

// XXX consider trap insns: OP_TRAPA, OP_UNDEFINED?
#define OP_ISBRANCH(op) ((BITRANGE(OP_BRANCH, OP_BRANCH_RF)| BITMASK1(OP_RTE)) \
                                & BITMASK1(op))
#define OP_ISBRAUC(op) (BITMASK4(OP_BRANCH, OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
#define OP_ISBRACND(op) (BITMASK2(OP_BRANCH_CT, OP_BRANCH_CF) \
                                & BITMASK1(op))
#define OP_ISBRAIMM(op) (BITMASK3(OP_BRANCH, OP_BRANCH_CT, OP_BRANCH_CF) \
                                & BITMASK1(op))
#define OP_ISBRAIND(op) (BITMASK3(OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
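// How the classification works (informal, assuming the BITMASKn() helpers
// expand to an OR of (1 << arg) bits): e.g. OP_ISBRACND(OP_BRANCH_CT) tests
// the OP_BRANCH_CT bit against the {OP_BRANCH_CT, OP_BRANCH_CF} mask and
// yields nonzero, so each predicate is a constant-time set membership test.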
#ifdef DRC_SH2

#if (DRC_DEBUG & 4)
static u8 *tcache_dsm_ptrs[3];
static char sh2dasm_buff[64];
#define do_host_disasm(tcid) \
  host_dasm(tcache_dsm_ptrs[tcid], emith_insn_ptr() - tcache_dsm_ptrs[tcid]); \
  tcache_dsm_ptrs[tcid] = emith_insn_ptr()
#else
#define do_host_disasm(x)
#endif

#define SH2_DUMP(sh2, reason) { \
  char ms = (sh2)->is_slave ? 's' : 'm'; \
  printf("%csh2 %s %08x\n", ms, reason, (sh2)->pc); \
  printf("%csh2 r0-7  %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[0], (sh2)->r[1], (sh2)->r[2], (sh2)->r[3], \
    (sh2)->r[4], (sh2)->r[5], (sh2)->r[6], (sh2)->r[7]); \
  printf("%csh2 r8-15 %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[8], (sh2)->r[9], (sh2)->r[10], (sh2)->r[11], \
    (sh2)->r[12], (sh2)->r[13], (sh2)->r[14], (sh2)->r[15]); \
  printf("%csh2 pc-ml %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->pc, (sh2)->ppc, (sh2)->pr, (sh2)->sr&0xfff, \
    (sh2)->gbr, (sh2)->vbr, (sh2)->mach, (sh2)->macl); \
  printf("%csh2 tmp-p %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->drc_tmp, (sh2)->irq_cycles, \
    (sh2)->pdb_io_csum[0], (sh2)->pdb_io_csum[1], (sh2)->state, \
    (sh2)->poll_addr, (sh2)->poll_cycles, (sh2)->poll_cnt); \
}
#if (DRC_DEBUG & (8|256|512|1024)) || defined(PDB)
#if (DRC_DEBUG & (256|512|1024))
static SH2 csh2[2][8];
static FILE *trace[2];
#endif
static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
{
  if (block != NULL) {
    dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
      sh2->pc, block, (signed int)sr >> 12);
#if defined PDB
    pdb_step(sh2, sh2->pc);
#elif (DRC_DEBUG & 256)
    {
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "wb");
        trace[1] = fopen("pico.trace1", "wb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        fwrite(sh2, offsetof(SH2, read8_map), 1, trace[idx]);
        fwrite(&sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx]);
        memcpy(&csh2[idx][0], sh2, offsetof(SH2, poll_cnt)+4);
        csh2[idx][0].is_slave = idx;
      }
    }
#elif (DRC_DEBUG & 512)
    {
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "rb");
        trace[1] = fopen("pico.trace1", "rb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        if (!fread(&fsh2, offsetof(SH2, read8_map), 1, trace[idx]) ||
            !fread(&fsh2.pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx])) {
          printf("trace eof at %08lx\n", ftell(trace[idx]));
          exit(1);
        }
        fsh2.sr = (fsh2.sr & 0xfff) | (sh2->sr & ~0xfff);
        fsh2.is_slave = idx;
        if (memcmp(&fsh2, sh2, offsetof(SH2, read8_map)) ||
            0)//memcmp(&fsh2.pdb_io_csum, &sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum)))
        {
          printf("difference at %08lx!\n", ftell(trace[idx]));
          SH2_DUMP(&fsh2, "file");
          SH2_DUMP(sh2, "current");
          SH2_DUMP(&csh2[idx][0], "previous");
          char *ps = (char *)sh2, *pf = (char *)&fsh2;
          for (idx = 0; idx < offsetof(SH2, read8_map); idx += sizeof(u32))
            if (*(u32 *)(ps+idx) != *(u32 *)(pf+idx))
              printf("diff reg %ld\n", idx/sizeof(u32));
          exit(1);
        }
        csh2[idx][0] = fsh2;
      }
    }
#elif (DRC_DEBUG & 1024)
    {
      int x = sh2->is_slave, i;
      for (i = 0; i < ARRAY_SIZE(csh2[x])-1; i++)
        memcpy(&csh2[x][i], &csh2[x][i+1], offsetof(SH2, poll_cnt)+4);
      memcpy(&csh2[x][ARRAY_SIZE(csh2[x])-1], sh2, offsetof(SH2, poll_cnt)+4);
      csh2[x][0].is_slave = x;
    }
#endif
  }
  return block;
}
#endif
// we have 3 translation cache buffers, split from one drc/cmn buffer.
// BIOS shares tcache with data array because it's only used for init
// and can be discarded early
#define TCACHE_BUFFERS 3

struct ring_buffer {
  u8 *base;          // ring buffer memory
  unsigned item_sz;  // size of one buffer item
  unsigned size;     // number of items in ring
  int first, next;   // read and write pointers
  int used;          // number of used items in ring
};
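// Informal invariants (not in the original): valid items occupy [first,next)
// modulo size, and used counts them; e.g. size=4, first=3, next=1 means
// items 3 and 0 are live and used == 2.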
enum { BL_JMP=1, BL_LDJMP, BL_JCCBLX };

struct block_link {
  short tcache_id;
  short type;                // BL_JMP et al
  u32 target_pc;
  void *jump;                // insn address
  void *blx;                 // block link/exit area if any
  u8 jdisp[12];              // jump backup buffer
  struct block_link *next;   // either in block_entry->links or unresolved
  struct block_link *o_next; // ...in block_entry->o_links
  struct block_link *prev;
  struct block_link *o_prev;
  struct block_entry *target;// target block this is linked in (be->links)
};

struct block_entry {
  u32 pc;
  u8 *tcache_ptr;            // translated block for above PC
  struct block_entry *next;  // chain in hash_table with same pc hash
  struct block_entry *prev;
  struct block_link *links;  // incoming links to this entry
  struct block_link *o_links;// outgoing links from this entry
#if (DRC_DEBUG & 2)
  struct block_desc *block;
#endif
#if (DRC_DEBUG & 32)
  int entry_count;
#endif
};

struct block_desc {
  u32 addr;           // block start SH2 PC address
  u32 addr_lit;       // block start SH2 literal pool addr
  int size;           // ..of recompiled insns
  int size_lit;       // ..of (insns+)literal pool
  u8 *tcache_ptr;     // start address of block in cache
  u16 crc;            // crc of insns and literals
  u16 active;         // actively used or deactivated?
  struct block_list *list;
#if (DRC_DEBUG & 2)
  int refcount;
#endif
  int entry_count;
  struct block_entry *entryp;
};

struct block_list {
  struct block_desc *block;  // block reference
  struct block_list *next;   // pointers for doubly linked list
  struct block_list *prev;
  struct block_list **head;  // list head (for removing from list)
  struct block_list *l_next;
};

static u8 *tcache_ptr;       // ptr for code emitters

// XXX: need to tune sizes
static struct ring_buffer tcache_ring[TCACHE_BUFFERS];
static const int tcache_sizes[TCACHE_BUFFERS] = {
  DRC_TCACHE_SIZE * 30 / 32, // ROM (rarely used), DRAM
  DRC_TCACHE_SIZE / 32,      // BIOS, data array in master sh2
  DRC_TCACHE_SIZE / 32,      // ... slave
};
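// Illustrative arithmetic (assuming DRC_TCACHE_SIZE were 4 MB): tcache 0
// would get 30/32 = 3.75 MB, and the two per-CPU caches 128 KB each.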
#define BLOCK_MAX_COUNT(tcid)      ((tcid) ? 256 : 32*256)
static struct ring_buffer block_ring[TCACHE_BUFFERS];
static struct block_desc *block_tables[TCACHE_BUFFERS];

#define ENTRY_MAX_COUNT(tcid)      ((tcid) ? 8*512 : 256*512)
static struct ring_buffer entry_ring[TCACHE_BUFFERS];
static struct block_entry *entry_tables[TCACHE_BUFFERS];

// we have block_link_pool to avoid using mallocs
#define BLOCK_LINK_MAX_COUNT(tcid) ((tcid) ? 512 : 32*512)
static struct block_link *block_link_pool[TCACHE_BUFFERS];
static int block_link_pool_counts[TCACHE_BUFFERS];
static struct block_link **unresolved_links[TCACHE_BUFFERS];
static struct block_link *blink_free[TCACHE_BUFFERS];

// used for invalidation
#define RAM_SIZE(tcid) ((tcid) ? 0x1000 : 0x40000)
#define INVAL_PAGE_SIZE 0x100
static struct block_list *inactive_blocks[TCACHE_BUFFERS];

// array of pointers to block_lists for RAM and 2 data arrays
// each array has len: sizeof(mem) / INVAL_PAGE_SIZE
static struct block_list **inval_lookup[TCACHE_BUFFERS];

#define HASH_TABLE_SIZE(tcid) ((tcid) ? 512 : 64*512)
static struct block_entry **hash_tables[TCACHE_BUFFERS];

#define HASH_FUNC(hash_tab, addr, mask) \
  (hash_tab)[((addr) >> 1) & (mask)]
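// Example (comment only): SH2 insns are 2-byte aligned, so pc is halved
// before masking; with the tcid 0 mask 64*512-1 = 0x7fff, pc 0x06000100
// maps to slot (0x06000100 >> 1) & 0x7fff = 0x80.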
#define BLOCK_LIST_MAX_COUNT (64*1024)
static struct block_list *block_list_pool;
static int block_list_pool_count;
static struct block_list *blist_free;

#if (DRC_DEBUG & 128)
#if BRANCH_CACHE
int bchit, bcmiss;
#endif
#if CALL_STACK
int rchit, rcmiss;
#endif
#endif
// host register tracking
enum cache_reg_htype {
  HRT_TEMP   = 1, // is for temps and args
  HRT_REG    = 2, // is for sh2 regs
};

enum cache_reg_flags {
  HRF_DIRTY  = 1 << 0, // has "dirty" value to be written to ctx
  HRF_PINNED = 1 << 1, // has a pinned mapping
  HRF_S16    = 1 << 2, // has a sign extended 16 bit value
  HRF_U16    = 1 << 3, // has a zero extended 16 bit value
};

enum cache_reg_type {
  HR_FREE,
  HR_CACHED, // vreg has sh2_reg_e
  HR_TEMP,   // reg used for temp storage
};

typedef struct {
  u8 hreg:6;   // "host" reg
  u8 htype:2;  // TEMP or REG?
  u8 flags:4;  // DIRTY, PINNED?
  u8 type:2;   // CACHED or TEMP?
  u8 locked:2; // LOCKED reference counter
  u16 stamp;   // kind of a timestamp
  u32 gregs;   // "guest" reg mask
} cache_reg_t;

// guest register tracking
enum guest_reg_flags {
  GRF_DIRTY  = 1 << 0, // reg has "dirty" value to be written to ctx
  GRF_CONST  = 1 << 1, // reg has a constant
  GRF_CDIRTY = 1 << 2, // constant not yet written to ctx
  GRF_STATIC = 1 << 3, // reg has static mapping to vreg
  GRF_PINNED = 1 << 4, // reg has pinned mapping to vreg
};

typedef struct {
  u8 flags; // guest flags: is constant, is dirty?
  s8 sreg;  // cache reg for static mapping
  s8 vreg;  // cache_reg this is currently mapped to, -1 if not mapped
  s8 cnst;  // const index if this is constant
} guest_reg_t;
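// Relationship between the two tables (informal): guest_regs[r].vreg indexes
// cache_regs[], and the corresponding cache_regs[vreg].gregs has bit r set
// for as long as the mapping is live.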
// possibly needed in code emitter
static int rcache_get_tmp(void);
static void rcache_free_tmp(int hr);

// Note: Register assignment goes by ABI convention. Caller save registers are
// TEMPORARY, callee save registers are PRESERVED. Unusable regs are omitted.
// there must be at least the free (not context or statically mapped) amount of
// PRESERVED/TEMPORARY registers used by handlers in worst case (currently 4).
// there must be at least 3 PARAM, and PARAM+TEMPORARY must be at least 4.
// SR must and R0 should by all means be statically mapped.
// XXX the static definition of SR MUST match that in compiler.h

#ifdef __arm__
#include "../drc/emit_arm.c"
#elif defined(__aarch64__)
#include "../drc/emit_arm64.c"
#elif defined(__mips__)
#include "../drc/emit_mips.c"
#elif defined(__riscv__) || defined(__riscv)
#include "../drc/emit_riscv.c"
#elif defined(__powerpc__)
#include "../drc/emit_ppc.c"
#elif defined(__i386__)
#include "../drc/emit_x86.c"
#elif defined(__x86_64__)
#include "../drc/emit_x86.c"
#else
#error unsupported arch
#endif
static const signed char hregs_param[] = PARAM_REGS;
static const signed char hregs_temp [] = TEMPORARY_REGS;
static const signed char hregs_saved[] = PRESERVED_REGS;
static const signed char regs_static[] = STATIC_SH2_REGS;

#define CACHE_REGS \
    (ARRAY_SIZE(hregs_param)+ARRAY_SIZE(hregs_temp)+ARRAY_SIZE(hregs_saved)-1)
static cache_reg_t cache_regs[CACHE_REGS];

static signed char reg_map_host[HOST_REGS];

static guest_reg_t guest_regs[SH2_REGS];

static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void REGPARM(1) (*sh2_drc_dispatcher)(u32 pc);
#if CALL_STACK
static u32  REGPARM(2) (*sh2_drc_dispatcher_call)(u32 pc);
static void REGPARM(1) (*sh2_drc_dispatcher_return)(u32 pc);
#endif
static void REGPARM(1) (*sh2_drc_exit)(u32 pc);
static void (*sh2_drc_test_irq)(void);

static u32  REGPARM(1) (*sh2_drc_read8)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read8_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32_poll)(u32 a);
static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write32)(u32 a, u32 d);

#ifdef DRC_SR_REG
void REGPARM(1) (*sh2_drc_save_sr)(SH2 *sh2);
void REGPARM(1) (*sh2_drc_restore_sr)(SH2 *sh2);
#endif
// flags for memory access
#define MF_SIZEMASK 0x03        // size of access
#define MF_POSTINCR 0x10        // post increment (for read_rr)
#define MF_PREDECR  MF_POSTINCR // pre decrement (for write_rr)
#define MF_POLLING  0x20        // include polling check in read
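// e.g. (2 | MF_POSTINCR | MF_POLLING) would describe a 32-bit post-increment
// read with a polling check (illustrative combination, not from the source).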
// address space stuff
static int dr_is_rom(u32 a)
{
  // tweak for WWF Raw which writes data to some high ROM addresses
  return (a & 0xc6000000) == 0x02000000 && (a & 0x3f0000) < 0x3e0000;
}
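// Informal reading of the test: the first mask accepts the ROM mirrors at
// 0x02000000/0x22000000 (bit 29, cached vs uncached, is not in the mask);
// the second condition excludes the top 0x20000 bytes of the 4 MB window,
// which WWF Raw uses as writable data (see the comment above).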
static int dr_ctx_get_mem_ptr(SH2 *sh2, u32 a, u32 *mask)
{
  void *memptr;
  int poffs = -1;

  // check if region is mapped memory
  memptr = p32x_sh2_get_mem_ptr(a, mask, sh2);
  if (memptr == NULL)
    return poffs;

  if (memptr == sh2->p_bios)       // BIOS
    poffs = offsetof(SH2, p_bios);
  else if (memptr == sh2->p_da)    // data array
    poffs = offsetof(SH2, p_da);
  else if (memptr == sh2->p_sdram) // SDRAM
    poffs = offsetof(SH2, p_sdram);
  else if (memptr == sh2->p_rom)   // ROM
    poffs = offsetof(SH2, p_rom);

  return poffs;
}
static int dr_get_tcache_id(u32 pc, int is_slave)
{
  u32 tcid = 0;

  if ((pc & 0xe0000000) == 0xc0000000)
    tcid = 1 + is_slave; // data array
  if ((pc & ~0xfff) == 0)
    tcid = 1 + is_slave; // BIOS
  return tcid;
}
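// Example (illustrative): pc = 0xc0000100 is in the data array, so the slave
// CPU gets tcid 2 (master gets 1); a BIOS pc like 0x00000400 likewise maps to
// 1 + is_slave; all other code (ROM/SDRAM) shares tcache 0.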
static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
{
  struct block_entry *be;

  *tcache_id = dr_get_tcache_id(pc, is_slave);

  be = HASH_FUNC(hash_tables[*tcache_id], pc, HASH_TABLE_SIZE(*tcache_id) - 1);
  if (be != NULL) // don't ask... gcc code generation hint
    for (; be != NULL; be = be->next)
      if (be->pc == pc)
        return be;

  return NULL;
}
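// Typical use (a sketch, not taken from this file):
//   int tcid;
//   struct block_entry *be = dr_get_entry(sh2->pc, sh2->is_slave, &tcid);
//   if (be != NULL) { /* jump to be->tcache_ptr */ }
//   else            { /* compile a new block first */ }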
// ---------------------------------------------------------------

// ring buffer management
#define RING_INIT(r,m,n)    *(r) = (struct ring_buffer) { .base = (u8 *)m, \
                                        .item_sz = sizeof(*(m)), .size = n };

static void *ring_alloc(struct ring_buffer *rb, int count)
{
  // allocate space in ring buffer
  void *p;

  p = rb->base + rb->next * rb->item_sz;
  if (rb->next+count > rb->size) {
    rb->used += rb->size - rb->next;
    p = rb->base; // wrap if overflow at end
    rb->next = count;
  } else {
    rb->next += count;
    if (rb->next == rb->size) rb->next = 0;
  }
  rb->used += count;
  return p;
}
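// Worked example (comment only): size=8, next=6, ring_alloc(rb, 4) cannot
// fit in the 2 trailing items, so it wraps: p = base, next becomes 4, and
// used grows by 2 (the skipped tail) + 4 (the allocation).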
static void ring_wrap(struct ring_buffer *rb)
{
  // insufficient space at end of buffer memory, wrap around
  rb->used += rb->size - rb->next;
  rb->next = 0;
}

static void ring_free(struct ring_buffer *rb, int count)
{
  // free oldest space in ring buffer
  rb->first += count;
  if (rb->first >= rb->size) rb->first -= rb->size;
  rb->used -= count;
}

static void ring_free_p(struct ring_buffer *rb, void *p)
{
  // free ring buffer space up to given pointer
  rb->first = ((u8 *)p - rb->base) / rb->item_sz;
  rb->used = rb->next - rb->first;
  if (rb->used < 0) rb->used += rb->size;
}

static void *ring_reset(struct ring_buffer *rb)
{
  // reset to initial state
  rb->first = rb->next = rb->used = 0;
  return rb->base + rb->next * rb->item_sz;
}

static void *ring_first(struct ring_buffer *rb)
{
  return rb->base + rb->first * rb->item_sz;
}

static void *ring_next(struct ring_buffer *rb)
{
  return rb->base + rb->next * rb->item_sz;
}
// block management
static void add_to_block_list(struct block_list **blist, struct block_desc *block)
{
  struct block_list *added;

  if (blist_free) {
    added = blist_free;
    blist_free = added->next;
  } else if (block_list_pool_count >= BLOCK_LIST_MAX_COUNT) {
    printf("block list overflow\n");
    exit(1);
  } else {
    added = block_list_pool + block_list_pool_count;
    block_list_pool_count++;
  }

  added->block = block;
  added->l_next = block->list;
  block->list = added;
  added->head = blist;

  added->prev = NULL;
  if (*blist)
    (*blist)->prev = added;
  added->next = *blist;
  *blist = added;
}

static void rm_from_block_lists(struct block_desc *block)
{
  struct block_list *entry;

  entry = block->list;
  while (entry != NULL) {
    if (entry->prev != NULL)
      entry->prev->next = entry->next;
    else
      *(entry->head) = entry->next;
    if (entry->next != NULL)
      entry->next->prev = entry->prev;

    entry->next = blist_free;
    blist_free = entry;

    entry = entry->l_next;
  }
  block->list = NULL;
}

static void discard_block_list(struct block_list **blist)
{
  struct block_list *next, *current = *blist;
  while (current != NULL) {
    next = current->next;
    current->next = blist_free;
    blist_free = current;
    current = next;
  }
  *blist = NULL;
}
static void add_to_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

  be->prev = NULL;
  if (*head)
    (*head)->prev = be;
  be->next = *head;
  *head = be;

#if (DRC_DEBUG & 2)
  if (be->next != NULL) {
    printf(" %08x@%p: entry hash collision with %08x@%p\n",
      be->pc, be->tcache_ptr, be->next->pc, be->next->tcache_ptr);
    hash_collisions++;
  }
#endif
}

static void rm_from_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

#if DRC_DEBUG & 1
  struct block_entry *current = be;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist @%p: be %p %08x missing?", head, be, be->pc);
#endif

  if (be->prev != NULL)
    be->prev->next = be->next;
  else
    *head = be->next;
  if (be->next != NULL)
    be->next->prev = be->prev;
}

static void add_to_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = *head;
  while (current != NULL && current != bl)
    current = current->next;
  if (current == bl)
    dbg(1, "add_to_hashlist_unresolved @%p: bl %p %p %08x already in?", head, bl, bl->target, bl->target_pc);
#endif

  bl->target = NULL; // marker for not resolved
  bl->prev = NULL;
  if (*head)
    (*head)->prev = bl;
  bl->next = *head;
  *head = bl;
}

static void rm_from_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = bl;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist_unresolved @%p: bl %p %p %08x missing?", head, bl, bl->target, bl->target_pc);
#endif

  if (bl->prev != NULL)
    bl->prev->next = bl->next;
  else
    *head = bl->next;
  if (bl->next != NULL)
    bl->next->prev = bl->prev;
}
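// Link lifecycle (informal summary): a block_link is born on
// unresolved_links[] keyed by target_pc; once the target gets compiled,
// dr_block_link() below moves it onto the target's be->links, and
// dr_block_unlink() puts it back on the unresolved list when the target dies.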
#if LINK_BRANCHES
static void dr_block_link(struct block_entry *be, struct block_link *bl, int emit_jump)
{
  dbg(2, "- %slink from %p to pc %08x entry %p", emit_jump ? "":"early ",
    bl->jump, bl->target_pc, be->tcache_ptr);

  if (emit_jump) {
    u8 *jump = bl->jump;
    int jsz = emith_jump_patch_size();
    if (bl->type == BL_JMP) { // patch: jump @entry
      // inlined: @jump far jump to target
      emith_jump_patch(jump, be->tcache_ptr, &jump);
    } else if (bl->type == BL_LDJMP) { // write: jump @entry
      // inlined: @jump far jump to target
      emith_jump_at(jump, be->tcache_ptr);
      jsz = emith_jump_at_size();
    } else if (bl->type == BL_JCCBLX) { // patch: jump cond -> jump @entry
      if (emith_jump_patch_inrange(bl->jump, be->tcache_ptr)) {
        // inlined: @jump near jumpcc to target
        emith_jump_patch(jump, be->tcache_ptr, &jump);
      } else { // dispatcher cond immediate
        // via blx: @jump near jumpcc to blx; @blx far jump
        emith_jump_patch(jump, bl->blx, &jump);
        emith_jump_at(bl->blx, be->tcache_ptr);
        if ((((uintptr_t)bl->blx & 0x1f) + emith_jump_at_size()-1) > 0x1f)
          host_instructions_updated(bl->blx, bl->blx + emith_jump_at_size()-1);
      }
    } else {
      printf("unknown BL type %d\n", bl->type);
      exit(1);
    }
    // only needs sync if patch is possibly crossing cacheline (assume 32 byte)
    if ((((uintptr_t)jump & 0x1f) + jsz-1) > 0x1f)
      host_instructions_updated(jump, jump + jsz-1);
  }

  // move bl to block_entry
  bl->target = be;
  bl->prev = NULL;
  if (be->links)
    be->links->prev = bl;
  bl->next = be->links;
  be->links = bl;
}

static void dr_block_unlink(struct block_link *bl, int emit_jump)
{
  dbg(2,"- unlink from %p to pc %08x", bl->jump, bl->target_pc);

  if (bl->target) {
    if (emit_jump) {
      u8 *jump = bl->jump;
      int jsz = emith_jump_patch_size();
      if (bl->type == BL_JMP) { // jump_patch @dispatcher
        // inlined: @jump far jump to dispatcher
        emith_jump_patch(jump, sh2_drc_dispatcher, &jump);
      } else if (bl->type == BL_LDJMP) { // restore: load pc, jump @dispatcher
        // inlined: @jump load target_pc, far jump to dispatcher
        memcpy(jump, bl->jdisp, emith_jump_at_size());
        jsz = emith_jump_at_size();
      } else if (bl->type == BL_JCCBLX) { // jump cond @blx; @blx: load pc, jump
        // via blx: @jump near jumpcc to blx; @blx load target_pc, far jump
        emith_jump_patch(bl->jump, bl->blx, &jump);
        memcpy(bl->blx, bl->jdisp, emith_jump_at_size());
        host_instructions_updated(bl->blx, bl->blx + emith_jump_at_size()-1);
      } else {
        printf("unknown BL type %d\n", bl->type);
        exit(1);
      }
      // update cpu caches since the previous jump target doesn't exist anymore
      host_instructions_updated(jump, jump + jsz-1);
    }

    if (bl->prev)
      bl->prev->next = bl->next;
    else
      bl->target->links = bl->next;
    if (bl->next)
      bl->next->prev = bl->prev;
    bl->target = NULL;
  }
}
#endif
static struct block_link *dr_prepare_ext_branch(struct block_entry *owner, u32 pc, int is_slave, int tcache_id)
{
#if LINK_BRANCHES
  struct block_link *bl = block_link_pool[tcache_id];
  int cnt = block_link_pool_counts[tcache_id];
  int target_tcache_id;

  // get the target block entry
  target_tcache_id = dr_get_tcache_id(pc, is_slave);
  if (target_tcache_id && target_tcache_id != tcache_id)
    return NULL;

  // get a block link
  if (blink_free[tcache_id] != NULL) {
    bl = blink_free[tcache_id];
    blink_free[tcache_id] = bl->next;
  } else if (cnt >= BLOCK_LINK_MAX_COUNT(tcache_id)) {
    dbg(1, "bl overflow for tcache %d", tcache_id);
    return NULL;
  } else {
    bl += cnt;
    block_link_pool_counts[tcache_id] = cnt+1;
  }

  // prepare link and add to outgoing list of owner
  bl->tcache_id = tcache_id;
  bl->target_pc = pc;
  bl->jump = tcache_ptr;
  bl->blx = NULL;
  bl->o_next = owner->o_links;
  owner->o_links = bl;

  add_to_hashlist_unresolved(bl, tcache_id);
  return bl;
#else
  return NULL;
#endif
}
static void dr_mark_memory(int mark, struct block_desc *block, int tcache_id, u32 nolit)
{
  u8 *drc_ram_blk = NULL, *lit_ram_blk = NULL;
  u32 addr, end, mask = 0, shift = 0, idx;

  // mark memory blocks as containing compiled code
  if ((block->addr & 0xc7fc0000) == 0x06000000
      || (block->addr & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      drc_ram_blk = Pico32xMem->drcblk_da[tcache_id-1];
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      drc_ram_blk = Pico32xMem->drcblk_ram;
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    // mark recompiled insns
    addr = block->addr & ~((1 << shift) - 1);
    end = block->addr + block->size;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark literal pool
    if (addr < (block->addr_lit & ~((1 << shift) - 1)))
      addr = block->addr_lit & ~((1 << shift) - 1);
    end = block->addr_lit + block->size_lit;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark for literals disabled
    if (nolit) {
      addr = nolit & ~((1 << shift) - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
        lit_ram_blk[idx++] = 1;
    }

    if (mark < 0)
      rm_from_block_lists(block);
    else {
      // add to invalidation lookup lists
      addr = block->addr & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr + block->size;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
      if (addr < (block->addr_lit & ~(INVAL_PAGE_SIZE - 1)))
        addr = block->addr_lit & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
    }
  }
}
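// scan [start,end) and return the address of the first region in which
// literals were invalidated (flagged in lit_ram_blk), or end if none is found.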
static u32 dr_check_nolit(u32 start, u32 end, int tcache_id)
{
  u8 *lit_ram_blk = NULL;
  u32 mask = 0, shift = 0, addr, idx;

  if ((start & 0xc7fc0000) == 0x06000000
      || (start & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    addr = start & ~((1 << shift) - 1);
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      if (lit_ram_blk[idx++])
        break;

    return (addr < start ? start : addr > end ? end : addr);
  }

  return end;
}
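// deactivate a block: unlink all incoming branches and take its entry points
// out of the hash table. if @free is set (or literals were overwritten), also
// revoke the outgoing links and release the block descriptor itself.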
static void dr_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit, int free)
{
  struct block_link *bl;
  u32 i;

  free = free || nolit; // block is invalid if literals are overwritten
  dbg(2," %sing block %08x-%08x,%08x-%08x, blkid %d,%d", free?"delet":"disabl",
    bd->addr, bd->addr + bd->size, bd->addr_lit, bd->addr_lit + bd->size_lit,
    tcache_id, bd - block_tables[tcache_id]);
  if (bd->addr == 0 || bd->entry_count == 0) {
    dbg(1, " killing dead block!? %08x", bd->addr);
    return;
  }

#if LINK_BRANCHES
  // remove from hash table, make incoming links unresolved
  if (bd->active) {
    for (i = 0; i < bd->entry_count; i++) {
      rm_from_hashlist(&bd->entryp[i], tcache_id);

      while ((bl = bd->entryp[i].links) != NULL) {
        dr_block_unlink(bl, 1);
        add_to_hashlist_unresolved(bl, tcache_id);
      }
    }

    dr_mark_memory(-1, bd, tcache_id, nolit);
    add_to_block_list(&inactive_blocks[tcache_id], bd);
  }
  bd->active = 0;
#endif

  if (free) {
#if LINK_BRANCHES
    // revoke outgoing links
    for (bl = bd->entryp[0].o_links; bl != NULL; bl = bl->o_next) {
      if (bl->target)
        dr_block_unlink(bl, 0);
      else
        rm_from_hashlist_unresolved(bl, tcache_id);
      bl->jump = NULL;
      bl->next = blink_free[bl->tcache_id];
      blink_free[bl->tcache_id] = bl;
    }
    bd->entryp[0].o_links = NULL;
#endif
    // invalidate block
    rm_from_block_lists(bd);
    bd->addr = bd->size = bd->addr_lit = bd->size_lit = 0;
    bd->entry_count = 0;
    bd->entryp = NULL;
  }
  emith_update_cache();
}
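// search the inactive list for a block matching address/size/crc, so that an
// unchanged block deactivated earlier can be reused as-is.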
static struct block_desc *dr_find_inactive_block(int tcache_id, u16 crc,
  u32 addr, int size, u32 addr_lit, int size_lit)
{
  struct block_list **head = &inactive_blocks[tcache_id];
  struct block_list *current;

  for (current = *head; current != NULL; current = current->next) {
    struct block_desc *block = current->block;
    if (block->crc == crc && block->addr == addr && block->size == size &&
        block->addr_lit == addr_lit && block->size_lit == size_lit)
    {
      rm_from_block_lists(block);
      return block;
    }
  }
  return NULL;
}
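// allocate a block descriptor and its entry point array for a new block from
// the per-tcache ring buffers; fails if either ring has no room left.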
static struct block_desc *dr_add_block(int entries, u32 addr, int size,
  u32 addr_lit, int size_lit, u16 crc, int is_slave, int *blk_id)
{
  struct block_entry *be;
  struct block_desc *bd;
  int tcache_id;

  // do a lookup to get tcache_id and override check
  be = dr_get_entry(addr, is_slave, &tcache_id);
  if (be != NULL)
    dbg(1, "block override for %08x", addr);

  if (block_ring[tcache_id].used + 1 > block_ring[tcache_id].size ||
      entry_ring[tcache_id].used + entries > entry_ring[tcache_id].size) {
    dbg(1, "bd overflow for tcache %d", tcache_id);
    return NULL;
  }

  *blk_id = block_ring[tcache_id].next;
  bd = ring_alloc(&block_ring[tcache_id], 1);
  bd->entryp = ring_alloc(&entry_ring[tcache_id], entries);

  bd->addr = addr;
  bd->size = size;
  bd->addr_lit = addr_lit;
  bd->size_lit = size_lit;
  bd->tcache_ptr = tcache_ptr;
  bd->crc = crc;
  bd->active = 0;
  bd->list = NULL;
  bd->entry_count = 0;
#if (DRC_DEBUG & 2)
  bd->refcount = 0;
#endif

  return bd;
}

static void dr_link_blocks(struct block_entry *be, int tcache_id)
{
#if LINK_BRANCHES
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  u32 pc = be->pc;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], pc, tcmask);
  struct block_link *bl = *head, *next;

  while (bl != NULL) {
    next = bl->next;
    if (bl->target_pc == pc && (!bl->tcache_id || bl->tcache_id == tcache_id)) {
      rm_from_hashlist_unresolved(bl, bl->tcache_id);
      dr_block_link(be, bl, 1);
    }
    bl = next;
  }
#endif
}

static void dr_link_outgoing(struct block_entry *be, int tcache_id, int is_slave)
{
#if LINK_BRANCHES
  struct block_link *bl;
  int target_tcache_id;

  for (bl = be->o_links; bl; bl = bl->o_next) {
    if (bl->target == NULL) {
      be = dr_get_entry(bl->target_pc, is_slave, &target_tcache_id);
      if (be != NULL && (!target_tcache_id || target_tcache_id == tcache_id)) {
        // remove bl from unresolved_links (must've been since target was NULL)
        rm_from_hashlist_unresolved(bl, bl->tcache_id);
        dr_block_link(be, bl, 1);
      }
    }
  }
#endif
}

static void dr_activate_block(struct block_desc *bd, int tcache_id, int is_slave)
{
  int i;

  // connect branches
  for (i = 0; i < bd->entry_count; i++) {
    struct block_entry *entry = &bd->entryp[i];
    add_to_hashlist(entry, tcache_id);
    // incoming branches
    dr_link_blocks(entry, tcache_id);
    if (!tcache_id)
      dr_link_blocks(entry, is_slave?2:1);
    // outgoing branches
    dr_link_outgoing(entry, tcache_id, is_slave);
  }

  // mark memory for overwrite detection
  dr_mark_memory(1, bd, tcache_id, 0);
  bd->active = 1;
}

static void REGPARM(3) *dr_lookup_block(u32 pc, SH2 *sh2, int *tcache_id)
{
  struct block_entry *be = NULL;
  void *block = NULL;

  be = dr_get_entry(pc, sh2->is_slave, tcache_id);
  if (be != NULL)
    block = be->tcache_ptr;

#if (DRC_DEBUG & 2)
  if (be != NULL)
    be->block->refcount++;
#endif
  return block;
}

static void dr_free_oldest_block(int tcache_id)
{
  struct block_desc *bf;

  bf = ring_first(&block_ring[tcache_id]);
  if (bf->addr && bf->entry_count)
    dr_rm_block_entry(bf, tcache_id, 0, 1);
  ring_free(&block_ring[tcache_id], 1);

  if (block_ring[tcache_id].used) {
    bf = ring_first(&block_ring[tcache_id]);
    ring_free_p(&entry_ring[tcache_id], bf->entryp);
    ring_free_p(&tcache_ring[tcache_id], bf->tcache_ptr);
  } else {
    // reset since size of code block isn't known if no successor block exists
    ring_reset(&block_ring[tcache_id]);
    ring_reset(&entry_ring[tcache_id]);
    ring_reset(&tcache_ring[tcache_id]);
  }
}
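// make room in a ring buffer for @count new elements, freeing oldest blocks
// and wrapping around as needed until enough contiguous space is available.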
static inline void dr_reserve_cache(int tcache_id, struct ring_buffer *rb, int count)
{
  // while not enough space available
  if (rb->next + count >= rb->size) {
    // not enough space in rest of buffer -> wrap around
    while (rb->first >= rb->next && rb->used)
      dr_free_oldest_block(tcache_id);
    if (rb->first == 0 && rb->used)
      dr_free_oldest_block(tcache_id);
    ring_wrap(rb);
  }
  while (rb->first >= rb->next && rb->next + count > rb->first && rb->used)
    dr_free_oldest_block(tcache_id);
}

static u8 *dr_prepare_cache(int tcache_id, int insn_count, int entry_count)
{
  int bf = block_ring[tcache_id].first;

  // reserve one block desc
  if (block_ring[tcache_id].used >= block_ring[tcache_id].size)
    dr_free_oldest_block(tcache_id);
  // reserve block entries
  dr_reserve_cache(tcache_id, &entry_ring[tcache_id], entry_count);
  // reserve cache space
  dr_reserve_cache(tcache_id, &tcache_ring[tcache_id], insn_count*128);

  if (bf != block_ring[tcache_id].first) {
    // deleted some block(s), clear branch cache and return stack
#if BRANCH_CACHE
    if (tcache_id)
      memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    else {
      memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
      memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
    }
#endif
#if CALL_STACK
    if (tcache_id) {
      memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      sh2s[tcache_id-1].rts_cache_idx = 0;
    } else {
      memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
      sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
    }
#endif
  }

  return ring_next(&tcache_ring[tcache_id]);
}

static void dr_flush_tcache(int tcid)
{
  int i;
#if (DRC_DEBUG & 1)
  elprintf(EL_STATUS, "tcache #%d flush! (%d/%d, bds %d/%d bes %d/%d)", tcid,
    tcache_ring[tcid].used, tcache_ring[tcid].size, block_ring[tcid].used,
    block_ring[tcid].size, entry_ring[tcid].used, entry_ring[tcid].size);
#endif

  ring_reset(&tcache_ring[tcid]);
  ring_reset(&block_ring[tcid]);
  ring_reset(&entry_ring[tcid]);

  block_link_pool_counts[tcid] = 0;
  blink_free[tcid] = NULL;
  memset(unresolved_links[tcid], 0, sizeof(*unresolved_links[0]) * HASH_TABLE_SIZE(tcid));
  memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * HASH_TABLE_SIZE(tcid));

  if (tcid == 0) { // ROM, RAM
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache));
    memset(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    memset(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache));
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  } else {
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[tcid - 1]));
    memset(Pico32xMem->drclit_da[tcid - 1], 0, sizeof(Pico32xMem->drclit_da[tcid - 1]));
    memset(sh2s[tcid - 1].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[tcid - 1].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    sh2s[tcid - 1].rts_cache_idx = 0;
  }
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcid] = tcache_ring[tcid].base;
#endif

  for (i = 0; i < RAM_SIZE(tcid) / INVAL_PAGE_SIZE; i++)
    discard_block_list(&inval_lookup[tcid][i]);
  discard_block_list(&inactive_blocks[tcid]);
}

static void *dr_failure(void)
{
  printf("recompilation failed\n");
  exit(1);
}
// ---------------------------------------------------------------

// NB rcache allocation dependencies (see the example sketch below):
// - get_reg_arg/get_tmp_arg first (might evict other regs just allocated)
// - get_reg(..., NULL) before get_reg(..., &hr) if it might get the same reg
// - get_reg(..., RC_GR_READ/RMW, ...) before WRITE (might evict needed reg)
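// an illustrative (hypothetical) call sequence obeying the rules above, for
// an insn that reads one guest reg and overwrites another:
//   arg = rcache_get_tmp_arg(0);                      // args first, may evict
//   hrs = rcache_get_reg(rs, RC_GR_READ, NULL);       // reads before...
//   hrd = rcache_get_reg(rd, RC_GR_WRITE, NULL);      // ...writes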
// register cache / constant propagation stuff
typedef enum {
  RC_GR_READ,
  RC_GR_WRITE,
  RC_GR_RMW,
} rc_gr_mode;

typedef struct {
  u32 gregs;
  u32 val;
} gconst_t;

gconst_t gconsts[ARRAY_SIZE(guest_regs)];
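// each gconst slot holds one constant value; gregs is the bitmask of guest
// regs currently known to contain that value (aliases share a single slot).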
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr);
static inline int rcache_is_cached(sh2_reg_e r);
static void rcache_add_vreg_alias(int x, sh2_reg_e r);
static void rcache_remove_vreg_alias(int x, sh2_reg_e r);
static void rcache_evict_vreg(int x);
static void rcache_remap_vreg(int x);

static void rcache_set_x16(int hr, int s16_, int u16_)
{
  int x = reg_map_host[hr];
  if (x >= 0) {
    cache_regs[x].flags &= ~(HRF_S16|HRF_U16);
    if (s16_) cache_regs[x].flags |= HRF_S16;
    if (u16_) cache_regs[x].flags |= HRF_U16;
  }
}

static void rcache_copy_x16(int hr, int hr2)
{
  int x = reg_map_host[hr], y = reg_map_host[hr2];
  if (x >= 0 && y >= 0) {
    cache_regs[x].flags = (cache_regs[x].flags & ~(HRF_S16|HRF_U16)) |
                          (cache_regs[y].flags & (HRF_S16|HRF_U16));
  }
}

static int rcache_is_s16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_S16 : 0);
}

static int rcache_is_u16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_U16 : 0);
}

#define RCACHE_DUMP(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i; \
  printf("cache dump %s:\n",msg); \
  printf(" cache_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->type != HR_FREE || cp->gregs || cp->locked || cp->flags) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
  } \
  printf(" guest_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 || gp->sreg >= 0 || gp->flags) \
      printf(" %d: v=%d f=%x s=%d c=%d\n", i, gp->vreg, gp->flags, gp->sreg, gp->cnst); \
  } \
  printf(" gconsts:\n"); \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    if (gconsts[i].gregs) \
      printf(" %d: m=%x v=%x\n", i, gconsts[i].gregs, gconsts[i].val); \
  } \
}

#define RCACHE_CHECK(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i, x, m = 0, d = 0; \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->flags & HRF_PINNED) m |= (1 << i); \
    if (cp->type == HR_FREE || cp->type == HR_TEMP) continue; \
    /* check connectivity greg->vreg */ \
    FOR_ALL_BITS_SET_DO(cp->gregs, x, \
      if (guest_regs[x].vreg != i) \
        { d = 1; printf("cache check v=%d r=%d not connected?\n",i,x); } \
    ) \
  } \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 && !(cache_regs[gp->vreg].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d v=%d not connected?\n", i, gp->vreg); }\
    if (gp->vreg != -1 && cache_regs[gp->vreg].type != HR_CACHED) \
      { d = 1; printf("cache check r=%d v=%d wrong type?\n", i, gp->vreg); }\
    if ((gp->flags & GRF_CONST) && !(gconsts[gp->cnst].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d c=%d not connected?\n", i, gp->cnst); }\
    if ((gp->flags & GRF_CDIRTY) && (gp->vreg != -1 || !(gp->flags & GRF_CONST)))\
      { d = 1; printf("cache check r=%d CDIRTY?\n", i); } \
    if (gp->flags & (GRF_STATIC|GRF_PINNED)) { \
      if (gp->sreg == -1 || !(cache_regs[gp->sreg].flags & HRF_PINNED))\
        { d = 1; printf("cache check r=%d v=%d not pinned?\n", i, gp->vreg); } \
      else m &= ~(1 << gp->sreg); \
    } \
  } \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    FOR_ALL_BITS_SET_DO(gconsts[i].gregs, x, \
      if (guest_regs[x].cnst != i || !(guest_regs[x].flags & GRF_CONST)) \
        { d = 1; printf("cache check c=%d v=%d not connected?\n",i,x); } \
    ) \
  } \
  if (m) \
    { d = 1; printf("cache check m=%x pinning wrong?\n",m); } \
  if (d) RCACHE_DUMP(msg) \
  /* else { \
    printf("locked regs %s:\n",msg); \
    for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
      cp = &cache_regs[i]; \
      if (cp->locked) \
        printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
    } \
  } */ \
}

#if PROPAGATE_CONSTANTS
static inline int gconst_alloc(sh2_reg_e r)
{
  int i, n = -1;

  for (i = 0; i < ARRAY_SIZE(gconsts); i++) {
    gconsts[i].gregs &= ~(1 << r);
    if (gconsts[i].gregs == 0 && n < 0)
      n = i;
  }
  if (n >= 0)
    gconsts[n].gregs = (1 << r);
  else {
    printf("all gconst buffers in use, aborting\n");
    exit(1); // cannot happen - more constants than guest regs?
  }
  return n;
}

static void gconst_set(sh2_reg_e r, u32 val)
{
  int i = gconst_alloc(r);

  guest_regs[r].flags |= GRF_CONST;
  guest_regs[r].cnst = i;
  gconsts[i].val = val;
}

static void gconst_new(sh2_reg_e r, u32 val)
{
  gconst_set(r, val);
  guest_regs[r].flags |= GRF_CDIRTY;

  // throw away old r that we might have cached
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
}
#endif

static int gconst_get(sh2_reg_e r, u32 *val)
{
  if (guest_regs[r].flags & GRF_CONST) {
    *val = gconsts[guest_regs[r].cnst].val;
    return 1;
  }
  *val = 0;
  return 0;
}

static int gconst_check(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    return 1;
  return 0;
}
// if r's constant is dirty, materialize it into vreg and mark all aliases as
// cached and dirty; else do nothing
static int gconst_try_read(int vreg, sh2_reg_e r)
{
  int i, x;
  u32 v;

  if (guest_regs[r].flags & GRF_CDIRTY) {
    x = guest_regs[r].cnst;
    v = gconsts[x].val;
    emith_move_r_imm(cache_regs[vreg].hreg, v);
    rcache_set_x16(cache_regs[vreg].hreg, v == (s16)v, v == (u16)v);
    FOR_ALL_BITS_SET_DO(gconsts[x].gregs, i,
      {
        if (guest_regs[i].vreg >= 0 && guest_regs[i].vreg != vreg)
          rcache_remove_vreg_alias(guest_regs[i].vreg, i);
        if (guest_regs[i].vreg < 0)
          rcache_add_vreg_alias(vreg, i);
        guest_regs[i].flags &= ~GRF_CDIRTY;
        guest_regs[i].flags |= GRF_DIRTY;
      });
    cache_regs[vreg].type = HR_CACHED;
    cache_regs[vreg].flags |= HRF_DIRTY;
    return 1;
  }
  return 0;
}
static u32 gconst_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY)
      mask |= (1 << i);
  return mask;
}

static void gconst_kill(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    gconsts[guest_regs[r].cnst].gregs &= ~(1 << r);
  guest_regs[r].flags &= ~(GRF_CONST|GRF_CDIRTY);
}

static void gconst_copy(sh2_reg_e rd, sh2_reg_e rs)
{
  gconst_kill(rd);
  if (guest_regs[rs].flags & GRF_CONST) {
    guest_regs[rd].flags |= GRF_CONST;
    if (guest_regs[rd].vreg < 0)
      guest_regs[rd].flags |= GRF_CDIRTY;
    guest_regs[rd].cnst = guest_regs[rs].cnst;
    gconsts[guest_regs[rd].cnst].gregs |= (1 << rd);
  }
}

static void gconst_clean(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY) {
      // using RC_GR_READ here: it will call gconst_try_read,
      // cache the reg and mark it dirty.
      rcache_get_reg_(i, RC_GR_READ, 0, NULL);
    }
}

static void gconst_invalidate(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_CONST|GRF_CDIRTY))
      gconsts[guest_regs[i].cnst].gregs &= ~(1 << i);
    guest_regs[i].flags &= ~(GRF_CONST|GRF_CDIRTY);
  }
}

static u16 rcache_counter;
// SH2 register usage bitmasks
static u32 rcache_vregs_reg;     // regs of type HRT_REG (for pinning)
static u32 rcache_regs_static;   // statically allocated regs
static u32 rcache_regs_pinned;   // pinned regs
static u32 rcache_regs_now;      // regs used in current insn
static u32 rcache_regs_soon;     // regs used in the next few insns
static u32 rcache_regs_late;     // regs used in later insns
static u32 rcache_regs_discard;  // regs overwritten without being used
static u32 rcache_regs_clean;    // regs needing cleaning

static void rcache_lock_vreg(int x)
{
  if (x >= 0) {
    cache_regs[x].locked ++;
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("locking free vreg %x, aborting\n", x);
      exit(1);
    }
    if (!cache_regs[x].locked) {
      printf("locking overflow vreg %x, aborting\n", x);
      exit(1);
    }
#endif
  }
}

static void rcache_unlock_vreg(int x)
{
  if (x >= 0) {
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("unlocking free vreg %x, aborting\n", x);
      exit(1);
    }
#endif
    if (cache_regs[x].locked)
      cache_regs[x].locked --;
  }
}

static void rcache_free_vreg(int x)
{
  cache_regs[x].type = cache_regs[x].locked ? HR_TEMP : HR_FREE;
  cache_regs[x].flags &= HRF_PINNED;
  cache_regs[x].gregs = 0;
}

static void rcache_unmap_vreg(int x)
{
  int i;

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, i,
    if (guest_regs[i].flags & GRF_DIRTY) {
      // if a dirty reg is unmapped save its value to context
      if ((~rcache_regs_discard | rcache_regs_now) & (1 << i))
        emith_ctx_write(cache_regs[x].hreg, i * 4);
      guest_regs[i].flags &= ~GRF_DIRTY;
    }
    guest_regs[i].vreg = -1);
  rcache_free_vreg(x);
}

static void rcache_move_vreg(int d, int x)
{
  int i;

  cache_regs[d].type = HR_CACHED;
  cache_regs[d].gregs = cache_regs[x].gregs;
  cache_regs[d].flags &= HRF_PINNED;
  cache_regs[d].flags |= cache_regs[x].flags & ~HRF_PINNED;
  cache_regs[d].locked = 0;
  cache_regs[d].stamp = cache_regs[x].stamp;
  emith_move_r_r(cache_regs[d].hreg, cache_regs[x].hreg);
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].vreg == x)
      guest_regs[i].vreg = d;
  rcache_free_vreg(x);
}
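// write back a dirty vreg to the context (or move statics back to their sreg),
// keeping the cached copy; regs about to be discarded are not written back.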
static void rcache_clean_vreg(int x)
{
  u32 rns = rcache_regs_now | rcache_regs_soon;
  int r;

  if (cache_regs[x].flags & HRF_DIRTY) { // writeback
    cache_regs[x].flags &= ~HRF_DIRTY;
    rcache_lock_vreg(x);
    FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, r,
      if (guest_regs[r].flags & GRF_DIRTY) {
        if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) {
          if (guest_regs[r].vreg != guest_regs[r].sreg &&
              !cache_regs[guest_regs[r].sreg].locked &&
              ((~rcache_regs_discard | rcache_regs_now) & (1 << r)) &&
              !(rns & cache_regs[guest_regs[r].sreg].gregs)) {
            // statically mapped reg not in its sreg. move back to sreg
            rcache_evict_vreg(guest_regs[r].sreg);
            emith_move_r_r(cache_regs[guest_regs[r].sreg].hreg,
                           cache_regs[guest_regs[r].vreg].hreg);
            rcache_copy_x16(cache_regs[guest_regs[r].sreg].hreg,
                           cache_regs[guest_regs[r].vreg].hreg);
            rcache_remove_vreg_alias(x, r);
            rcache_add_vreg_alias(guest_regs[r].sreg, r);
            cache_regs[guest_regs[r].sreg].flags |= HRF_DIRTY;
          } else
            // cannot remap. keep dirty for writeback in unmap
            cache_regs[x].flags |= HRF_DIRTY;
        } else {
          if ((~rcache_regs_discard | rcache_regs_now) & (1 << r))
            emith_ctx_write(cache_regs[x].hreg, r * 4);
          guest_regs[r].flags &= ~GRF_DIRTY;
        }
        rcache_regs_clean &= ~(1 << r);
      })
    rcache_unlock_vreg(x);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after clean");
#endif
}

static void rcache_add_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs |= (1 << r);
  guest_regs[r].vreg = x;
  cache_regs[x].type = HR_CACHED;
}

static void rcache_remove_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs &= ~(1 << r);
  if (!cache_regs[x].gregs) {
    // no reg mapped -> free vreg
    if (cache_regs[x].locked)
      cache_regs[x].type = HR_TEMP;
    else
      rcache_free_vreg(x);
  }
  guest_regs[r].vreg = -1;
}

static void rcache_evict_vreg(int x)
{
#if REMAP_REGISTER
  rcache_remap_vreg(x);
#else
  rcache_clean_vreg(x);
#endif
  rcache_unmap_vreg(x);
}

static void rcache_evict_vreg_aliases(int x, sh2_reg_e r)
{
  rcache_remove_vreg_alias(x, r);
  rcache_evict_vreg(x);
  rcache_add_vreg_alias(x, r);
}
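// find a host reg to allocate: what > 0 -> REG, what < 0 -> TEMP, what == 0
// -> any non-TEMP. returns -1 if only candidates below @minprio remain.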
static int rcache_allocate(int what, int minprio)
{
  // evict reg with oldest stamp (only for HRT_REG, no temps)
  int i, i_prio, oldest = -1, prio = 0;
  u16 min_stamp = (u16)-1;

  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--) {
    // consider only non-static, unpinned, unlocked REG or TEMP
    if ((cache_regs[i].flags & HRF_PINNED) || cache_regs[i].locked)
      continue;
    if ((what > 0 && !(cache_regs[i].htype & HRT_REG)) ||   // get a REG
        (what == 0 && (cache_regs[i].htype & HRT_TEMP)) ||  // get a non-TEMP
        (what < 0 && !(cache_regs[i].htype & HRT_TEMP)))    // get a TEMP
      continue;
    if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP) {
      // REG is free
      prio = 10;
      oldest = i;
      break;
    }
    if (cache_regs[i].type == HR_CACHED) {
      if (rcache_regs_now & cache_regs[i].gregs)
        // REGs needed for the current insn
        i_prio = 0;
      else if (rcache_regs_soon & cache_regs[i].gregs)
        // REGs needed in the next insns
        i_prio = 2;
      else if (rcache_regs_late & cache_regs[i].gregs)
        // REGs needed in some future insn
        i_prio = 4;
      else if (~rcache_regs_discard & cache_regs[i].gregs)
        // REGs not needed in the foreseeable future
        i_prio = 6;
      else
        // REGs soon overwritten anyway
        i_prio = 8;
      if (!(cache_regs[i].flags & HRF_DIRTY)) i_prio ++;

      if (prio < i_prio || (prio == i_prio && cache_regs[i].stamp < min_stamp)) {
        min_stamp = cache_regs[i].stamp;
        oldest = i;
        prio = i_prio;
      }
    }
  }

  if (prio < minprio || oldest == -1)
    return -1;

  if (cache_regs[oldest].type == HR_CACHED)
    rcache_evict_vreg(oldest);
  else
    rcache_free_vreg(oldest);

  return oldest;
}

static int rcache_allocate_vreg(int needed)
{
  int x;

  x = rcache_allocate(1, needed ? 0 : 4);
  if (x < 0)
    x = rcache_allocate(-1, 0);
  return x;
}

static int rcache_allocate_nontemp(void)
{
  int x = rcache_allocate(0, 4);
  return x;
}

static int rcache_allocate_temp(void)
{
  int x = rcache_allocate(-1, 0);
  if (x < 0)
    x = rcache_allocate(0, 0);
  return x;
}
#if REMAP_REGISTER
// maps a host register to a REG
static int rcache_map_reg(sh2_reg_e r, int hr)
{
  int i;

  gconst_kill(r);

  // lookup the TEMP hr maps to
  i = reg_map_host[hr];
  if (i < 0) {
    // must not happen
    printf("invalid host register %d\n", hr);
    exit(1);
  }

  // remove old mappings of r and i if one exists
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);

  // set new mapping
  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 1 << r;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  cache_regs[i].flags |= HRF_DIRTY;
  rcache_lock_vreg(i);
  guest_regs[r].flags |= GRF_DIRTY;
  guest_regs[r].vreg = i;
#if DRC_DEBUG & 64
  RCACHE_CHECK("after map");
#endif
  return cache_regs[i].hreg;
}

// remap vreg from a TEMP to a REG if it will be used (upcoming TEMP invalidation)
static void rcache_remap_vreg(int x)
{
  u32 rsl_d = rcache_regs_soon | rcache_regs_late;
  int d;

  // x must be a cached vreg
  if (cache_regs[x].type != HR_CACHED || cache_regs[x].locked)
    return;
  // don't do it if x isn't used
  if (!(rsl_d & cache_regs[x].gregs)) {
    // clean here to avoid data loss on invalidation
    rcache_clean_vreg(x);
    return;
  }

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, d,
    if ((guest_regs[d].flags & (GRF_STATIC|GRF_PINNED)) &&
        !cache_regs[guest_regs[d].sreg].locked &&
        !((rsl_d|rcache_regs_now) & cache_regs[guest_regs[d].sreg].gregs)) {
      // STATIC not in its sreg and sreg is available
      rcache_evict_vreg(guest_regs[d].sreg);
      rcache_move_vreg(guest_regs[d].sreg, x);
      return;
    }
  )

  // allocate a non-TEMP vreg
  rcache_lock_vreg(x); // lock to avoid evicting x
  d = rcache_allocate_nontemp();
  rcache_unlock_vreg(x);
  if (d < 0) {
    rcache_clean_vreg(x);
    return;
  }

  // move vreg to new location
  rcache_move_vreg(d, x);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after remap");
#endif
}
#endif
#if ALIAS_REGISTERS
static void rcache_alias_vreg(sh2_reg_e rd, sh2_reg_e rs)
{
  int x;

  // if s isn't constant, it must be in cache for aliasing
  if (!gconst_check(rs))
    rcache_get_reg_(rs, RC_GR_READ, 0, NULL);

  // if d and s are not already aliased
  x = guest_regs[rs].vreg;
  if (guest_regs[rd].vreg != x) {
    // remove possible old mapping of dst
    if (guest_regs[rd].vreg >= 0)
      rcache_remove_vreg_alias(guest_regs[rd].vreg, rd);
    // make dst an alias of src
    if (x >= 0)
      rcache_add_vreg_alias(x, rd);
    // if d is now in cache, it must be dirty
    if (guest_regs[rd].vreg >= 0) {
      x = guest_regs[rd].vreg;
      cache_regs[x].flags |= HRF_DIRTY;
      guest_regs[rd].flags |= GRF_DIRTY;
    }
  }

  gconst_copy(rd, rs);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after alias");
#endif
}
#endif
// note: must not be called when doing conditional code
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr)
{
  int src, dst, ali;
  cache_reg_t *tr;
  u32 rsp_d = (rcache_regs_soon | rcache_regs_static | rcache_regs_pinned) &
              ~rcache_regs_discard;

  dst = src = guest_regs[r].vreg;
  rcache_lock_vreg(src); // lock to avoid evicting src
  // good opportunity to relocate a remapped STATIC?
  if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
      src != guest_regs[r].sreg && (src < 0 || mode != RC_GR_READ) &&
      !cache_regs[guest_regs[r].sreg].locked &&
      !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[r].sreg].gregs)) {
    dst = guest_regs[r].sreg;
    rcache_evict_vreg(dst);
  } else if (dst < 0) {
    // allocate a cache register
    if ((dst = rcache_allocate_vreg(rsp_d & (1 << r))) < 0) {
      printf("no registers to evict, aborting\n");
      exit(1);
    }
  }
  tr = &cache_regs[dst];
  tr->stamp = rcache_counter;

  // remove r from src
  if (src >= 0 && src != dst)
    rcache_remove_vreg_alias(src, r);
  rcache_unlock_vreg(src);

  // if r has a constant it may have aliases
  if (mode != RC_GR_WRITE && gconst_try_read(dst, r))
    src = dst;

  // if r will be modified, check for aliases being needed soon
  ali = tr->gregs & ~(1 << r);
  if (mode != RC_GR_READ && src == dst && ali) {
    int x = -1;
    if ((rsp_d|rcache_regs_now) & ali) {
      if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
          guest_regs[r].sreg == dst && !tr->locked) {
        // split aliases if r is STATIC in sreg and dst isn't already locked
        int t;
        FOR_ALL_BITS_SET_DO(ali, t,
          if ((guest_regs[t].flags & (GRF_STATIC|GRF_PINNED)) &&
              !(ali & ~(1 << t)) &&
              !cache_regs[guest_regs[t].sreg].locked &&
              !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[t].sreg].gregs)) {
            // alias is a single STATIC and its sreg is available
            x = guest_regs[t].sreg;
            rcache_evict_vreg(x);
          } else {
            rcache_lock_vreg(dst); // lock to avoid evicting dst
            x = rcache_allocate_vreg(rsp_d & ali);
            rcache_unlock_vreg(dst);
          }
          break;
        )
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          src = dst;
          rcache_move_vreg(x, dst);
        }
      } else {
        // split r
        rcache_lock_vreg(src); // lock to avoid evicting src
        x = rcache_allocate_vreg(rsp_d & (1 << r));
        rcache_unlock_vreg(src);
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          dst = x;
          tr = &cache_regs[dst];
          tr->stamp = rcache_counter;
        }
      }
    }
    if (x < 0)
      // aliases not needed or no vreg available, remove them
      rcache_evict_vreg_aliases(dst, r);
  }

  // assign r to dst
  rcache_add_vreg_alias(dst, r);

  // handle dst register transfer
  if (src < 0 && mode != RC_GR_WRITE)
    emith_ctx_read(tr->hreg, r * 4);
  if (hr) {
    *hr = (src >= 0 ? cache_regs[src].hreg : tr->hreg);
    rcache_lock_vreg(src >= 0 ? src : dst);
  } else if (src >= 0 && mode != RC_GR_WRITE && cache_regs[src].hreg != tr->hreg)
    emith_move_r_r(tr->hreg, cache_regs[src].hreg);

  // housekeeping
  if (do_locking)
    rcache_lock_vreg(dst);
  if (mode != RC_GR_READ) {
    tr->flags |= HRF_DIRTY;
    guest_regs[r].flags |= GRF_DIRTY;
    gconst_kill(r);
    rcache_set_x16(tr->hreg, 0, 0);
  } else if (src >= 0 && cache_regs[src].hreg != tr->hreg)
    rcache_copy_x16(tr->hreg, cache_regs[src].hreg);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getreg");
#endif
  return tr->hreg;
}

static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr)
{
  return rcache_get_reg_(r, mode, 1, hr);
}
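// pin a guest reg into the host reg it currently occupies, so it is treated
// like a statically allocated reg until rcache_unpin_all is called.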
static void rcache_pin_reg(sh2_reg_e r)
{
  int hr, x;

  // don't pin if static or already pinned
  if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED))
    return;

  rcache_regs_soon |= (1 << r); // kludge to prevent allocation of a temp
  hr = rcache_get_reg_(r, RC_GR_RMW, 0, NULL);
  x = reg_map_host[hr];

  // can only pin non-TEMPs
  if (!(cache_regs[x].htype & HRT_TEMP)) {
    guest_regs[r].flags |= GRF_PINNED;
    cache_regs[x].flags |= HRF_PINNED;
    guest_regs[r].sreg = x;
    rcache_regs_pinned |= (1 << r);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after pin");
#endif
}
static int rcache_get_tmp(void)
{
  int i;

  i = rcache_allocate_temp();
  if (i < 0) {
    printf("cannot allocate temp\n");
    exit(1);
  }

  cache_regs[i].type = HR_TEMP;
  rcache_lock_vreg(i);

  return cache_regs[i].hreg;
}

static int rcache_get_vreg_hr(int hr)
{
  int i;

  i = reg_map_host[hr];
  if (i < 0 || cache_regs[i].locked) {
    printf("host register %d is locked\n", hr);
    exit(1);
  }

  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);
  else if (cache_regs[i].type == HR_TEMP && cache_regs[i].locked) {
    printf("host reg %d already used, aborting\n", hr);
    exit(1);
  }

  return i;
}

static int rcache_get_vreg_arg(int arg)
{
  int hr = 0;

  host_arg2reg(hr, arg);
  return rcache_get_vreg_hr(hr);
}

// get a reg to be used as function arg
static int rcache_get_tmp_arg(int arg)
{
  int x = rcache_get_vreg_arg(arg);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}

// ... as return value after a call
static int rcache_get_tmp_ret(void)
{
  int x = rcache_get_vreg_hr(RET_REG);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}
// same but caches a reg if access is readonly (announced by hr being NULL)
static int rcache_get_reg_arg(int arg, sh2_reg_e r, int *hr)
{
  int i, srcr, dstr, dstid, keep;
  u32 val;
  host_arg2reg(dstr, arg);

  i = guest_regs[r].vreg;
  if (i >= 0 && cache_regs[i].type == HR_CACHED && cache_regs[i].hreg == dstr)
    // r is already in arg, avoid evicting
    dstid = i;
  else
    dstid = rcache_get_vreg_arg(arg);
  dstr = cache_regs[dstid].hreg;

  if (rcache_is_cached(r)) {
    // r is needed later on anyway
    srcr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
    keep = 1;
  } else if ((guest_regs[r].flags & GRF_CDIRTY) && gconst_get(r, &val)) {
    // r has an uncommitted const - load into arg, but keep constant uncommitted
    srcr = dstr;
    emith_move_r_imm(srcr, val);
    keep = 0;
  } else {
    // must read from ctx
    srcr = dstr;
    emith_ctx_read(srcr, r * 4);
    keep = 1;
  }

  if (cache_regs[dstid].type == HR_CACHED)
    rcache_evict_vreg(dstid);

  cache_regs[dstid].type = HR_TEMP;
  if (hr == NULL) {
    if (dstr != srcr)
      // arg is a copy of cached r
      emith_move_r_r(dstr, srcr);
    else if (keep && guest_regs[r].vreg < 0)
      // keep arg as vreg for r
      rcache_add_vreg_alias(dstid, r);
  } else {
    *hr = srcr;
    if (dstr != srcr) // must lock srcr if not copied here
      rcache_lock_vreg(reg_map_host[srcr]);
  }

  cache_regs[dstid].stamp = ++rcache_counter;
  rcache_lock_vreg(dstid);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getarg");
#endif
  return dstr;
}
static void rcache_free_tmp(int hr)
{
  int i = reg_map_host[hr];

  if (i < 0 || cache_regs[i].type != HR_TEMP) {
    printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, cache_regs[i].type);
    exit(1);
  }

  rcache_unlock_vreg(i);
}

// saves temporary result either in REG or in drctmp
static int rcache_save_tmp(int hr)
{
  int i;

  // find REG, either free or unlocked temp or oldest non-hinted cached
  i = rcache_allocate_nontemp();
  if (i < 0) {
    // if none is available, store in drctmp
    emith_ctx_write(hr, offsetof(SH2, drc_tmp));
    rcache_free_tmp(hr);
    return -1;
  }

  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 0; // not storing any guest register
  cache_regs[i].flags &= HRF_PINNED;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  rcache_lock_vreg(i);
  emith_move_r_r(cache_regs[i].hreg, hr);
  rcache_free_tmp(hr);
  return i;
}

static int rcache_restore_tmp(int x)
{
  int hr;

  // find REG with tmp store: cached but with no gregs
  if (x >= 0) {
    if (cache_regs[x].type != HR_CACHED || cache_regs[x].gregs) {
      printf("invalid tmp storage %d\n", x);
      exit(1);
    }
    // found, transform to a TEMP
    cache_regs[x].type = HR_TEMP;
    return cache_regs[x].hreg;
  }

  // if not available, create a TEMP store and fetch from drctmp
  hr = rcache_get_tmp();
  emith_ctx_read(hr, offsetof(SH2, drc_tmp));

  return hr;
}

static void rcache_free(int hr)
{
  int x = reg_map_host[hr];
  rcache_unlock_vreg(x);
}

static void rcache_unlock(int x)
{
  if (x >= 0)
    cache_regs[x].locked = 0;
}

static void rcache_unlock_all(void)
{
  int i;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    cache_regs[i].locked = 0;
}

static void rcache_unpin_all(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & GRF_PINNED) {
      guest_regs[i].flags &= ~GRF_PINNED;
      cache_regs[guest_regs[i].sreg].flags &= ~HRF_PINNED;
      guest_regs[i].sreg = -1;
      rcache_regs_pinned &= ~(1 << i);
    }
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after unpin");
#endif
}

static void rcache_save_pinned(void)
{
  int i;

  // save pinned regs to context
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if ((guest_regs[i].flags & GRF_PINNED) && guest_regs[i].vreg >= 0)
      emith_ctx_write(cache_regs[guest_regs[i].vreg].hreg, i * 4);
}

static inline void rcache_set_usage_now(u32 mask)
{
  rcache_regs_now = mask;
}

static inline void rcache_set_usage_soon(u32 mask)
{
  rcache_regs_soon = mask;
}

static inline void rcache_set_usage_late(u32 mask)
{
  rcache_regs_late = mask;
}

static inline void rcache_set_usage_discard(u32 mask)
{
  rcache_regs_discard = mask;
}
static inline int rcache_is_cached(sh2_reg_e r)
{
  // is r in cache, or needed soon anyway?
  u32 rsc = rcache_regs_soon | rcache_regs_clean;
  return (guest_regs[r].vreg >= 0 || (rsc & (1 << r)));
}

static inline int rcache_is_hreg_used(int hr)
{
  int x = reg_map_host[hr];
  // is hr in use?
  return cache_regs[x].type != HR_FREE &&
        (cache_regs[x].type != HR_TEMP || cache_regs[x].locked);
}
static inline u32 rcache_used_hregs_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if ((cache_regs[i].htype & HRT_TEMP) && cache_regs[i].type != HR_FREE &&
        (cache_regs[i].type != HR_TEMP || cache_regs[i].locked))
      mask |= 1 << cache_regs[i].hreg;

  return mask;
}

static inline u32 rcache_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_DIRTY)
      mask |= 1 << i;
  mask |= gconst_dirty_mask();

  return mask;
}

static inline u32 rcache_cached_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED)
      mask |= cache_regs[i].gregs;

  return mask;
}

static void rcache_clean_tmp(void)
{
  int i;

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED && (cache_regs[i].htype & HRT_TEMP)) {
      rcache_unlock(i);
#if REMAP_REGISTER
      rcache_remap_vreg(i);
#else
      rcache_clean_vreg(i);
#endif
    }
  rcache_regs_clean = 0;
}

static void rcache_clean_masked(u32 mask)
{
  int i, r, hr;
  u32 m;

  rcache_regs_clean |= mask;
  mask = rcache_regs_clean;

  // clean constants where all aliases are covered by the mask, exempt statics
  // to avoid flushing them to context if sreg isn't available
  m = mask & ~(rcache_regs_static | rcache_regs_pinned);
  for (i = 0; i < ARRAY_SIZE(gconsts); i++)
    if ((gconsts[i].gregs & m) && !(gconsts[i].gregs & ~mask)) {
      FOR_ALL_BITS_SET_DO(gconsts[i].gregs, r,
        if (guest_regs[r].flags & GRF_CDIRTY) {
          hr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
          rcache_clean_vreg(reg_map_host[hr]);
          break;
        });
    }

  // clean vregs where all aliases are covered by the mask
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED &&
        (cache_regs[i].gregs & mask) && !(cache_regs[i].gregs & ~mask))
      rcache_clean_vreg(i);
}

static void rcache_clean(void)
{
  int i;
  gconst_clean();

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--)
    if (cache_regs[i].type == HR_CACHED)
      rcache_clean_vreg(i);

  // relocate statics to their sregs (necessary before conditional jumps)
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if ((guest_regs[i].flags & (GRF_STATIC|GRF_PINNED)) &&
        guest_regs[i].vreg != guest_regs[i].sreg) {
      rcache_lock_vreg(guest_regs[i].vreg);
      rcache_evict_vreg(guest_regs[i].sreg);
      rcache_unlock_vreg(guest_regs[i].vreg);

      if (guest_regs[i].vreg < 0)
        emith_ctx_read(cache_regs[guest_regs[i].sreg].hreg, i*4);
      else {
        emith_move_r_r(cache_regs[guest_regs[i].sreg].hreg,
                       cache_regs[guest_regs[i].vreg].hreg);
        rcache_copy_x16(cache_regs[guest_regs[i].sreg].hreg,
                       cache_regs[guest_regs[i].vreg].hreg);
        rcache_remove_vreg_alias(guest_regs[i].vreg, i);
      }
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_regs_clean = 0;
}
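// drop all TEMP host regs from the cache, evicting (writing back and
// unmapping) any guest regs still cached in them.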
static void rcache_invalidate_tmp(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype & HRT_TEMP) {
      rcache_unlock(i);
      if (cache_regs[i].type == HR_CACHED)
        rcache_evict_vreg(i);
      else
        rcache_free_vreg(i);
    }
  }
}

static void rcache_invalidate(void)
{
  int i;

  gconst_invalidate();
  rcache_unlock_all();

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    rcache_free_vreg(i);

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i].flags &= GRF_STATIC;
    if (!(guest_regs[i].flags & GRF_STATIC))
      guest_regs[i].vreg = -1;
    else {
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_counter = 0;
  rcache_regs_now = rcache_regs_soon = rcache_regs_late = 0;
  rcache_regs_discard = rcache_regs_clean = 0;
}

static void rcache_flush(void)
{
  rcache_clean();
  rcache_invalidate();
}

static void rcache_create(void)
{
  int x = 0, i;

  // create cache_regs as host register representation
  // RET_REG/params should be first TEMPs to avoid allocation conflicts in calls
  cache_regs[x++] = (cache_reg_t) {.hreg = RET_REG, .htype = HRT_TEMP};
  for (i = 0; i < ARRAY_SIZE(hregs_param); i++)
    if (hregs_param[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_param[i],.htype = HRT_TEMP};

  for (i = 0; i < ARRAY_SIZE(hregs_temp); i++)
    if (hregs_temp[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_temp[i], .htype = HRT_TEMP};

  for (i = ARRAY_SIZE(hregs_saved)-1; i >= 0; i--)
    if (hregs_saved[i] != CONTEXT_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_saved[i], .htype = HRT_REG};

  if (x != ARRAY_SIZE(cache_regs)) {
    printf("rcache_create failed (conflicting register count)\n");
    exit(1);
  }

  // mapping from host_register to cache regs index
  memset(reg_map_host, -1, sizeof(reg_map_host));
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype)
      reg_map_host[cache_regs[i].hreg] = i;
    if (cache_regs[i].htype == HRT_REG)
      rcache_vregs_reg |= (1 << i);
  }

  // create static host register mapping for SH2 regs
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i] = (guest_reg_t){.sreg = -1};
  }
  for (i = 0; i < ARRAY_SIZE(regs_static); i += 2) {
    for (x = ARRAY_SIZE(cache_regs)-1; x >= 0; x--)
      if (cache_regs[x].hreg == regs_static[i+1]) break;
    if (x >= 0) {
      guest_regs[regs_static[i]] = (guest_reg_t){.flags = GRF_STATIC,.sreg = x};
      rcache_regs_static |= (1 << regs_static[i]);
      rcache_vregs_reg &= ~(1 << x);
    }
  }

  printf("DRC registers created, %ld host regs (%d REG, %d STATIC, 1 CTX)\n",
    CACHE_REGS+1L, count_bits(rcache_vregs_reg),count_bits(rcache_regs_static));
}

static void rcache_init(void)
{
  // create DRC data structures
  rcache_create();
  rcache_invalidate();
#if DRC_DEBUG & 64
  RCACHE_CHECK("after init");
#endif
}
// ---------------------------------------------------------------

// NB may return either REG or TEMP
static int emit_get_rbase_and_offs(SH2 *sh2, sh2_reg_e r, int rmode, u32 *offs)
{
  uptr omask = emith_rw_offs_max(); // offset mask
  u32 mask = 0;
  u32 a;
  int poffs;
  int hr, hr2;
  uptr la;

  // is r constant and points to a memory region?
  if (! gconst_get(r, &a))
    return -1;
  poffs = dr_ctx_get_mem_ptr(sh2, a, &mask);
  if (poffs == -1)
    return -1;

  if (mask < 0x20000) {
    // data array, BIOS, DRAM, can't safely access directly since host addr may
    // change (BIOS/da code may run on either core, DRAM may be switched)
    hr = rcache_get_tmp();
    a = (a + *offs) & mask;
    if (poffs == offsetof(SH2, p_da)) {
      // access sh2->data_array directly
      a += offsetof(SH2, data_array);
      emith_add_r_r_ptr_imm(hr, CONTEXT_REG, a & ~omask);
    } else {
      emith_ctx_read_ptr(hr, poffs);
      if (a & ~omask)
        emith_add_r_r_ptr_imm(hr, hr, a & ~omask);
    }
    *offs = a & omask;
    return hr;
  }

  // ROM, SDRAM. Host address should be mmapped to be equal to SH2 address.
  la = (uptr)*(void **)((char *)sh2 + poffs);

  // if r is in rcache or needed soon anyway, and offs is relative to region,
  // and address translation fits in add_ptr_imm (s32), then use rcached const
  if (la == (s32)la && !(*offs & ~mask) && rcache_is_cached(r)) {
    u32 odd = a & 1; // need to fix odd address for correct byte addressing
    la -= (s32)((a & ~mask) - *offs - odd); // diff between reg and memory
    hr = hr2 = rcache_get_reg(r, rmode, NULL);
    if ((s32)a < 0) emith_uext_ptr(hr2);
    if ((la & ~omask) - odd) {
      hr = rcache_get_tmp();
      emith_add_r_r_ptr_imm(hr, hr2, (la & ~omask) - odd);
      rcache_free(hr2);
    }
    *offs = (la & omask);
  } else {
    // known fixed host address
    la += (a + *offs) & mask;
    hr = rcache_get_tmp();
    emith_move_r_ptr_imm(hr, la & ~omask);
    *offs = la & omask;
  }
  return hr;
}
// read const data from const ROM address
static int emit_get_rom_data(SH2 *sh2, sh2_reg_e r, u32 offs, int size, u32 *val)
{
  u32 a, mask;

  *val = 0;
  if (gconst_get(r, &a)) {
    a += offs;
    // check if rom is memory mapped (not bank switched), and address is in rom
    if (dr_is_rom(a) && p32x_sh2_get_mem_ptr(a, &mask, sh2) == sh2->p_rom) {
      switch (size & MF_SIZEMASK) {
      case 0: *val = (s8)p32x_sh2_read8(a, sh2s);   break;  // 8
      case 1: *val = (s16)p32x_sh2_read16(a, sh2s); break;  // 16
      case 2: *val = p32x_sh2_read32(a, sh2s);      break;  // 32
      }
      return 1;
    }
  }
  return 0;
}

static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
{
#if PROPAGATE_CONSTANTS
  gconst_new(dst, imm);
#else
  int hr = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  emith_move_r_imm(hr, imm);
#endif
}

static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
{
  if (gconst_check(src) || rcache_is_cached(src)) {
#if ALIAS_REGISTERS
    rcache_alias_vreg(dst, src);
#else
    int hr_s = rcache_get_reg(src, RC_GR_READ, NULL);
    int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
    emith_move_r_r(hr_d, hr_s);
    gconst_copy(dst, src);
#endif
  } else {
    int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
    emith_ctx_read(hr_d, src * 4);
  }
}

static void emit_add_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_add_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val + imm);
  } else
    gconst_new(r, val + imm);
}

static void emit_sub_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_sub_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val - imm);
  } else
    gconst_new(r, val - imm);
}

static void emit_sync_t_to_sr(void)
{
  // avoid reloading SR from context if there's nothing to do
  if (emith_get_t_cond() >= 0) {
    int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    emith_sync_t(sr);
  }
}
  2370. // rd = @(arg0)
  2371. static int emit_memhandler_read(int size)
  2372. {
  2373. int hr;
  2374. emit_sync_t_to_sr();
  2375. rcache_clean_tmp();
  2376. #ifndef DRC_SR_REG
// must write back cycles for the poll detection logic
  2378. if (guest_regs[SHR_SR].vreg != -1)
  2379. rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
  2380. #endif
  2381. rcache_invalidate_tmp();
  2382. if (size & MF_POLLING)
  2383. switch (size & MF_SIZEMASK) {
  2384. case 0: emith_call(sh2_drc_read8_poll); break; // 8
  2385. case 1: emith_call(sh2_drc_read16_poll); break; // 16
  2386. case 2: emith_call(sh2_drc_read32_poll); break; // 32
  2387. }
  2388. else
  2389. switch (size & MF_SIZEMASK) {
  2390. case 0: emith_call(sh2_drc_read8); break; // 8
  2391. case 1: emith_call(sh2_drc_read16); break; // 16
  2392. case 2: emith_call(sh2_drc_read32); break; // 32
  2393. }
  2394. hr = rcache_get_tmp_ret();
  2395. rcache_set_x16(hr, (size & MF_SIZEMASK) < 2, 0);
  2396. return hr;
  2397. }
  2398. // @(arg0) = arg1
  2399. static void emit_memhandler_write(int size)
  2400. {
  2401. emit_sync_t_to_sr();
  2402. rcache_clean_tmp();
  2403. #ifndef DRC_SR_REG
  2404. if (guest_regs[SHR_SR].vreg != -1)
  2405. rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
  2406. #endif
  2407. rcache_invalidate_tmp();
  2408. switch (size & MF_SIZEMASK) {
  2409. case 0: emith_call(sh2_drc_write8); break; // 8
  2410. case 1: emith_call(sh2_drc_write16); break; // 16
  2411. case 2: emith_call(sh2_drc_write32); break; // 32
  2412. }
  2413. }
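// note: the memhandler helpers use the plain C call ABI: address in arg0,
// store data in arg1, read result in the return register; e.g. a 16-bit
// read is roughly "arg0 = addr; call sh2_drc_read16; result = retreg",
// with the result then marked as sign-extended for the rcache.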
  2414. // rd = @(Rs,#offs); rd < 0 -> return a temp
  2415. static int emit_memhandler_read_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
  2416. {
  2417. int hr, hr2;
  2418. u32 val;
  2419. #if PROPAGATE_CONSTANTS
  2420. if (emit_get_rom_data(sh2, rs, offs, size, &val)) {
  2421. if (rd == SHR_TMP) {
  2422. hr2 = rcache_get_tmp();
  2423. emith_move_r_imm(hr2, val);
  2424. } else {
  2425. emit_move_r_imm32(rd, val);
  2426. hr2 = rcache_get_reg(rd, RC_GR_RMW, NULL);
  2427. }
  2428. rcache_set_x16(hr2, val == (s16)val, val == (u16)val);
  2429. if (size & MF_POSTINCR)
  2430. emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2431. return hr2;
  2432. }
  2433. val = size & MF_POSTINCR;
  2434. hr = emit_get_rbase_and_offs(sh2, rs, val ? RC_GR_RMW : RC_GR_READ, &offs);
  2435. if (hr != -1) {
  2436. if (rd == SHR_TMP)
  2437. hr2 = rcache_get_tmp();
  2438. else
  2439. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  2440. switch (size & MF_SIZEMASK) {
  2441. case 0: emith_read8s_r_r_offs(hr2, hr, offs ^ 1); break; // 8
  2442. case 1: emith_read16s_r_r_offs(hr2, hr, offs); break; // 16
  2443. case 2: emith_read_r_r_offs(hr2, hr, offs); emith_ror(hr2, hr2, 16); break;
  2444. }
  2445. rcache_free(hr);
  2446. if (size & MF_POSTINCR)
  2447. emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2448. return hr2;
  2449. }
  2450. #endif
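// the direct-access path above compensates for the word-swapped layout of
// the backing memory: byte addresses are XORed with 1 and 32-bit reads are
// rotated by 16 to reassemble the two 16-bit halves.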
  2451. if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
  2452. hr = rcache_get_tmp_arg(0);
  2453. emith_move_r_imm(hr, val + offs);
  2454. if (size & MF_POSTINCR)
  2455. gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  2456. } else if (size & MF_POSTINCR) {
  2457. hr = rcache_get_tmp_arg(0);
  2458. hr2 = rcache_get_reg(rs, RC_GR_RMW, NULL);
  2459. emith_add_r_r_imm(hr, hr2, offs);
  2460. emith_add_r_imm(hr2, 1 << (size & MF_SIZEMASK));
  2461. if (gconst_get(rs, &val))
  2462. gconst_set(rs, val + (1 << (size & MF_SIZEMASK)));
  2463. } else {
  2464. hr = rcache_get_reg_arg(0, rs, &hr2);
  2465. if (offs || hr != hr2)
  2466. emith_add_r_r_imm(hr, hr2, offs);
  2467. }
  2468. hr = emit_memhandler_read(size);
  2469. if (rd == SHR_TMP)
  2470. hr2 = hr;
  2471. else
  2472. #if REMAP_REGISTER
  2473. hr2 = rcache_map_reg(rd, hr);
  2474. #else
  2475. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  2476. #endif
  2477. if (hr != hr2) {
  2478. emith_move_r_r(hr2, hr);
  2479. rcache_free_tmp(hr);
  2480. }
  2481. return hr2;
  2482. }
  2483. // @(Rs,#offs) = rd; rd < 0 -> write arg1
  2484. static void emit_memhandler_write_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
  2485. {
  2486. int hr, hr2;
  2487. u32 val;
  2488. if (rd == SHR_TMP) {
  2489. host_arg2reg(hr2, 1); // already locked and prepared by caller
  2490. } else if ((size & MF_PREDECR) && rd == rs) { // must avoid caching rd in arg1
  2491. hr2 = rcache_get_reg_arg(1, rd, &hr);
  2492. if (hr != hr2) {
  2493. emith_move_r_r(hr2, hr);
  2494. rcache_free(hr2);
  2495. }
  2496. } else
  2497. hr2 = rcache_get_reg_arg(1, rd, NULL);
  2498. if (rd != SHR_TMP)
  2499. rcache_unlock(guest_regs[rd].vreg); // unlock in case rd is in arg0
  2500. if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
  2501. hr = rcache_get_tmp_arg(0);
  2502. if (size & MF_PREDECR) {
  2503. val -= 1 << (size & MF_SIZEMASK);
  2504. gconst_new(rs, val);
  2505. }
  2506. emith_move_r_imm(hr, val + offs);
  2507. } else if (offs || (size & MF_PREDECR)) {
  2508. if (size & MF_PREDECR)
  2509. emit_sub_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2510. rcache_unlock(guest_regs[rs].vreg); // unlock in case rs is in arg0
  2511. hr = rcache_get_reg_arg(0, rs, &hr2);
  2512. if (offs || hr != hr2)
  2513. emith_add_r_r_imm(hr, hr2, offs);
  2514. } else
  2515. hr = rcache_get_reg_arg(0, rs, NULL);
  2516. emit_memhandler_write(size);
  2517. }
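// design note: the value to store is moved into arg1 before rs is
// predecremented, so e.g. MOV.L Rn,@-Rn (rd == rs) writes the original
// register value as emitted here, not the decremented one.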
  2518. // rd = @(Rx,Ry); rd < 0 -> return a temp
  2519. static int emit_indirect_indexed_read(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
  2520. {
  2521. int hr, hr2;
  2522. int tx, ty;
  2523. #if PROPAGATE_CONSTANTS
  2524. u32 offs;
// if the constant is 0x01000000 or larger, it's most likely the base address part, not the index
  2526. if (gconst_get(ry, &offs) && offs < 0x01000000)
  2527. return emit_memhandler_read_rr(sh2, rd, rx, offs, size);
  2528. if (gconst_get(rx, &offs) && offs < 0x01000000)
  2529. return emit_memhandler_read_rr(sh2, rd, ry, offs, size);
  2530. #endif
  2531. hr = rcache_get_reg_arg(0, rx, &tx);
  2532. ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  2533. emith_add_r_r_r(hr, tx, ty);
  2534. hr = emit_memhandler_read(size);
  2535. if (rd == SHR_TMP)
  2536. hr2 = hr;
  2537. else
  2538. #if REMAP_REGISTER
  2539. hr2 = rcache_map_reg(rd, hr);
  2540. #else
  2541. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  2542. #endif
  2543. if (hr != hr2) {
  2544. emith_move_r_r(hr2, hr);
  2545. rcache_free_tmp(hr);
  2546. }
  2547. return hr2;
  2548. }
  2549. // @(Rx,Ry) = rd; rd < 0 -> write arg1
  2550. static void emit_indirect_indexed_write(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
  2551. {
  2552. int hr, tx, ty;
  2553. #if PROPAGATE_CONSTANTS
  2554. u32 offs;
// if the constant is 0x01000000 or larger, it's most likely the base address part, not the index
  2556. if (gconst_get(ry, &offs) && offs < 0x01000000)
  2557. return emit_memhandler_write_rr(sh2, rd, rx, offs, size);
  2558. if (gconst_get(rx, &offs) && offs < 0x01000000)
  2559. return emit_memhandler_write_rr(sh2, rd, ry, offs, size);
  2560. #endif
  2561. if (rd != SHR_TMP)
  2562. rcache_get_reg_arg(1, rd, NULL);
  2563. hr = rcache_get_reg_arg(0, rx, &tx);
  2564. ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  2565. emith_add_r_r_r(hr, tx, ty);
  2566. emit_memhandler_write(size);
  2567. }
  2568. // @Rn+,@Rm+
  2569. static void emit_indirect_read_double(SH2 *sh2, int *rnr, int *rmr, sh2_reg_e rn, sh2_reg_e rm, int size)
  2570. {
  2571. int tmp;
  2572. // unlock rn, rm here to avoid REG shortage in MAC operation
  2573. tmp = emit_memhandler_read_rr(sh2, SHR_TMP, rn, 0, size | MF_POSTINCR);
  2574. rcache_unlock(guest_regs[rn].vreg);
  2575. tmp = rcache_save_tmp(tmp);
  2576. *rmr = emit_memhandler_read_rr(sh2, SHR_TMP, rm, 0, size | MF_POSTINCR);
  2577. rcache_unlock(guest_regs[rm].vreg);
  2578. *rnr = rcache_restore_tmp(tmp);
  2579. }
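// note: used by the MAC.W/MAC.L handlers further below; the first operand
// is parked with rcache_save_tmp() so the second memhandler call cannot
// clobber it, then restored once both reads are done.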
static void emit_do_static_regs(int is_write, int tmpr)
{
  int i, r, count;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_STATIC|GRF_PINNED))
      r = cache_regs[guest_regs[i].vreg].hreg;
    else
      continue;

    for (count = 1; i < ARRAY_SIZE(guest_regs) - 1; i++, r++) {
      if ((guest_regs[i + 1].flags & (GRF_STATIC|GRF_PINNED)) &&
          cache_regs[guest_regs[i + 1].vreg].hreg == r + 1)
        count++;
      else
        break;
    }

    if (count > 1) {
      // i, r point to last item
      if (is_write)
        emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
      else
        emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
    } else {
      if (is_write)
        emith_ctx_write(r, i * 4);
      else
        emith_ctx_read(r, i * 4);
    }
  }
}
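// note: the loop above coalesces guest regs that live in consecutive host
// regs, so e.g. three adjacent statics are spilled/filled with a single
// multi-register ctx transfer instead of three separate ones.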
  2609. // block local link stuff
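// pc   - guest address of the branch target resp. link source
// ptr  - host address (target code, or the branch insn to be patched)
// bl   - associated external block link, if any
// mask - for blx entries: bit 0 = exit stub, bit 1 = local target (see
//        emit_branch_linkage_code); for pinned loops: mask of pinned regs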
struct linkage {
  u32 pc;
  void *ptr;
  struct block_link *bl;
  u32 mask;
};
static inline int find_in_linkage(const struct linkage *array, int size, u32 pc)
{
  int i;
  for (i = 0; i < size; i++)
    if (pc == array[i].pc)
      return i;
  return -1;
}
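// note: the sorted variant below relies on the array being ordered by
// ascending pc, which holds for branch_targets as they are collected in
// instruction order.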
static int find_in_sorted_linkage(const struct linkage *array, int size, u32 pc)
{
  // binary search in sorted array
  int left = 0, right = size-1;
  while (left <= right)
  {
    int middle = (left + right) / 2;
    if (array[middle].pc == pc)
      return middle;
    else if (array[middle].pc < pc)
      left = middle + 1;
    else
      right = middle - 1;
  }
  return -1;
}
  2640. static void emit_branch_linkage_code(SH2 *sh2, struct block_desc *block, int tcache_id,
  2641. const struct linkage *targets, int target_count,
  2642. const struct linkage *links, int link_count)
  2643. {
  2644. struct block_link *bl;
  2645. int u, v, tmp;
  2646. emith_flush();
  2647. for (u = 0; u < link_count; u++) {
  2648. emith_pool_check();
  2649. // look up local branch targets
  2650. if (links[u].mask & 0x2) {
  2651. v = find_in_sorted_linkage(targets, target_count, links[u].pc);
  2652. if (v < 0 || ! targets[v].ptr) {
  2653. // forward branch not yet resolved, prepare external linking
  2654. emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
  2655. bl = dr_prepare_ext_branch(block->entryp, links[u].pc, sh2->is_slave, tcache_id);
  2656. if (bl)
  2657. bl->type = BL_LDJMP;
  2658. tmp = rcache_get_tmp_arg(0);
  2659. emith_move_r_imm(tmp, links[u].pc);
  2660. rcache_free_tmp(tmp);
  2661. emith_jump_patchable(sh2_drc_dispatcher);
  2662. } else if (emith_jump_patch_inrange(links[u].ptr, targets[v].ptr)) {
  2663. // inrange local branch
  2664. emith_jump_patch(links[u].ptr, targets[v].ptr, NULL);
  2665. } else {
  2666. // far local branch
  2667. emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
  2668. emith_jump(targets[v].ptr);
  2669. }
  2670. } else {
  2671. // external or exit, emit blx area entry
  2672. void *target = (links[u].mask & 0x1 ? sh2_drc_exit : sh2_drc_dispatcher);
  2673. if (links[u].bl)
  2674. links[u].bl->blx = tcache_ptr;
  2675. emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
  2676. tmp = rcache_get_tmp_arg(0);
  2677. emith_move_r_imm(tmp, links[u].pc & ~1);
  2678. rcache_free_tmp(tmp);
  2679. emith_jump(target);
  2680. }
  2681. }
  2682. }
#define DELAY_SAVE_T(sr) { \
  int t_ = rcache_get_tmp(); \
  emith_bic_r_imm(sr, T_save); \
  emith_and_r_r_imm(t_, sr, 1); \
  emith_or_r_r_lsl(sr, t_, T_SHIFT); \
  rcache_free_tmp(t_); \
}
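// DELAY_SAVE_T stashes the current T bit into the T_save position of SR so
// a branch whose delay slot modifies T can still test the pre-slot value;
// assuming T_save is the single bit at T_SHIFT, this amounts to
//   SR = (SR & ~T_save) | ((SR & 1) << T_SHIFT)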
#define FLUSH_CYCLES(sr) \
  if (cycles > 0) { \
    emith_sub_r_imm(sr, cycles << 12); \
    cycles = 0; \
  }
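// the remaining cycle budget is banked in the upper bits of the cached SR
// (shifted left by 12), so flushing e.g. 3 pending cycles emits a single
// "sub sr, 3 << 12"; blocks exit to the dispatcher once it drops to or
// below zero (see the cycle checks at block entry points below).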
  2695. static void *dr_get_pc_base(u32 pc, SH2 *sh2);
  2696. static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
  2697. {
  2698. // branch targets in current block
  2699. static struct linkage branch_targets[MAX_LOCAL_TARGETS];
  2700. int branch_target_count = 0;
  2701. // unresolved local or external targets with block link/exit area if needed
  2702. static struct linkage blx_targets[MAX_LOCAL_BRANCHES];
  2703. int blx_target_count = 0;
  2704. static u8 op_flags[BLOCK_INSN_LIMIT];
  2705. enum flg_states { FLG_UNKNOWN, FLG_UNUSED, FLG_0, FLG_1 };
  2706. struct drcf {
  2707. int delay_reg:8;
  2708. u32 loop_type:8;
  2709. u32 polling:8;
  2710. u32 pinning:1;
  2711. u32 test_irq:1;
  2712. u32 pending_branch_direct:1;
  2713. u32 pending_branch_indirect:1;
  2714. u32 Tflag:2, Mflag:2;
  2715. } drcf = { 0, };
  2716. #if LOOP_OPTIMIZER
// loops with pinned registers for optimization
// pinned regs are like statics and don't need saving/restoring inside a loop
  2719. static struct linkage pinned_loops[MAX_LOCAL_TARGETS/16];
  2720. int pinned_loop_count = 0;
  2721. #endif
  2722. // PC of current, first, last SH2 insn
  2723. u32 pc, base_pc, end_pc;
  2724. u32 base_literals, end_literals;
  2725. u8 *block_entry_ptr;
  2726. struct block_desc *block;
  2727. struct block_entry *entry;
  2728. struct block_link *bl;
  2729. u16 *dr_pc_base;
  2730. struct op_data *opd;
  2731. int blkid_main = 0;
  2732. int tmp, tmp2;
  2733. int cycles;
  2734. int i, v;
  2735. u32 u, m1, m2, m3, m4;
  2736. int op;
  2737. u16 crc;
  2738. base_pc = sh2->pc;
  2739. // get base/validate PC
  2740. dr_pc_base = dr_get_pc_base(base_pc, sh2);
  2741. if (dr_pc_base == (void *)-1) {
  2742. printf("invalid PC, aborting: %08x\n", base_pc);
  2743. // FIXME: be less destructive
  2744. exit(1);
  2745. }
  2746. // initial passes to disassemble and analyze the block
  2747. crc = scan_block(base_pc, sh2->is_slave, op_flags, &end_pc, &base_literals, &end_literals);
  2748. end_literals = dr_check_nolit(base_literals, end_literals, tcache_id);
  2749. if (base_literals == end_literals) // map empty lit section to end of code
  2750. base_literals = end_literals = end_pc;
  2751. // if there is already a translated but inactive block, reuse it
  2752. block = dr_find_inactive_block(tcache_id, crc, base_pc, end_pc - base_pc,
  2753. base_literals, end_literals - base_literals);
  2754. if (block) {
  2755. dbg(2, "== %csh2 reuse block %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
  2756. base_pc, end_pc, base_literals, end_literals, block->entryp->tcache_ptr);
  2757. dr_activate_block(block, tcache_id, sh2->is_slave);
  2758. emith_update_cache();
  2759. return block->entryp[0].tcache_ptr;
  2760. }
  2761. // collect branch_targets that don't land on delay slots
  2762. m1 = m2 = m3 = m4 = v = op = 0;
  2763. for (pc = base_pc, i = 0; pc < end_pc; i++, pc += 2) {
  2764. if (op_flags[i] & OF_DELAY_OP)
  2765. op_flags[i] &= ~OF_BTARGET;
  2766. if (op_flags[i] & OF_BTARGET) {
  2767. if (branch_target_count < ARRAY_SIZE(branch_targets))
  2768. branch_targets[branch_target_count++] = (struct linkage) { .pc = pc };
  2769. else {
  2770. printf("warning: linkage overflow\n");
  2771. end_pc = pc;
  2772. break;
  2773. }
  2774. }
  2775. if (ops[i].op == OP_LDC && (ops[i].dest & BITMASK1(SHR_SR)) && pc+2 < end_pc)
  2776. op_flags[i+1] |= OF_BTARGET; // RTE entrypoint in case of SR.IMASK change
  2777. // unify T and SR since rcache doesn't know about "virtual" guest regs
  2778. if (ops[i].source & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
  2779. if (ops[i].dest & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
  2780. if (ops[i].dest & BITMASK1(SHR_T)) ops[i].dest |= BITMASK1(SHR_SR);
  2781. #if LOOP_DETECTION
  2782. // loop types detected:
  2783. // 1. target: ... BRA target -> idle loop
  2784. // 2. target: ... delay insn ... BF target -> delay loop
  2785. // 3. target: ... poll insn ... BF/BT target -> poll loop
  2786. // 4. target: ... poll insn ... BF/BT exit ... BRA target, exit: -> poll
  2787. // conditions:
  2788. // a. no further branch targets between target and back jump.
  2789. // b. no unconditional branch insn inside the loop.
  2790. // c. exactly one poll or delay insn is allowed inside a delay/poll loop
  2791. // (scan_block marks loops only if they meet conditions a through c)
  2792. // d. idle loops do not modify anything but PC,SR and contain no branches
  2793. // e. delay/poll loops do not modify anything but the concerned reg,PC,SR
  2794. // f. loading constants into registers inside the loop is allowed
  2795. // g. a delay/poll loop must have a conditional branch somewhere
  2796. // h. an idle loop must not have a conditional branch
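// illustrative SH2 examples of the loop shapes above (hypothetical code):
//   idle (1):   target: bra target        ! nop in delay slot
//   delay (2):  target: dt r6             ! bf target
//   poll (3):   target: mov.l @r1,r0      ! tst r0,r0 ; bf target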
  2797. if (op_flags[i] & OF_BTARGET) {
  2798. // possible loop entry point
  2799. drcf.loop_type = op_flags[i] & OF_LOOP;
  2800. drcf.pending_branch_direct = drcf.pending_branch_indirect = 0;
  2801. op = OF_IDLE_LOOP; // loop type
  2802. v = i;
  2803. m1 = m2 = m3 = m4 = 0;
if (!drcf.loop_type) // reset basic loop if it isn't recognized as a loop
  2805. op_flags[i] &= ~OF_BASIC_LOOP;
  2806. }
  2807. if (drcf.loop_type) {
  2808. // calculate reg masks for loop pinning
  2809. m4 |= ops[i].source & ~m3;
  2810. m3 |= ops[i].dest;
  2811. // detect loop type, and store poll/delay register
  2812. if (op_flags[i] & OF_POLL_INSN) {
  2813. op = OF_POLL_LOOP;
  2814. m1 |= ops[i].dest; // loop poll/delay regs
  2815. } else if (op_flags[i] & OF_DELAY_INSN) {
  2816. op = OF_DELAY_LOOP;
  2817. m1 |= ops[i].dest;
  2818. } else if (ops[i].op != OP_LOAD_POOL && ops[i].op != OP_LOAD_CONST
  2819. && (ops[i].op != OP_MOVE || op != OF_POLL_LOOP)) {
  2820. // not (MOV @(PC) or MOV # or (MOV reg and poll)), condition f
  2821. m2 |= ops[i].dest; // regs modified by other insns
  2822. }
  2823. // branch detector
  2824. if (OP_ISBRAIMM(ops[i].op)) {
  2825. if (ops[i].imm == base_pc + 2*v)
  2826. drcf.pending_branch_direct = 1; // backward branch detected
  2827. else
  2828. op_flags[v] &= ~OF_BASIC_LOOP; // no basic loop
  2829. }
  2830. if (OP_ISBRACND(ops[i].op))
  2831. drcf.pending_branch_indirect = 1; // conditions g,h - cond.branch
  2832. // poll/idle loops terminate with their backwards branch to the loop start
  2833. if (drcf.pending_branch_direct && !(op_flags[i+1] & OF_DELAY_OP)) {
  2834. m2 &= ~(m1 | BITMASK3(SHR_PC, SHR_SR, SHR_T)); // conditions d,e + g,h
  2835. if (m2 || ((op == OF_IDLE_LOOP) == (drcf.pending_branch_indirect)))
  2836. op = 0; // conditions not met
  2837. op_flags[v] = (op_flags[v] & ~OF_LOOP) | op; // set loop type
  2838. drcf.loop_type = 0;
  2839. #if LOOP_OPTIMIZER
  2840. if (op_flags[v] & OF_BASIC_LOOP) {
  2841. m3 &= ~rcache_regs_static & ~BITMASK5(SHR_PC, SHR_PR, SHR_SR, SHR_T, SHR_MEM);
  2842. if (m3 && count_bits(m3) < count_bits(rcache_vregs_reg) &&
  2843. pinned_loop_count < ARRAY_SIZE(pinned_loops)-1) {
  2844. pinned_loops[pinned_loop_count++] =
  2845. (struct linkage) { .pc = base_pc + 2*v, .mask = m3 };
  2846. } else
  2847. op_flags[v] &= ~OF_BASIC_LOOP;
  2848. }
  2849. #endif
  2850. }
  2851. }
  2852. #endif
  2853. }
  2854. tcache_ptr = dr_prepare_cache(tcache_id, (end_pc - base_pc) / 2, branch_target_count);
  2855. #if (DRC_DEBUG & 4)
  2856. tcache_dsm_ptrs[tcache_id] = tcache_ptr;
  2857. #endif
  2858. block = dr_add_block(branch_target_count, base_pc, end_pc - base_pc,
  2859. base_literals, end_literals-base_literals, crc, sh2->is_slave, &blkid_main);
  2860. if (block == NULL)
  2861. return NULL;
  2862. block_entry_ptr = tcache_ptr;
  2863. dbg(2, "== %csh2 block #%d,%d %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
  2864. tcache_id, blkid_main, base_pc, end_pc, base_literals, end_literals, block_entry_ptr);
  2865. // clear stale state after compile errors
  2866. rcache_invalidate();
  2867. emith_invalidate_t();
  2868. drcf = (struct drcf) { 0 };
  2869. #if LOOP_OPTIMIZER
  2870. pinned_loops[pinned_loop_count].pc = -1;
  2871. pinned_loop_count = 0;
  2872. #endif
  2873. // -------------------------------------------------
  2874. // 3rd pass: actual compilation
  2875. pc = base_pc;
  2876. cycles = 0;
  2877. for (i = 0; pc < end_pc; i++)
  2878. {
  2879. u32 delay_dep_fw = 0, delay_dep_bk = 0;
  2880. int tmp3, tmp4;
  2881. int sr;
  2882. if (op_flags[i] & OF_BTARGET)
  2883. {
  2884. if (pc != base_pc)
  2885. {
  2886. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2887. FLUSH_CYCLES(sr);
  2888. emith_sync_t(sr);
  2889. drcf.Mflag = FLG_UNKNOWN;
  2890. rcache_flush();
  2891. emith_flush();
  2892. }
  2893. // make block entry
  2894. v = block->entry_count;
  2895. entry = &block->entryp[v];
  2896. if (v < branch_target_count)
  2897. {
  2899. entry->pc = pc;
  2900. entry->tcache_ptr = tcache_ptr;
  2901. entry->links = entry->o_links = NULL;
  2902. #if (DRC_DEBUG & 2)
  2903. entry->block = block;
  2904. #endif
  2905. block->entry_count++;
  2906. dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p",
  2907. sh2->is_slave ? 's' : 'm', tcache_id, blkid_main,
  2908. pc, tcache_ptr);
  2909. }
  2910. else {
  2911. dbg(1, "too many entryp for block #%d,%d pc=%08x",
  2912. tcache_id, blkid_main, pc);
  2913. break;
  2914. }
  2915. v = find_in_sorted_linkage(branch_targets, branch_target_count, pc);
  2916. if (v >= 0)
  2917. branch_targets[v].ptr = tcache_ptr;
  2918. #if LOOP_DETECTION
  2919. drcf.loop_type = op_flags[i] & OF_LOOP;
  2920. drcf.delay_reg = -1;
  2921. drcf.polling = (drcf.loop_type == OF_POLL_LOOP ? MF_POLLING : 0);
  2922. #endif
  2923. rcache_clean();
  2924. #if (DRC_DEBUG & 0x10)
  2925. tmp = rcache_get_tmp_arg(0);
  2926. emith_move_r_imm(tmp, pc);
  2927. tmp = emit_memhandler_read(1);
  2928. tmp2 = rcache_get_tmp();
  2929. tmp3 = rcache_get_tmp();
  2930. emith_move_r_imm(tmp2, (s16)FETCH_OP(pc));
  2931. emith_move_r_imm(tmp3, 0);
  2932. emith_cmp_r_r(tmp, tmp2);
  2933. EMITH_SJMP_START(DCOND_EQ);
  2934. emith_read_r_r_offs_c(DCOND_NE, tmp3, tmp3, 0); // crash
  2935. EMITH_SJMP_END(DCOND_EQ);
  2936. rcache_free_tmp(tmp);
  2937. rcache_free_tmp(tmp2);
  2938. rcache_free_tmp(tmp3);
  2939. #endif
  2940. // check cycles
  2941. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  2942. #if LOOP_OPTIMIZER
  2943. if (op_flags[i] & OF_BASIC_LOOP) {
  2944. if (pinned_loops[pinned_loop_count].pc == pc) {
  2945. // pin needed regs on loop entry
  2946. FOR_ALL_BITS_SET_DO(pinned_loops[pinned_loop_count].mask, v, rcache_pin_reg(v));
  2947. emith_flush();
  2948. // store current PC as loop target
  2949. pinned_loops[pinned_loop_count].ptr = tcache_ptr;
  2950. drcf.pinning = 1;
  2951. } else
  2952. op_flags[i] &= ~OF_BASIC_LOOP;
  2953. }
  2954. if (op_flags[i] & OF_BASIC_LOOP) {
// when exiting a pinned loop, pinned regs must be written back to ctx
// since they are reloaded in the loop entry code
  2957. emith_cmp_r_imm(sr, 0);
  2958. EMITH_JMP_START(DCOND_GT);
  2959. rcache_save_pinned();
  2960. if (blx_target_count < ARRAY_SIZE(blx_targets)) {
// exit via stub in blx table (saves 1-3 insns in the main flow)
  2962. blx_targets[blx_target_count++] =
  2963. (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
  2964. emith_jump_patchable(tcache_ptr);
  2965. } else {
  2966. // blx table full, must inline exit code
  2967. tmp = rcache_get_tmp_arg(0);
  2968. emith_move_r_imm(tmp, pc);
  2969. emith_jump(sh2_drc_exit);
  2970. rcache_free_tmp(tmp);
  2971. }
  2972. EMITH_JMP_END(DCOND_GT);
  2973. } else
  2974. #endif
  2975. {
  2976. if (blx_target_count < ARRAY_SIZE(blx_targets)) {
// exit via stub in blx table (saves 1-3 insns in the main flow)
  2978. emith_cmp_r_imm(sr, 0);
  2979. blx_targets[blx_target_count++] =
  2980. (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
  2981. emith_jump_cond_patchable(DCOND_LE, tcache_ptr);
  2982. } else {
  2983. // blx table full, must inline exit code
  2984. tmp = rcache_get_tmp_arg(0);
  2985. emith_cmp_r_imm(sr, 0);
  2986. EMITH_SJMP_START(DCOND_GT);
  2987. emith_move_r_imm_c(DCOND_LE, tmp, pc);
  2988. emith_jump_cond(DCOND_LE, sh2_drc_exit);
  2989. EMITH_SJMP_END(DCOND_GT);
  2990. rcache_free_tmp(tmp);
  2991. }
  2992. }
  2993. #if (DRC_DEBUG & 32)
  2994. // block hit counter
  2995. tmp = rcache_get_tmp_arg(0);
  2996. tmp2 = rcache_get_tmp_arg(1);
  2997. emith_move_r_ptr_imm(tmp, (uptr)entry);
  2998. emith_read_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  2999. emith_add_r_imm(tmp2, 1);
  3000. emith_write_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  3001. rcache_free_tmp(tmp);
  3002. rcache_free_tmp(tmp2);
  3003. #endif
  3004. #if (DRC_DEBUG & (8|256|512|1024))
  3005. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3006. emith_sync_t(sr);
  3007. rcache_clean();
  3008. tmp = rcache_used_hregs_mask();
  3009. emith_save_caller_regs(tmp);
  3010. emit_do_static_regs(1, 0);
  3011. rcache_get_reg_arg(2, SHR_SR, NULL);
  3012. tmp2 = rcache_get_tmp_arg(0);
  3013. tmp3 = rcache_get_tmp_arg(1);
  3014. tmp4 = rcache_get_tmp();
  3015. emith_move_r_ptr_imm(tmp2, tcache_ptr);
  3016. emith_move_r_r_ptr(tmp3, CONTEXT_REG);
  3017. emith_move_r_imm(tmp4, pc);
  3018. emith_ctx_write(tmp4, SHR_PC * 4);
  3019. rcache_invalidate_tmp();
  3020. emith_call(sh2_drc_log_entry);
  3021. emith_restore_caller_regs(tmp);
  3022. #endif
  3023. do_host_disasm(tcache_id);
  3024. rcache_unlock_all();
  3025. }
  3026. #ifdef DRC_CMP
  3027. if (!(op_flags[i] & OF_DELAY_OP)) {
  3028. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3029. FLUSH_CYCLES(sr);
  3030. emith_sync_t(sr);
  3031. emit_move_r_imm32(SHR_PC, pc);
  3032. rcache_clean();
  3033. tmp = rcache_used_hregs_mask();
  3034. emith_save_caller_regs(tmp);
  3035. emit_do_static_regs(1, 0);
  3036. emith_pass_arg_r(0, CONTEXT_REG);
  3037. emith_call(do_sh2_cmp);
  3038. emith_restore_caller_regs(tmp);
  3039. }
  3040. #endif
  3041. // emit blx area if limits are approached
  3042. if (blx_target_count && (blx_target_count > ARRAY_SIZE(blx_targets)-4 ||
  3043. !emith_jump_patch_inrange(blx_targets[0].ptr, tcache_ptr+0x100))) {
  3044. u8 *jp;
  3045. rcache_invalidate_tmp();
  3046. jp = tcache_ptr;
  3047. emith_jump_patchable(tcache_ptr);
  3048. emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
  3049. branch_target_count, blx_targets, blx_target_count);
  3050. blx_target_count = 0;
  3051. do_host_disasm(tcache_id);
  3052. emith_jump_patch(jp, tcache_ptr, NULL);
  3053. }
  3054. emith_pool_check();
  3055. opd = &ops[i];
  3056. op = FETCH_OP(pc);
  3057. #if (DRC_DEBUG & 4)
  3058. DasmSH2(sh2dasm_buff, pc, op);
  3059. if (op_flags[i] & OF_BTARGET) {
  3060. if ((op_flags[i] & OF_LOOP) == OF_DELAY_LOOP) tmp3 = '+';
  3061. else if ((op_flags[i] & OF_LOOP) == OF_POLL_LOOP) tmp3 = '=';
  3062. else if ((op_flags[i] & OF_LOOP) == OF_IDLE_LOOP) tmp3 = '~';
  3063. else tmp3 = '*';
  3064. } else if (drcf.loop_type) tmp3 = '.';
  3065. else tmp3 = ' ';
  3066. printf("%c%08x %04x %s\n", tmp3, pc, op, sh2dasm_buff);
  3067. #endif
  3068. pc += 2;
  3069. #if (DRC_DEBUG & 2)
  3070. insns_compiled++;
  3071. #endif
  3072. if (op_flags[i] & OF_DELAY_OP)
  3073. {
  3074. // handle delay slot dependencies
  3075. delay_dep_fw = opd->dest & ops[i-1].source;
  3076. delay_dep_bk = opd->source & ops[i-1].dest;
  3077. if (delay_dep_fw & BITMASK1(SHR_T)) {
  3078. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3079. emith_sync_t(sr);
  3080. DELAY_SAVE_T(sr);
  3081. }
  3082. if (delay_dep_bk & BITMASK1(SHR_PC)) {
  3083. if (opd->op != OP_LOAD_POOL && opd->op != OP_MOVA) {
// can only be those two, really..
  3085. elprintf_sh2(sh2, EL_ANOMALY,
  3086. "drc: illegal slot insn %04x @ %08x?", op, pc - 2);
  3087. }
  3088. // store PC for MOVA/MOV @PC address calculation
  3089. if (opd->imm != 0)
  3090. ; // case OP_BRANCH - addr already resolved in scan_block
  3091. else {
  3092. switch (ops[i-1].op) {
  3093. case OP_BRANCH:
  3094. emit_move_r_imm32(SHR_PC, ops[i-1].imm);
  3095. break;
  3096. case OP_BRANCH_CT:
  3097. case OP_BRANCH_CF:
  3098. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3099. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
  3100. emith_move_r_imm(tmp, pc);
  3101. tmp2 = emith_tst_t(sr, (ops[i-1].op == OP_BRANCH_CT));
  3102. tmp3 = emith_invert_cond(tmp2);
  3103. EMITH_SJMP_START(tmp3);
  3104. emith_move_r_imm_c(tmp2, tmp, ops[i-1].imm);
  3105. EMITH_SJMP_END(tmp3);
  3106. break;
  3107. case OP_BRANCH_N: // BT/BF known not to be taken
  3108. // XXX could modify opd->imm instead?
  3109. emit_move_r_imm32(SHR_PC, pc);
  3110. break;
  3111. // case OP_BRANCH_R OP_BRANCH_RF - PC already loaded
  3112. }
  3113. }
  3114. }
  3115. //if (delay_dep_fw & ~BITMASK1(SHR_T))
  3116. // dbg(1, "unhandled delay_dep_fw: %x", delay_dep_fw & ~BITMASK1(SHR_T));
  3117. if (delay_dep_bk & ~BITMASK2(SHR_PC, SHR_PR))
  3118. dbg(1, "unhandled delay_dep_bk: %x", delay_dep_bk);
  3119. }
  3120. // inform cache about future register usage
  3121. u32 late = 0; // regs read by future ops
  3122. u32 write = 0; // regs written to (to detect write before read)
  3123. u32 soon = 0; // regs read soon
  3124. for (v = 1; v <= 9; v++) {
  3125. // no sense in looking any further than the next rcache flush
  3126. tmp = ((op_flags[i+v] & OF_BTARGET) || (op_flags[i+v-1] & OF_DELAY_OP) ||
  3127. (OP_ISBRACND(opd[v-1].op) && !(op_flags[i+v] & OF_DELAY_OP)));
  3128. // XXX looking behind cond branch to avoid evicting regs used later?
  3129. if (pc + 2*v <= end_pc && !tmp) { // (pc already incremented above)
  3130. late |= opd[v].source & ~write;
  3131. // ignore source regs after they have been written to
  3132. write |= opd[v].dest;
  3133. // regs needed in the next few instructions
  3134. if (v <= 4)
  3135. soon = late;
  3136. } else
  3137. break;
  3138. }
  3139. rcache_set_usage_now(opd[0].source); // current insn
  3140. rcache_set_usage_soon(soon); // insns 1-4
  3141. rcache_set_usage_late(late & ~soon); // insns 5-9
  3142. rcache_set_usage_discard(write & ~(late|soon));
  3143. if (v <= 9)
  3144. // upcoming rcache_flush, start writing back unused dirty stuff
  3145. rcache_clean_masked(rcache_dirty_mask() & ~(write|opd[0].dest));
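// the rcache now knows which guest regs are read by the current insn, read
// soon (insns 1-4), read late (insns 5-9), or overwritten before any read,
// and uses that to pick spill/eviction candidates for the code below.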
  3146. switch (opd->op)
  3147. {
  3148. case OP_BRANCH_N:
  3149. // never taken, just use up cycles
  3150. goto end_op;
  3151. case OP_BRANCH:
  3152. case OP_BRANCH_CT:
  3153. case OP_BRANCH_CF:
  3154. if (opd->dest & BITMASK1(SHR_PR))
  3155. emit_move_r_imm32(SHR_PR, pc + 2);
  3156. drcf.pending_branch_direct = 1;
  3157. goto end_op;
  3158. case OP_BRANCH_R:
  3159. if (opd->dest & BITMASK1(SHR_PR))
  3160. emit_move_r_imm32(SHR_PR, pc + 2);
  3161. emit_move_r_r(SHR_PC, opd->rm);
  3162. drcf.pending_branch_indirect = 1;
  3163. goto end_op;
  3164. case OP_BRANCH_RF:
  3165. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3166. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
  3167. emith_move_r_imm(tmp, pc + 2);
  3168. if (opd->dest & BITMASK1(SHR_PR)) {
  3169. tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE, NULL);
  3170. emith_move_r_r(tmp3, tmp);
  3171. }
  3172. emith_add_r_r(tmp, tmp2);
  3173. if (gconst_get(GET_Rn(), &u))
  3174. gconst_set(SHR_PC, pc + 2 + u);
  3175. drcf.pending_branch_indirect = 1;
  3176. goto end_op;
  3177. case OP_SLEEP: // SLEEP 0000000000011011
  3178. printf("TODO sleep\n");
  3179. goto end_op;
  3180. case OP_RTE: // RTE 0000000000101011
  3181. emith_invalidate_t();
  3182. // pop PC
  3183. tmp = emit_memhandler_read_rr(sh2, SHR_PC, SHR_SP, 0, 2 | MF_POSTINCR);
  3184. rcache_free(tmp);
  3185. // pop SR
  3186. tmp = emit_memhandler_read_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_POSTINCR);
  3187. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3188. emith_write_sr(sr, tmp);
  3189. rcache_free_tmp(tmp);
  3190. drcf.test_irq = 1;
  3191. drcf.pending_branch_indirect = 1;
  3192. goto end_op;
  3193. case OP_UNDEFINED:
  3194. elprintf_sh2(sh2, EL_ANOMALY, "drc: unhandled op %04x @ %08x", op, pc-2);
  3195. opd->imm = (op_flags[i] & OF_B_IN_DS) ? 6 : 4;
  3196. // fallthrough
  3197. case OP_TRAPA: // TRAPA #imm 11000011iiiiiiii
  3198. // push SR
  3199. tmp = rcache_get_reg_arg(1, SHR_SR, &tmp2);
  3200. emith_sync_t(tmp2);
  3201. emith_clear_msb(tmp, tmp2, 22);
  3202. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  3203. // push PC
  3204. if (opd->op == OP_TRAPA) {
  3205. tmp = rcache_get_tmp_arg(1);
  3206. emith_move_r_imm(tmp, pc);
  3207. } else if (drcf.pending_branch_indirect) {
  3208. tmp = rcache_get_reg_arg(1, SHR_PC, NULL);
  3209. } else {
  3210. tmp = rcache_get_tmp_arg(1);
  3211. emith_move_r_imm(tmp, pc - 2);
  3212. }
  3213. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  3214. // obtain new PC
  3215. emit_memhandler_read_rr(sh2, SHR_PC, SHR_VBR, opd->imm * 4, 2);
  3216. // indirect jump -> back to dispatcher
  3217. drcf.pending_branch_indirect = 1;
  3218. goto end_op;
  3219. case OP_LOAD_POOL:
  3220. #if PROPAGATE_CONSTANTS
  3221. if ((opd->imm && opd->imm >= base_pc && opd->imm < end_literals) ||
  3222. dr_is_rom(opd->imm))
  3223. {
  3224. if (opd->size == 2)
  3225. u = FETCH32(opd->imm);
  3226. else
  3227. u = (s16)FETCH_OP(opd->imm);
  3228. // tweak for Blackthorne: avoid stack overwriting
  3229. if (GET_Rn() == SHR_SP && u == 0x0603f800) u = 0x0603f880;
  3230. gconst_new(GET_Rn(), u);
  3231. }
  3232. else
  3233. #endif
  3234. {
  3235. if (opd->imm != 0) {
  3236. tmp = rcache_get_tmp_arg(0);
  3237. emith_move_r_imm(tmp, opd->imm);
  3238. } else {
  3239. // have to calculate read addr from PC for delay slot
  3240. tmp = rcache_get_reg_arg(0, SHR_PC, &tmp2);
  3241. if (opd->size == 2) {
  3242. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  3243. emith_bic_r_imm(tmp, 3);
  3244. }
  3245. else
  3246. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 2);
  3247. }
  3248. tmp2 = emit_memhandler_read(opd->size);
  3249. #if REMAP_REGISTER
  3250. tmp3 = rcache_map_reg(GET_Rn(), tmp2);
  3251. #else
  3252. tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3253. #endif
  3254. if (tmp3 != tmp2) {
  3255. emith_move_r_r(tmp3, tmp2);
  3256. rcache_free_tmp(tmp2);
  3257. }
  3258. }
  3259. goto end_op;
  3260. case OP_MOVA: // MOVA @(disp,PC),R0 11000111dddddddd
  3261. if (opd->imm != 0)
  3262. emit_move_r_imm32(SHR_R0, opd->imm);
  3263. else {
  3264. // have to calculate addr from PC for delay slot
  3265. tmp2 = rcache_get_reg(SHR_PC, RC_GR_READ, NULL);
  3266. tmp = rcache_get_reg(SHR_R0, RC_GR_WRITE, NULL);
  3267. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  3268. emith_bic_r_imm(tmp, 3);
  3269. }
  3270. goto end_op;
  3271. }
  3272. switch ((op >> 12) & 0x0f)
  3273. {
  3274. /////////////////////////////////////////////
  3275. case 0x00:
  3276. switch (op & 0x0f)
  3277. {
  3278. case 0x02:
  3279. switch (GET_Fx())
  3280. {
  3281. case 0: // STC SR,Rn 0000nnnn00000010
  3282. tmp2 = SHR_SR;
  3283. break;
  3284. case 1: // STC GBR,Rn 0000nnnn00010010
  3285. tmp2 = SHR_GBR;
  3286. break;
  3287. case 2: // STC VBR,Rn 0000nnnn00100010
  3288. tmp2 = SHR_VBR;
  3289. break;
  3290. default:
  3291. goto default_;
  3292. }
  3293. if (tmp2 == SHR_SR) {
  3294. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3295. emith_sync_t(sr);
  3296. tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3297. emith_clear_msb(tmp, sr, 22); // reserved bits defined by ISA as 0
  3298. } else
  3299. emit_move_r_r(GET_Rn(), tmp2);
  3300. goto end_op;
  3301. case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
  3302. case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
  3303. case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
  3304. emit_indirect_indexed_write(sh2, GET_Rm(), SHR_R0, GET_Rn(), op & 3);
  3305. goto end_op;
  3306. case 0x07: // MUL.L Rm,Rn 0000nnnnmmmm0111
  3307. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3308. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3309. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  3310. emith_mul(tmp3, tmp2, tmp);
  3311. goto end_op;
  3312. case 0x08:
  3313. switch (GET_Fx())
  3314. {
  3315. case 0: // CLRT 0000000000001000
  3316. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3317. #if T_OPTIMIZER
  3318. if (~rcache_regs_discard & BITMASK1(SHR_T))
  3319. #endif
  3320. emith_set_t(sr, 0);
  3321. break;
  3322. case 1: // SETT 0000000000011000
  3323. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3324. #if T_OPTIMIZER
  3325. if (~rcache_regs_discard & BITMASK1(SHR_T))
  3326. #endif
  3327. emith_set_t(sr, 1);
  3328. break;
  3329. case 2: // CLRMAC 0000000000101000
  3330. emit_move_r_imm32(SHR_MACL, 0);
  3331. emit_move_r_imm32(SHR_MACH, 0);
  3332. break;
  3333. default:
  3334. goto default_;
  3335. }
  3336. goto end_op;
  3337. case 0x09:
  3338. switch (GET_Fx())
  3339. {
  3340. case 0: // NOP 0000000000001001
  3341. break;
  3342. case 1: // DIV0U 0000000000011001
  3343. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3344. emith_invalidate_t();
  3345. emith_bic_r_imm(sr, M|Q|T);
  3346. drcf.Mflag = FLG_0;
  3347. break;
  3348. case 2: // MOVT Rn 0000nnnn00101001
  3349. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3350. emith_sync_t(sr);
  3351. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3352. emith_clear_msb(tmp2, sr, 31);
  3353. break;
  3354. default:
  3355. goto default_;
  3356. }
  3357. goto end_op;
  3358. case 0x0a:
  3359. switch (GET_Fx())
  3360. {
  3361. case 0: // STS MACH,Rn 0000nnnn00001010
  3362. tmp2 = SHR_MACH;
  3363. break;
  3364. case 1: // STS MACL,Rn 0000nnnn00011010
  3365. tmp2 = SHR_MACL;
  3366. break;
  3367. case 2: // STS PR,Rn 0000nnnn00101010
  3368. tmp2 = SHR_PR;
  3369. break;
  3370. default:
  3371. goto default_;
  3372. }
  3373. emit_move_r_r(GET_Rn(), tmp2);
  3374. goto end_op;
  3375. case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
  3376. case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
  3377. case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
  3378. emit_indirect_indexed_read(sh2, GET_Rn(), SHR_R0, GET_Rm(), (op & 3) | drcf.polling);
  3379. goto end_op;
  3380. case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
  3381. emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
  3382. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3383. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
  3384. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
  3385. emith_sh2_macl(tmp3, tmp4, tmp, tmp2, sr);
  3386. rcache_free_tmp(tmp2);
  3387. rcache_free_tmp(tmp);
  3388. goto end_op;
  3389. }
  3390. goto default_;
  3391. /////////////////////////////////////////////
  3392. case 0x01: // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
  3393. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), (op & 0x0f) * 4, 2);
  3394. goto end_op;
  3395. case 0x02:
  3396. switch (op & 0x0f)
  3397. {
  3398. case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
  3399. case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
  3400. case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
  3401. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, op & 3);
  3402. goto end_op;
  3403. case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
  3404. case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
  3405. case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
  3406. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, (op & 3) | MF_PREDECR);
  3407. goto end_op;
  3408. case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
  3409. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3410. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3411. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3412. tmp = rcache_get_tmp();
  3413. emith_invalidate_t();
  3414. emith_bic_r_imm(sr, M|Q|T);
  3415. emith_lsr(tmp, tmp2, 31); // Q = Nn
  3416. emith_or_r_r_lsl(sr, tmp, Q_SHIFT);
  3417. emith_lsr(tmp, tmp3, 31); // M = Nm
  3418. emith_or_r_r_lsl(sr, tmp, M_SHIFT);
  3419. emith_eor_r_r_lsr(tmp, tmp2, 31);
  3420. emith_or_r_r(sr, tmp); // T = Q^M
  3421. rcache_free(tmp);
  3422. drcf.Mflag = FLG_UNKNOWN;
  3423. goto end_op;
  3424. case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
  3425. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3426. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3427. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3428. emith_clr_t_cond(sr);
  3429. emith_tst_r_r(tmp2, tmp3);
  3430. emith_set_t_cond(sr, DCOND_EQ);
  3431. goto end_op;
  3432. case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
  3433. if (GET_Rm() != GET_Rn()) {
  3434. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3435. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3436. emith_and_r_r_r(tmp, tmp3, tmp2);
  3437. }
  3438. goto end_op;
  3439. case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
  3440. #if PROPAGATE_CONSTANTS
  3441. if (GET_Rn() == GET_Rm()) {
  3442. gconst_new(GET_Rn(), 0);
  3443. goto end_op;
  3444. }
  3445. #endif
  3446. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3447. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3448. emith_eor_r_r_r(tmp, tmp3, tmp2);
  3449. goto end_op;
  3450. case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
  3451. if (GET_Rm() != GET_Rn()) {
  3452. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3453. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3454. emith_or_r_r_r(tmp, tmp3, tmp2);
  3455. }
  3456. goto end_op;
  3457. case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
  3458. tmp = rcache_get_tmp();
  3459. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3460. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3461. emith_eor_r_r_r(tmp, tmp2, tmp3);
  3462. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3463. emith_clr_t_cond(sr);
  3464. emith_tst_r_imm(tmp, 0x000000ff);
  3465. EMITH_SJMP_START(DCOND_EQ);
  3466. emith_tst_r_imm_c(DCOND_NE, tmp, 0x0000ff00);
  3467. EMITH_SJMP_START(DCOND_EQ);
  3468. emith_tst_r_imm_c(DCOND_NE, tmp, 0x00ff0000);
  3469. EMITH_SJMP_START(DCOND_EQ);
  3470. emith_tst_r_imm_c(DCOND_NE, tmp, 0xff000000);
  3471. EMITH_SJMP_END(DCOND_EQ);
  3472. EMITH_SJMP_END(DCOND_EQ);
  3473. EMITH_SJMP_END(DCOND_EQ);
  3474. emith_set_t_cond(sr, DCOND_EQ);
  3475. rcache_free_tmp(tmp);
  3476. goto end_op;
  3477. case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
  3478. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3479. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3480. emith_lsr(tmp, tmp3, 16);
  3481. emith_or_r_r_lsl(tmp, tmp2, 16);
  3482. goto end_op;
  3483. case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
  3484. case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
  3485. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3486. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3487. tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  3488. tmp4 = tmp3;
  3489. if (op & 1) {
  3490. if (! rcache_is_s16(tmp2)) {
  3491. emith_sext(tmp, tmp2, 16);
  3492. tmp2 = tmp;
  3493. }
  3494. if (! rcache_is_s16(tmp3)) {
  3495. tmp4 = rcache_get_tmp();
  3496. emith_sext(tmp4, tmp3, 16);
  3497. }
  3498. } else {
  3499. if (! rcache_is_u16(tmp2)) {
  3500. emith_clear_msb(tmp, tmp2, 16);
  3501. tmp2 = tmp;
  3502. }
  3503. if (! rcache_is_u16(tmp3)) {
  3504. tmp4 = rcache_get_tmp();
  3505. emith_clear_msb(tmp4, tmp3, 16);
  3506. }
  3507. }
  3508. emith_mul(tmp, tmp2, tmp4);
  3509. if (tmp4 != tmp3)
  3510. rcache_free_tmp(tmp4);
  3511. goto end_op;
  3512. }
  3513. goto default_;
  3514. /////////////////////////////////////////////
  3515. case 0x03:
  3516. switch (op & 0x0f)
  3517. {
  3518. case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
  3519. case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
  3520. case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
  3521. case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
  3522. case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
  3523. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3524. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3525. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3526. switch (op & 0x07)
  3527. {
  3528. case 0x00: // CMP/EQ
  3529. tmp = DCOND_EQ;
  3530. break;
  3531. case 0x02: // CMP/HS
  3532. tmp = DCOND_HS;
  3533. break;
  3534. case 0x03: // CMP/GE
  3535. tmp = DCOND_GE;
  3536. break;
  3537. case 0x06: // CMP/HI
  3538. tmp = DCOND_HI;
  3539. break;
  3540. case 0x07: // CMP/GT
  3541. tmp = DCOND_GT;
  3542. break;
  3543. }
  3544. emith_clr_t_cond(sr);
  3545. emith_cmp_r_r(tmp2, tmp3);
  3546. emith_set_t_cond(sr, tmp);
  3547. goto end_op;
  3548. case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
  3549. // Q1 = carry(Rn = (Rn << 1) | T)
  3550. // if Q ^ M
  3551. // Q2 = carry(Rn += Rm)
  3552. // else
  3553. // Q2 = carry(Rn -= Rm)
  3554. // Q = M ^ Q1 ^ Q2
  3555. // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
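// note: this implements one step of non-restoring division; DIV1 is issued
// once per quotient bit, after DIV0S/DIV0U have initialized Q, M and T.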
  3556. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3557. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
  3558. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3559. emith_sync_t(sr);
  3560. tmp = rcache_get_tmp();
  3561. if (drcf.Mflag != FLG_0) {
  3562. emith_and_r_r_imm(tmp, sr, M);
  3563. emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT); // Q ^= M
  3564. }
  3565. rcache_free_tmp(tmp);
  3566. // shift Rn, add T, add or sub Rm, set T = !(Q1 ^ Q2)
  3567. // in: (Q ^ M) passed in Q
  3568. emith_sh2_div1_step(tmp2, tmp3, sr);
  3569. tmp = rcache_get_tmp();
  3570. emith_or_r_imm(sr, Q); // Q = !T
  3571. emith_and_r_r_imm(tmp, sr, T);
  3572. emith_eor_r_r_lsl(sr, tmp, Q_SHIFT);
  3573. if (drcf.Mflag != FLG_0) { // Q = M ^ !T = M ^ Q1 ^ Q2
  3574. emith_and_r_r_imm(tmp, sr, M);
  3575. emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT);
  3576. }
  3577. rcache_free_tmp(tmp);
  3578. goto end_op;
  3579. case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
  3580. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3581. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3582. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  3583. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
  3584. emith_mul_u64(tmp3, tmp4, tmp, tmp2);
  3585. goto end_op;
  3586. case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
  3587. #if PROPAGATE_CONSTANTS
  3588. if (GET_Rn() == GET_Rm()) {
  3589. gconst_new(GET_Rn(), 0);
  3590. goto end_op;
  3591. }
  3592. #endif
  3593. case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
  3594. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3595. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3596. if (op & 4) {
  3597. emith_add_r_r_r(tmp, tmp3, tmp2);
  3598. } else
  3599. emith_sub_r_r_r(tmp, tmp3, tmp2);
  3600. goto end_op;
  3601. case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
  3602. case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
  3603. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3604. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3605. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3606. emith_sync_t(sr);
  3607. #if T_OPTIMIZER
  3608. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  3609. if (op & 4) {
  3610. emith_t_to_carry(sr, 0);
  3611. emith_adc_r_r_r(tmp, tmp3, tmp2);
  3612. } else {
  3613. emith_t_to_carry(sr, 1);
  3614. emith_sbc_r_r_r(tmp, tmp3, tmp2);
  3615. }
  3616. } else
  3617. #endif
  3618. {
  3619. EMITH_HINT_COND(DCOND_CS);
  3620. if (op & 4) { // adc
  3621. emith_tpop_carry(sr, 0);
  3622. emith_adcf_r_r_r(tmp, tmp3, tmp2);
  3623. emith_tpush_carry(sr, 0);
  3624. } else {
  3625. emith_tpop_carry(sr, 1);
  3626. emith_sbcf_r_r_r(tmp, tmp3, tmp2);
  3627. emith_tpush_carry(sr, 1);
  3628. }
  3629. }
  3630. goto end_op;
  3631. case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
  3632. case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
  3633. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3634. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3635. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3636. #if T_OPTIMIZER
  3637. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  3638. if (op & 4)
  3639. emith_add_r_r_r(tmp,tmp3,tmp2);
  3640. else
  3641. emith_sub_r_r_r(tmp,tmp3,tmp2);
  3642. } else
  3643. #endif
  3644. {
  3645. emith_clr_t_cond(sr);
  3646. EMITH_HINT_COND(DCOND_VS);
  3647. if (op & 4)
  3648. emith_addf_r_r_r(tmp, tmp3, tmp2);
  3649. else
  3650. emith_subf_r_r_r(tmp, tmp3, tmp2);
  3651. emith_set_t_cond(sr, DCOND_VS);
  3652. }
  3653. goto end_op;
  3654. case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
  3655. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3656. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3657. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  3658. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
  3659. emith_mul_s64(tmp3, tmp4, tmp, tmp2);
  3660. goto end_op;
  3661. }
  3662. goto default_;
  3663. /////////////////////////////////////////////
  3664. case 0x04:
  3665. switch (op & 0x0f)
  3666. {
  3667. case 0x00:
  3668. switch (GET_Fx())
  3669. {
  3670. case 0: // SHLL Rn 0100nnnn00000000
  3671. case 2: // SHAL Rn 0100nnnn00100000
  3672. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3673. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3674. #if T_OPTIMIZER
  3675. if (rcache_regs_discard & BITMASK1(SHR_T))
  3676. emith_lsl(tmp, tmp2, 1);
  3677. else
  3678. #endif
  3679. {
  3680. emith_invalidate_t();
  3681. emith_lslf(tmp, tmp2, 1);
  3682. emith_carry_to_t(sr, 0);
  3683. }
  3684. goto end_op;
  3685. case 1: // DT Rn 0100nnnn00010000
  3686. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3687. #if LOOP_DETECTION
  3688. if (drcf.loop_type == OF_DELAY_LOOP) {
  3689. if (drcf.delay_reg == -1)
  3690. drcf.delay_reg = GET_Rn();
  3691. else
  3692. drcf.polling = drcf.loop_type = 0;
  3693. }
  3694. #endif
  3695. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3696. emith_clr_t_cond(sr);
  3697. EMITH_HINT_COND(DCOND_EQ);
  3698. emith_subf_r_r_imm(tmp, tmp2, 1);
  3699. emith_set_t_cond(sr, DCOND_EQ);
  3700. goto end_op;
  3701. }
  3702. goto default_;
  3703. case 0x01:
  3704. switch (GET_Fx())
  3705. {
  3706. case 0: // SHLR Rn 0100nnnn00000001
  3707. case 2: // SHAR Rn 0100nnnn00100001
  3708. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3709. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3710. #if T_OPTIMIZER
  3711. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  3712. if (op & 0x20)
  3713. emith_asr(tmp,tmp2,1);
  3714. else
  3715. emith_lsr(tmp,tmp2,1);
  3716. } else
  3717. #endif
  3718. {
  3719. emith_invalidate_t();
  3720. if (op & 0x20) {
  3721. emith_asrf(tmp, tmp2, 1);
  3722. } else
  3723. emith_lsrf(tmp, tmp2, 1);
  3724. emith_carry_to_t(sr, 0);
  3725. }
  3726. goto end_op;
  3727. case 1: // CMP/PZ Rn 0100nnnn00010001
  3728. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3729. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3730. emith_clr_t_cond(sr);
  3731. emith_cmp_r_imm(tmp, 0);
  3732. emith_set_t_cond(sr, DCOND_GE);
  3733. goto end_op;
  3734. }
  3735. goto default_;
  3736. case 0x02:
  3737. case 0x03:
  3738. switch (op & 0x3f)
  3739. {
  3740. case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
  3741. tmp = SHR_MACH;
  3742. break;
  3743. case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
  3744. tmp = SHR_MACL;
  3745. break;
  3746. case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
  3747. tmp = SHR_PR;
  3748. break;
  3749. case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
  3750. tmp = SHR_SR;
  3751. break;
  3752. case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
  3753. tmp = SHR_GBR;
  3754. break;
  3755. case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
  3756. tmp = SHR_VBR;
  3757. break;
  3758. default:
  3759. goto default_;
  3760. }
  3761. if (tmp == SHR_SR) {
  3762. tmp3 = rcache_get_reg_arg(1, tmp, &tmp4);
  3763. emith_sync_t(tmp4);
  3764. emith_clear_msb(tmp3, tmp4, 22); // reserved bits defined by ISA as 0
  3765. } else
  3766. tmp3 = rcache_get_reg_arg(1, tmp, NULL);
  3767. emit_memhandler_write_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_PREDECR);
  3768. goto end_op;
  3769. case 0x04:
  3770. case 0x05:
  3771. switch (op & 0x3f)
  3772. {
  3773. case 0x04: // ROTL Rn 0100nnnn00000100
  3774. case 0x05: // ROTR Rn 0100nnnn00000101
  3775. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3776. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3777. #if T_OPTIMIZER
  3778. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  3779. if (op & 1)
  3780. emith_ror(tmp, tmp2, 1);
  3781. else
  3782. emith_rol(tmp, tmp2, 1);
  3783. } else
  3784. #endif
  3785. {
  3786. emith_invalidate_t();
  3787. if (op & 1)
  3788. emith_rorf(tmp, tmp2, 1);
  3789. else
  3790. emith_rolf(tmp, tmp2, 1);
  3791. emith_carry_to_t(sr, 0);
  3792. }
  3793. goto end_op;
  3794. case 0x24: // ROTCL Rn 0100nnnn00100100
  3795. case 0x25: // ROTCR Rn 0100nnnn00100101
  3796. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
  3797. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3798. emith_sync_t(sr);
  3799. #if T_OPTIMIZER
  3800. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  3801. emith_t_to_carry(sr, 0);
  3802. if (op & 1)
  3803. emith_rorc(tmp);
  3804. else
  3805. emith_rolc(tmp);
  3806. } else
  3807. #endif
  3808. {
  3809. emith_tpop_carry(sr, 0);
  3810. if (op & 1)
  3811. emith_rorcf(tmp);
  3812. else
  3813. emith_rolcf(tmp);
  3814. emith_tpush_carry(sr, 0);
  3815. }
  3816. goto end_op;
  3817. case 0x15: // CMP/PL Rn 0100nnnn00010101
  3818. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3819. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3820. emith_clr_t_cond(sr);
  3821. emith_cmp_r_imm(tmp, 0);
  3822. emith_set_t_cond(sr, DCOND_GT);
  3823. goto end_op;
  3824. }
  3825. goto default_;
  3826. case 0x06:
  3827. case 0x07:
  3828. switch (op & 0x3f)
  3829. {
  3830. case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
  3831. tmp = SHR_MACH;
  3832. break;
  3833. case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
  3834. tmp = SHR_MACL;
  3835. break;
  3836. case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
  3837. tmp = SHR_PR;
  3838. break;
  3839. case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
  3840. tmp = SHR_SR;
  3841. break;
  3842. case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
  3843. tmp = SHR_GBR;
  3844. break;
  3845. case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
  3846. tmp = SHR_VBR;
  3847. break;
  3848. default:
  3849. goto default_;
  3850. }
  3851. if (tmp == SHR_SR) {
  3852. emith_invalidate_t();
  3853. tmp2 = emit_memhandler_read_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_POSTINCR);
  3854. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3855. emith_write_sr(sr, tmp2);
  3856. rcache_free_tmp(tmp2);
  3857. drcf.test_irq = 1;
  3858. } else
  3859. emit_memhandler_read_rr(sh2, tmp, GET_Rn(), 0, 2 | MF_POSTINCR);
  3860. goto end_op;
  3861. case 0x08:
  3862. case 0x09:
  3863. switch (GET_Fx())
  3864. {
  3865. case 0: // SHLL2 Rn 0100nnnn00001000
  3866. // SHLR2 Rn 0100nnnn00001001
  3867. tmp = 2;
  3868. break;
  3869. case 1: // SHLL8 Rn 0100nnnn00011000
  3870. // SHLR8 Rn 0100nnnn00011001
  3871. tmp = 8;
  3872. break;
  3873. case 2: // SHLL16 Rn 0100nnnn00101000
  3874. // SHLR16 Rn 0100nnnn00101001
  3875. tmp = 16;
  3876. break;
  3877. default:
  3878. goto default_;
  3879. }
  3880. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3881. if (op & 1) {
  3882. emith_lsr(tmp2, tmp3, tmp);
  3883. } else
  3884. emith_lsl(tmp2, tmp3, tmp);
  3885. goto end_op;
  3886. case 0x0a:
  3887. switch (GET_Fx())
  3888. {
  3889. case 0: // LDS Rm,MACH 0100mmmm00001010
  3890. tmp2 = SHR_MACH;
  3891. break;
  3892. case 1: // LDS Rm,MACL 0100mmmm00011010
  3893. tmp2 = SHR_MACL;
  3894. break;
  3895. case 2: // LDS Rm,PR 0100mmmm00101010
  3896. tmp2 = SHR_PR;
  3897. break;
  3898. default:
  3899. goto default_;
  3900. }
  3901. emit_move_r_r(tmp2, GET_Rn());
  3902. goto end_op;
  3903. case 0x0b:
  3904. switch (GET_Fx())
  3905. {
  3906. case 1: // TAS.B @Rn 0100nnnn00011011
  3907. // XXX: is TAS working on 32X?
  3908. rcache_get_reg_arg(0, GET_Rn(), NULL);
  3909. tmp = emit_memhandler_read(0);
  3910. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3911. emith_clr_t_cond(sr);
  3912. emith_cmp_r_imm(tmp, 0);
  3913. emith_set_t_cond(sr, DCOND_EQ);
  3914. emith_or_r_imm(tmp, 0x80);
tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
  3916. emith_move_r_r(tmp2, tmp);
  3917. rcache_free_tmp(tmp);
  3918. rcache_get_reg_arg(0, GET_Rn(), NULL);
  3919. emit_memhandler_write(0);
  3920. break;
  3921. default:
  3922. goto default_;
  3923. }
  3924. goto end_op;
  3925. case 0x0e:
  3926. switch (GET_Fx())
  3927. {
  3928. case 0: // LDC Rm,SR 0100mmmm00001110
  3929. tmp2 = SHR_SR;
  3930. break;
  3931. case 1: // LDC Rm,GBR 0100mmmm00011110
  3932. tmp2 = SHR_GBR;
  3933. break;
  3934. case 2: // LDC Rm,VBR 0100mmmm00101110
  3935. tmp2 = SHR_VBR;
  3936. break;
  3937. default:
  3938. goto default_;
  3939. }
  3940. if (tmp2 == SHR_SR) {
  3941. emith_invalidate_t();
  3942. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3943. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3944. emith_write_sr(sr, tmp);
  3945. drcf.test_irq = 1;
  3946. } else
  3947. emit_move_r_r(tmp2, GET_Rn());
  3948. goto end_op;
  3949. case 0x0f: // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
  3950. emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
  3951. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3952. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
  3953. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
  3954. emith_sh2_macw(tmp3, tmp4, tmp, tmp2, sr);
  3955. rcache_free_tmp(tmp2);
  3956. rcache_free_tmp(tmp);
  3957. goto end_op;
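// MAC.W sketch: both operands come from the post-incremented pointers,
// sign-extended from 16 bits, and are accumulated into MACH:MACL;
// emith_sh2_macw also applies the SR.S saturation rules. Roughly:
//   s64 mac = ((s64)MACH << 32) | (u32)MACL;
//   mac += (s64)(s16)op1 * (s16)op2;   // saturated when SR.S is set
//   MACL = (u32)mac; MACH = (u32)(mac >> 32);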
  3958. }
  3959. goto default_;
  3960. /////////////////////////////////////////////
  3961. case 0x05: // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
  3962. emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2 | drcf.polling);
  3963. goto end_op;
  3964. /////////////////////////////////////////////
  3965. case 0x06:
  3966. switch (op & 0x0f)
  3967. {
  3968. case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
  3969. case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
  3970. case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
  3971. case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
  3972. case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
  3973. case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
  3974. tmp = ((op & 7) >= 4 && GET_Rn() != GET_Rm()) ? MF_POSTINCR : drcf.polling;
  3975. emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), 0, (op & 3) | tmp);
  3976. goto end_op;
  3977. case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
  3978. emit_move_r_r(GET_Rn(), GET_Rm());
  3979. goto end_op;
  3980. case 0x07 ... 0x0f:
  3981. tmp = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3982. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3983. switch (op & 0x0f)
  3984. {
  3985. case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
  3986. emith_mvn_r_r(tmp2, tmp);
  3987. break;
  3988. case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
  3989. tmp3 = tmp2;
  3990. if (tmp == tmp2)
  3991. tmp3 = rcache_get_tmp();
  3992. tmp4 = rcache_get_tmp();
  3993. emith_lsr(tmp3, tmp, 16);
  3994. emith_or_r_r_lsl(tmp3, tmp, 24);
  3995. emith_and_r_r_imm(tmp4, tmp, 0xff00);
  3996. emith_or_r_r_lsl(tmp3, tmp4, 8);
  3997. emith_rol(tmp2, tmp3, 16);
  3998. rcache_free_tmp(tmp4);
  3999. if (tmp == tmp2)
  4000. rcache_free_tmp(tmp3);
  4001. break;
  4002. case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
  4003. emith_rol(tmp2, tmp, 16);
  4004. break;
  4005. case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
  4006. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4007. emith_sync_t(sr);
  4008. #if T_OPTIMIZER
  4009. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  4010. emith_t_to_carry(sr, 1);
  4011. emith_negc_r_r(tmp2, tmp);
  4012. } else
  4013. #endif
  4014. {
  4015. EMITH_HINT_COND(DCOND_CS);
  4016. emith_tpop_carry(sr, 1);
  4017. emith_negcf_r_r(tmp2, tmp);
  4018. emith_tpush_carry(sr, 1);
  4019. }
  4020. break;
  4021. case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
  4022. emith_neg_r_r(tmp2, tmp);
  4023. break;
  4024. case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
  4025. emith_clear_msb(tmp2, tmp, 24);
  4026. rcache_set_x16(tmp2, 1, 1);
  4027. break;
  4028. case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
  4029. emith_clear_msb(tmp2, tmp, 16);
  4030. rcache_set_x16(tmp2, 0, 1);
  4031. break;
  4032. case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
  4033. emith_sext(tmp2, tmp, 8);
  4034. rcache_set_x16(tmp2, 1, 0);
  4035. break;
  4036. case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
  4037. emith_sext(tmp2, tmp, 16);
  4038. rcache_set_x16(tmp2, 1, 0);
  4039. break;
  4040. }
  4041. goto end_op;
  4042. }
  4043. goto default_;
  4044. /////////////////////////////////////////////
  4045. case 0x07: // ADD #imm,Rn 0111nnnniiiiiiii
  4046. if (op & 0x80) // adding negative
  4047. emit_sub_r_imm(GET_Rn(), (u8)-op);
  4048. else
  4049. emit_add_r_imm(GET_Rn(), (u8)op);
  4050. goto end_op;
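// The 8-bit ADD immediate is sign-extended, so a negative immediate is
// emitted as a subtract of its magnitude instead of an add of a large
// constant. Example: imm = -16 (op low byte 0xf0) gives (u8)-op == 0x10,
// i.e. "Rn -= 16" rather than "Rn += 0xfffffff0", which is friendlier
// to host immediate encodings.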
  4051. /////////////////////////////////////////////
  4052. case 0x08:
  4053. switch (op & 0x0f00)
  4054. {
  4055. case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
  4056. case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
  4057. tmp = (op & 0x100) >> 8;
  4058. emit_memhandler_write_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
  4059. goto end_op;
  4060. case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
  4061. case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
  4062. tmp = (op & 0x100) >> 8;
  4063. emit_memhandler_read_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp | drcf.polling);
  4064. goto end_op;
  4065. case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
  4066. tmp2 = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
  4067. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4068. emith_clr_t_cond(sr);
  4069. emith_cmp_r_imm(tmp2, (s8)(op & 0xff));
  4070. emith_set_t_cond(sr, DCOND_EQ);
  4071. goto end_op;
  4072. }
  4073. goto default_;
  4074. /////////////////////////////////////////////
  4075. case 0x0c:
  4076. switch (op & 0x0f00)
  4077. {
  4078. case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
  4079. case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
  4080. case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
  4081. tmp = (op & 0x300) >> 8;
  4082. emit_memhandler_write_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
  4083. goto end_op;
  4084. case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
  4085. case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
  4086. case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
  4087. tmp = (op & 0x300) >> 8;
  4088. emit_memhandler_read_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp | drcf.polling);
  4089. goto end_op;
  4090. case 0x0800: // TST #imm,R0 11001000iiiiiiii
  4091. tmp = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
  4092. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4093. emith_clr_t_cond(sr);
  4094. emith_tst_r_imm(tmp, op & 0xff);
  4095. emith_set_t_cond(sr, DCOND_EQ);
  4096. goto end_op;
  4097. case 0x0900: // AND #imm,R0 11001001iiiiiiii
  4098. tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
  4099. emith_and_r_r_imm(tmp, tmp2, (op & 0xff));
  4100. goto end_op;
  4101. case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
  4102. if (op & 0xff) {
  4103. tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
  4104. emith_eor_r_r_imm(tmp, tmp2, (op & 0xff));
  4105. }
  4106. goto end_op;
  4107. case 0x0b00: // OR #imm,R0 11001011iiiiiiii
  4108. if (op & 0xff) {
  4109. tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
  4110. emith_or_r_r_imm(tmp, tmp2, (op & 0xff));
  4111. }
  4112. goto end_op;
  4113. case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
  4114. tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0 | drcf.polling);
  4115. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4116. emith_clr_t_cond(sr);
  4117. emith_tst_r_imm(tmp, op & 0xff);
  4118. emith_set_t_cond(sr, DCOND_EQ);
  4119. rcache_free_tmp(tmp);
  4120. goto end_op;
  4121. case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
  4122. tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
  4123. tmp2 = rcache_get_tmp_arg(1);
  4124. emith_and_r_r_imm(tmp2, tmp, (op & 0xff));
  4125. goto end_rmw_op;
  4126. case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
  4127. tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
  4128. tmp2 = rcache_get_tmp_arg(1);
  4129. emith_eor_r_r_imm(tmp2, tmp, (op & 0xff));
  4130. goto end_rmw_op;
  4131. case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
  4132. tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
  4133. tmp2 = rcache_get_tmp_arg(1);
  4134. emith_or_r_r_imm(tmp2, tmp, (op & 0xff));
  4135. end_rmw_op:
  4136. rcache_free_tmp(tmp);
  4137. emit_indirect_indexed_write(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
  4138. goto end_op;
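// AND.B/XOR.B/OR.B @(R0,GBR) share the read-modify-write tail at
// end_rmw_op: each case computes the new byte into the second call
// argument and falls through to free the temp and write the result
// back to the same @(R0,GBR) address.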
  4139. }
  4140. goto default_;
  4141. /////////////////////////////////////////////
  4142. case 0x0e: // MOV #imm,Rn 1110nnnniiiiiiii
  4143. emit_move_r_imm32(GET_Rn(), (s8)op);
  4144. goto end_op;
  4145. default:
  4146. default_:
  4147. if (!(op_flags[i] & OF_B_IN_DS)) {
  4148. elprintf_sh2(sh2, EL_ANOMALY,
  4149. "drc: illegal op %04x @ %08x", op, pc - 2);
  4150. exit(1);
  4151. }
  4152. }
  4153. end_op:
  4154. rcache_unlock_all();
  4155. rcache_set_usage_now(0);
  4156. #if DRC_DEBUG & 64
  4157. RCACHE_CHECK("after insn");
  4158. #endif
  4159. cycles += opd->cycles;
  4160. if (op_flags[i+1] & OF_DELAY_OP) {
  4161. do_host_disasm(tcache_id);
  4162. continue;
  4163. }
  4164. // test irq?
  4165. if (drcf.test_irq && !drcf.pending_branch_direct) {
  4166. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4167. FLUSH_CYCLES(sr);
  4168. emith_sync_t(sr);
  4169. if (!drcf.pending_branch_indirect)
  4170. emit_move_r_imm32(SHR_PC, pc);
  4171. rcache_flush();
  4172. emith_call(sh2_drc_test_irq);
  4173. drcf.test_irq = 0;
  4174. }
  4175. // branch handling
  4176. if (drcf.pending_branch_direct)
  4177. {
  4178. struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
  4179. u32 target_pc = opd_b->imm;
  4180. int cond = -1;
  4181. int ctaken = 0;
  4182. void *target = NULL;
  4183. if (OP_ISBRACND(opd_b->op))
  4184. ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
  4185. cycles += ctaken; // assume branch taken
  4186. #if LOOP_OPTIMIZER
  4187. if ((drcf.loop_type == OF_IDLE_LOOP ||
  4188. (drcf.loop_type == OF_DELAY_LOOP && drcf.delay_reg >= 0)))
  4189. {
  4190. // idle or delay loop
  4191. emit_sync_t_to_sr();
  4192. emith_sh2_delay_loop(cycles, drcf.delay_reg);
  4193. rcache_unlock_all(); // may lock delay_reg
  4194. drcf.polling = drcf.loop_type = drcf.pinning = 0;
  4195. }
  4196. #endif
  4197. #if CALL_STACK
  4198. void *rtsadd = NULL, *rtsret = NULL;
  4199. if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
  4200. // BSR - save rts data
  4201. tmp = rcache_get_tmp_arg(1);
  4202. rtsadd = tcache_ptr;
  4203. emith_move_r_imm_s8_patchable(tmp, 0);
  4204. rcache_clean_tmp();
  4205. rcache_invalidate_tmp();
  4206. emith_call(sh2_drc_dispatcher_call);
  4207. rtsret = tcache_ptr;
  4208. }
  4209. #endif
  4210. // XXX move below cond test if not changing host cond (MIPS delay slot)?
  4211. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4212. FLUSH_CYCLES(sr);
  4213. rcache_clean();
  4214. if (OP_ISBRACND(opd_b->op)) {
  4215. // BT[S], BF[S] - emit condition test
  4216. cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
  4217. if (delay_dep_fw & BITMASK1(SHR_T)) {
  4218. emith_sync_t(sr);
  4219. emith_tst_r_imm(sr, T_save);
  4220. } else {
  4221. cond = emith_tst_t(sr, (opd_b->op == OP_BRANCH_CT));
  4222. if (emith_get_t_cond() >= 0) {
  4223. if (opd_b->op == OP_BRANCH_CT)
  4224. emith_or_r_imm_c(cond, sr, T);
  4225. else
  4226. emith_bic_r_imm_c(cond, sr, T);
  4227. }
  4228. }
  4229. } else
  4230. emith_sync_t(sr);
  4231. // no modification of host status/flags between here and branching!
  4232. v = find_in_sorted_linkage(branch_targets, branch_target_count, target_pc);
  4233. if (v >= 0)
  4234. {
  4235. // local branch
  4236. if (branch_targets[v].ptr) {
  4237. // local backward jump, link here now since host PC is already known
  4238. target = branch_targets[v].ptr;
  4239. #if LOOP_OPTIMIZER
  4240. if (pinned_loops[pinned_loop_count].pc == target_pc) {
  4241. // backward jump at end of optimized loop
  4242. rcache_unpin_all();
  4243. target = pinned_loops[pinned_loop_count].ptr;
  4244. pinned_loop_count ++;
  4245. }
  4246. #endif
  4247. if (cond != -1) {
  4248. if (emith_jump_patch_inrange(tcache_ptr, target)) {
  4249. emith_jump_cond(cond, target);
  4250. } else {
  4251. // not reachable directly, must use far branch
  4252. EMITH_JMP_START(emith_invert_cond(cond));
  4253. emith_jump(target);
  4254. EMITH_JMP_END(emith_invert_cond(cond));
  4255. }
  4256. } else {
  4257. emith_jump(target);
  4258. rcache_invalidate();
  4259. }
  4260. } else if (blx_target_count < MAX_LOCAL_BRANCHES) {
  4261. // local forward jump
  4262. target = tcache_ptr;
  4263. blx_targets[blx_target_count++] =
  4264. (struct linkage) { .pc = target_pc, .ptr = target, .mask = 0x2 };
  4265. if (cond != -1)
  4266. emith_jump_cond_patchable(cond, target);
  4267. else {
  4268. emith_jump_patchable(target);
  4269. rcache_invalidate();
  4270. }
  4271. } else
  4272. // no space for resolving forward branch, handle it as external
  4273. dbg(1, "warning: too many unresolved branches");
  4274. }
  4275. if (target == NULL)
  4276. {
  4277. // can't resolve branch locally, make a block exit
  4278. bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
  4279. if (cond != -1) {
  4280. #if 1
  4281. if (bl && blx_target_count < ARRAY_SIZE(blx_targets)) {
  4282. // conditional jumps get a blx stub for the far jump
  4283. bl->type = BL_JCCBLX;
  4284. target = tcache_ptr;
  4285. blx_targets[blx_target_count++] =
  4286. (struct linkage) { .pc = target_pc, .ptr = target, .bl = bl };
  4287. emith_jump_cond_patchable(cond, target);
  4288. } else {
  4289. // not linkable, or blx table full; inline jump @dispatcher
  4290. EMITH_JMP_START(emith_invert_cond(cond));
  4291. if (bl) {
  4292. bl->jump = tcache_ptr;
  4293. emith_flush(); // flush to inhibit insn swapping
  4294. bl->type = BL_LDJMP;
  4295. }
  4296. tmp = rcache_get_tmp_arg(0);
  4297. emith_move_r_imm(tmp, target_pc);
  4298. rcache_free_tmp(tmp);
  4299. target = sh2_drc_dispatcher;
  4300. emith_jump_patchable(target);
  4301. EMITH_JMP_END(emith_invert_cond(cond));
  4302. }
  4303. #else
  4304. // jump @dispatcher - ARM 32bit version with conditional execution
  4305. EMITH_SJMP_START(emith_invert_cond(cond));
  4306. tmp = rcache_get_tmp_arg(0);
  4307. emith_move_r_imm_c(cond, tmp, target_pc);
  4308. rcache_free_tmp(tmp);
  4309. target = sh2_drc_dispatcher;
  4310. if (bl) {
  4311. bl->jump = tcache_ptr;
  4312. bl->type = BL_JMP;
  4313. }
  4314. emith_jump_cond_patchable(cond, target);
  4315. EMITH_SJMP_END(emith_invert_cond(cond));
  4316. #endif
  4317. } else {
  4318. // unconditional, has the far jump inlined
  4319. if (bl) {
  4320. emith_flush(); // flush to inhibit insn swapping
  4321. bl->type = BL_LDJMP;
  4322. }
  4323. tmp = rcache_get_tmp_arg(0);
  4324. emith_move_r_imm(tmp, target_pc);
  4325. rcache_free_tmp(tmp);
  4326. target = sh2_drc_dispatcher;
  4327. emith_jump_patchable(target);
  4328. rcache_invalidate();
  4329. }
  4330. }
  4331. #if CALL_STACK
  4332. if (rtsadd)
  4333. emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
  4334. #endif
  4335. // branch not taken, correct cycle count
  4336. if (ctaken)
  4337. cycles -= ctaken;
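// Cycle accounting sketch: the taken cost ctaken is charged up front
// (cycles += ctaken above, on the assumption the branch is taken) and
// refunded here on the fall-through path. Per the SH-2 manual a taken
// BT/BF costs 3 cycles vs. 1 not taken, matching the +2 (+1 when the
// delay slot insn is counted separately).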
  4338. // set T bit to reflect branch not taken for OP_BRANCH_CT/CF
  4339. if (emith_get_t_cond() >= 0) // T is synced for all other cases
  4340. emith_set_t(sr, opd_b->op == OP_BRANCH_CF);
  4341. drcf.pending_branch_direct = 0;
  4342. if (target_pc >= base_pc && target_pc < pc)
  4343. drcf.polling = drcf.loop_type = 0;
  4344. }
  4345. else if (drcf.pending_branch_indirect) {
  4346. u32 target_pc;
  4347. tmp = rcache_get_reg_arg(0, SHR_PC, NULL);
  4348. #if CALL_STACK
  4349. struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
  4350. void *rtsadd = NULL, *rtsret = NULL;
  4351. if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
  4352. // JSR, BSRF - save rts data
  4353. tmp = rcache_get_tmp_arg(1);
  4354. rtsadd = tcache_ptr;
  4355. emith_move_r_imm_s8_patchable(tmp, 0);
  4356. rcache_clean_tmp();
  4357. rcache_invalidate_tmp();
  4358. emith_call(sh2_drc_dispatcher_call);
  4359. rtsret = tcache_ptr;
  4360. }
  4361. #endif
  4362. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4363. FLUSH_CYCLES(sr);
  4364. emith_sync_t(sr);
  4365. rcache_clean();
  4366. #if CALL_STACK
  4367. if (opd_b->rm == SHR_PR) {
4368. // RTS - return via the rts cache; dispatcher_return falls back to the dispatcher on a miss
  4369. emith_jump(sh2_drc_dispatcher_return);
  4370. } else
  4371. #endif
  4372. if (gconst_get(SHR_PC, &target_pc)) {
  4373. // JMP, JSR, BRAF, BSRF const - treat like unconditional direct branch
  4374. bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
  4375. if (bl) // pc already loaded somewhere else, can patch jump only
  4376. bl->type = BL_JMP;
  4377. emith_jump_patchable(sh2_drc_dispatcher);
  4378. } else {
  4379. // JMP, JSR, BRAF, BSRF not const
  4380. emith_jump(sh2_drc_dispatcher);
  4381. }
  4382. rcache_invalidate();
  4383. #if CALL_STACK
  4384. if (rtsadd)
  4385. emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
  4386. #endif
  4387. drcf.pending_branch_indirect = 0;
  4388. drcf.polling = drcf.loop_type = 0;
  4389. }
  4390. rcache_unlock_all();
  4391. do_host_disasm(tcache_id);
  4392. }
  4393. // check the last op
  4394. if (op_flags[i-1] & OF_DELAY_OP)
  4395. opd = &ops[i-2];
  4396. else
  4397. opd = &ops[i-1];
  4398. if (! OP_ISBRAUC(opd->op))
  4399. {
  4400. tmp = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4401. FLUSH_CYCLES(tmp);
  4402. emith_sync_t(tmp);
  4403. rcache_clean();
  4404. bl = dr_prepare_ext_branch(block->entryp, pc, sh2->is_slave, tcache_id);
  4405. if (bl) {
  4406. emith_flush(); // flush to inhibit insn swapping
  4407. bl->type = BL_LDJMP;
  4408. }
  4409. tmp = rcache_get_tmp_arg(0);
  4410. emith_move_r_imm(tmp, pc);
  4411. emith_jump_patchable(sh2_drc_dispatcher);
  4412. rcache_invalidate();
  4413. } else
  4414. rcache_flush();
  4415. // link unresolved branches, emitting blx area entries as needed
  4416. emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
  4417. branch_target_count, blx_targets, blx_target_count);
  4418. emith_flush();
  4419. do_host_disasm(tcache_id);
  4420. emith_pool_commit(0);
4421. // fill blx backup; do this last to back up the final patched code
  4422. for (i = 0; i < block->entry_count; i++)
  4423. for (bl = block->entryp[i].o_links; bl; bl = bl->o_next)
  4424. memcpy(bl->jdisp, bl->blx ?: bl->jump, emith_jump_at_size());
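// bl->jdisp receives a byte copy of each jump (or its blx stub) in its
// final patched form, presumably so later block (un)linking can restore
// or re-patch the code without re-deriving the original encoding.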
  4425. ring_alloc(&tcache_ring[tcache_id], tcache_ptr - block_entry_ptr);
  4426. host_instructions_updated(block_entry_ptr, tcache_ptr);
  4427. dr_activate_block(block, tcache_id, sh2->is_slave);
  4428. emith_update_cache();
  4429. do_host_disasm(tcache_id);
  4430. dbg(2, " block #%d,%d -> %p tcache %d/%d, insns %d -> %d %.3f",
  4431. tcache_id, blkid_main, tcache_ptr,
  4432. tcache_ring[tcache_id].used, tcache_ring[tcache_id].size,
  4433. insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);
  4434. if ((sh2->pc & 0xc6000000) == 0x02000000) { // ROM
  4435. dbg(2, " hash collisions %d/%d", hash_collisions, block_ring[tcache_id].used);
  4436. Pico32x.emu_flags |= P32XF_DRC_ROM_C;
  4437. }
  4438. /*
  4439. printf("~~~\n");
  4440. tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
  4441. do_host_disasm(tcache_id);
  4442. printf("~~~\n");
  4443. */
  4444. #if (DRC_DEBUG)
  4445. fflush(stdout);
  4446. #endif
  4447. return block_entry_ptr;
  4448. }
  4449. static void sh2_generate_utils(void)
  4450. {
  4451. int arg0, arg1, arg2, arg3, sr, tmp, tmp2;
  4452. #if DRC_DEBUG
  4453. int hic = host_insn_count; // don't count utils for insn statistics
  4454. #endif
  4455. host_arg2reg(arg0, 0);
  4456. host_arg2reg(arg1, 1);
  4457. host_arg2reg(arg2, 2);
  4458. host_arg2reg(arg3, 3);
  4459. emith_move_r_r(arg0, arg0); // nop
  4460. emith_flush();
  4461. // sh2_drc_write8(u32 a, u32 d)
  4462. sh2_drc_write8 = (void *)tcache_ptr;
  4463. emith_ctx_read_ptr(arg2, offsetof(SH2, write8_tab));
  4464. emith_sh2_wcall(arg0, arg1, arg2, arg3);
  4465. emith_flush();
  4466. // sh2_drc_write16(u32 a, u32 d)
  4467. sh2_drc_write16 = (void *)tcache_ptr;
  4468. emith_ctx_read_ptr(arg2, offsetof(SH2, write16_tab));
  4469. emith_sh2_wcall(arg0, arg1, arg2, arg3);
  4470. emith_flush();
  4471. // sh2_drc_write32(u32 a, u32 d)
  4472. sh2_drc_write32 = (void *)tcache_ptr;
  4473. emith_ctx_read_ptr(arg2, offsetof(SH2, write32_tab));
  4474. emith_sh2_wcall(arg0, arg1, arg2, arg3);
  4475. emith_flush();
  4476. // d = sh2_drc_read8(u32 a)
  4477. sh2_drc_read8 = (void *)tcache_ptr;
  4478. emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  4479. EMITH_HINT_COND(DCOND_CS);
  4480. emith_sh2_rcall(arg0, arg1, arg2, arg3);
  4481. EMITH_SJMP_START(DCOND_CS);
  4482. emith_and_r_r_c(DCOND_CC, arg0, arg3);
  4483. emith_eor_r_imm_ptr_c(DCOND_CC, arg0, 1);
  4484. emith_read8s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  4485. emith_ret_c(DCOND_CC);
  4486. EMITH_SJMP_END(DCOND_CS);
  4487. emith_move_r_r_ptr(arg1, CONTEXT_REG);
  4488. emith_jump_reg(arg2);
  4489. emith_flush();
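// All generated read handlers share this shape: emith_sh2_rcall looks
// the address up in the read map and sets carry when the entry is an
// I/O handler rather than direct memory. In C terms (a sketch with
// illustrative names, not the real macros):
//   if (!is_handler(elt))
//       return (s8)mem[(a & mask) ^ 1]; // ^1: RAM is 16-bit byteswapped
//   return handler(a, sh2);             // tail-call the I/O handler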
  4490. // d = sh2_drc_read16(u32 a)
  4491. sh2_drc_read16 = (void *)tcache_ptr;
  4492. emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  4493. EMITH_HINT_COND(DCOND_CS);
  4494. emith_sh2_rcall(arg0, arg1, arg2, arg3);
  4495. EMITH_SJMP_START(DCOND_CS);
  4496. emith_and_r_r_c(DCOND_CC, arg0, arg3);
  4497. emith_read16s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  4498. emith_ret_c(DCOND_CC);
  4499. EMITH_SJMP_END(DCOND_CS);
  4500. emith_move_r_r_ptr(arg1, CONTEXT_REG);
  4501. emith_jump_reg(arg2);
  4502. emith_flush();
  4503. // d = sh2_drc_read32(u32 a)
  4504. sh2_drc_read32 = (void *)tcache_ptr;
  4505. emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  4506. EMITH_HINT_COND(DCOND_CS);
  4507. emith_sh2_rcall(arg0, arg1, arg2, arg3);
  4508. EMITH_SJMP_START(DCOND_CS);
  4509. emith_and_r_r_c(DCOND_CC, arg0, arg3);
  4510. emith_read_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  4511. emith_ror_c(DCOND_CC, RET_REG, RET_REG, 16);
  4512. emith_ret_c(DCOND_CC);
  4513. EMITH_SJMP_END(DCOND_CS);
  4514. emith_move_r_r_ptr(arg1, CONTEXT_REG);
  4515. emith_jump_reg(arg2);
  4516. emith_flush();
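// The extra ror #16 after the 32-bit load compensates for RAM being
// stored as byteswapped 16-bit units: a straight 32-bit host load
// yields the two halfwords in the wrong order, and rotating by 16
// restores them.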
  4517. // d = sh2_drc_read8_poll(u32 a)
  4518. sh2_drc_read8_poll = (void *)tcache_ptr;
  4519. emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  4520. EMITH_HINT_COND(DCOND_CS);
  4521. emith_sh2_rcall(arg0, arg1, arg2, arg3);
  4522. EMITH_SJMP_START(DCOND_CC);
  4523. emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  4524. emith_jump_reg_c(DCOND_CS, arg2);
  4525. EMITH_SJMP_END(DCOND_CC);
  4526. emith_and_r_r_r(arg1, arg0, arg3);
  4527. emith_eor_r_imm_ptr(arg1, 1);
  4528. emith_read8s_r_r_r(arg1, arg2, arg1);
  4529. emith_push_ret(arg1);
  4530. emith_move_r_r_ptr(arg2, CONTEXT_REG);
  4531. emith_call(p32x_sh2_poll_memory8);
  4532. emith_pop_and_ret(arg1);
  4533. emith_flush();
  4534. // d = sh2_drc_read16_poll(u32 a)
  4535. sh2_drc_read16_poll = (void *)tcache_ptr;
  4536. emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  4537. EMITH_HINT_COND(DCOND_CS);
  4538. emith_sh2_rcall(arg0, arg1, arg2, arg3);
  4539. EMITH_SJMP_START(DCOND_CC);
  4540. emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  4541. emith_jump_reg_c(DCOND_CS, arg2);
  4542. EMITH_SJMP_END(DCOND_CC);
  4543. emith_and_r_r_r(arg1, arg0, arg3);
  4544. emith_read16s_r_r_r(arg1, arg2, arg1);
  4545. emith_push_ret(arg1);
  4546. emith_move_r_r_ptr(arg2, CONTEXT_REG);
  4547. emith_call(p32x_sh2_poll_memory16);
  4548. emith_pop_and_ret(arg1);
  4549. emith_flush();
  4550. // d = sh2_drc_read32_poll(u32 a)
  4551. sh2_drc_read32_poll = (void *)tcache_ptr;
  4552. emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  4553. EMITH_HINT_COND(DCOND_CS);
  4554. emith_sh2_rcall(arg0, arg1, arg2, arg3);
  4555. EMITH_SJMP_START(DCOND_CC);
  4556. emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  4557. emith_jump_reg_c(DCOND_CS, arg2);
  4558. EMITH_SJMP_END(DCOND_CC);
  4559. emith_and_r_r_r(arg1, arg0, arg3);
  4560. emith_read_r_r_r(arg1, arg2, arg1);
  4561. emith_ror(arg1, arg1, 16);
  4562. emith_push_ret(arg1);
  4563. emith_move_r_r_ptr(arg2, CONTEXT_REG);
  4564. emith_call(p32x_sh2_poll_memory32);
  4565. emith_pop_and_ret(arg1);
  4566. emith_flush();
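// The *_poll variants do the same direct-memory access and then call
// p32x_sh2_poll_memory*, which watches for the CPU re-reading the same
// address in a busy-wait loop so the scheduler can skip ahead instead
// of executing the loop in real time.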
  4567. // sh2_drc_exit(u32 pc)
  4568. sh2_drc_exit = (void *)tcache_ptr;
  4569. emith_ctx_write(arg0, SHR_PC * 4);
  4570. emit_do_static_regs(1, arg2);
  4571. emith_sh2_drc_exit();
  4572. emith_flush();
  4573. // sh2_drc_dispatcher(u32 pc)
  4574. sh2_drc_dispatcher = (void *)tcache_ptr;
  4575. emith_ctx_write(arg0, SHR_PC * 4);
  4576. #if BRANCH_CACHE
  4577. // check if PC is in branch target cache
  4578. emith_and_r_r_imm(arg1, arg0, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  4579. emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  4580. emith_read_r_r_offs(arg2, arg1, offsetof(SH2, branch_cache));
  4581. emith_cmp_r_r(arg2, arg0);
  4582. EMITH_SJMP_START(DCOND_NE);
  4583. #if (DRC_DEBUG & 128)
  4584. emith_move_r_ptr_imm(arg2, (uptr)&bchit);
  4585. emith_read_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
  4586. emith_add_r_imm_c(DCOND_EQ, arg3, 1);
  4587. emith_write_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
  4588. #endif
  4589. emith_read_r_r_offs_ptr_c(DCOND_EQ, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  4590. emith_jump_reg_c(DCOND_EQ, RET_REG);
  4591. EMITH_SJMP_END(DCOND_NE);
  4592. #endif
  4593. emith_move_r_r_ptr(arg1, CONTEXT_REG);
  4594. emith_add_r_r_ptr_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  4595. emith_call(dr_lookup_block);
  4596. // store PC and block entry ptr (in arg0) in branch target cache
  4597. emith_tst_r_r_ptr(RET_REG, RET_REG);
  4598. EMITH_SJMP_START(DCOND_EQ);
  4599. #if BRANCH_CACHE
  4600. #if (DRC_DEBUG & 128)
  4601. emith_move_r_ptr_imm(arg2, (uptr)&bcmiss);
  4602. emith_read_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
  4603. emith_add_r_imm_c(DCOND_NE, arg3, 1);
  4604. emith_write_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
  4605. #endif
  4606. emith_ctx_read_c(DCOND_NE, arg2, SHR_PC * 4);
  4607. emith_and_r_r_imm(arg1, arg2, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  4608. emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  4609. emith_write_r_r_offs_c(DCOND_NE, arg2, arg1, offsetof(SH2, branch_cache));
  4610. emith_write_r_r_offs_ptr_c(DCOND_NE, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  4611. #endif
  4612. emith_jump_reg_c(DCOND_NE, RET_REG);
  4613. EMITH_SJMP_END(DCOND_EQ);
  4614. // lookup failed, call sh2_translate()
  4615. emith_move_r_r_ptr(arg0, CONTEXT_REG);
  4616. emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  4617. emith_call(sh2_translate);
  4618. emith_tst_r_r_ptr(RET_REG, RET_REG);
  4619. EMITH_SJMP_START(DCOND_EQ);
  4620. emith_jump_reg_c(DCOND_NE, RET_REG);
  4621. EMITH_SJMP_END(DCOND_EQ);
  4622. // XXX: can't translate, fail
  4623. emith_call(dr_failure);
  4624. emith_flush();
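// Dispatcher control flow, roughly, in C (a sketch; the entry field
// names are illustrative):
//   i = (pc & (N-1)*8) / 8;                 // direct-mapped by low pc bits
//   if (sh2->branch_cache[i].pc == pc)
//       goto *sh2->branch_cache[i].ptr;     // hit: enter block directly
//   v = dr_lookup_block(pc, sh2, &tcache_id);
//   if (v) { /* refill cache entry */ goto *v; }
//   v = sh2_translate(sh2, tcache_id);      // compile on demand
//   if (v) goto *v; else dr_failure();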
  4625. #if CALL_STACK
  4626. // pc = sh2_drc_dispatcher_call(u32 pc)
  4627. sh2_drc_dispatcher_call = (void *)tcache_ptr;
  4628. emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  4629. emith_add_r_imm(arg2, (u32)(2*sizeof(void *)));
  4630. emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  4631. emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
  4632. emith_add_r_r_r_lsl_ptr(arg3, CONTEXT_REG, arg2, 0);
  4633. rcache_get_reg_arg(2, SHR_PR, NULL);
  4634. emith_add_r_ret(arg1);
  4635. emith_write_r_r_offs_ptr(arg1, arg3, offsetof(SH2, rts_cache)+sizeof(void *));
  4636. emith_write_r_r_offs(arg2, arg3, offsetof(SH2, rts_cache));
  4637. rcache_flush();
  4638. emith_ret();
  4639. emith_flush();
  4640. // sh2_drc_dispatcher_return(u32 pc)
  4641. sh2_drc_dispatcher_return = (void *)tcache_ptr;
  4642. emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  4643. emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg2, 0);
  4644. emith_read_r_r_offs(arg3, arg1, offsetof(SH2, rts_cache));
  4645. emith_cmp_r_r(arg0, arg3);
  4646. #if (DRC_DEBUG & 128)
  4647. EMITH_SJMP_START(DCOND_EQ);
  4648. emith_move_r_ptr_imm(arg3, (uptr)&rcmiss);
  4649. emith_read_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  4650. emith_add_r_imm_c(DCOND_NE, arg1, 1);
  4651. emith_write_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  4652. emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
  4653. EMITH_SJMP_END(DCOND_EQ);
  4654. #else
  4655. emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
  4656. #endif
  4657. emith_read_r_r_offs_ptr(arg0, arg1, offsetof(SH2, rts_cache) + sizeof(void *));
  4658. emith_sub_r_imm(arg2, (u32)(2*sizeof(void *)));
  4659. emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  4660. emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
  4661. #if (DRC_DEBUG & 128)
  4662. emith_move_r_ptr_imm(arg3, (uptr)&rchit);
  4663. emith_read_r_r_offs(arg1, arg3, 0);
  4664. emith_add_r_imm(arg1, 1);
  4665. emith_write_r_r_offs(arg1, arg3, 0);
  4666. #endif
  4667. emith_jump_reg(arg0);
  4668. emith_flush();
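// The call/return cache is a small ring of (pc, host addr) pairs:
// dispatcher_call pushes the return address of a BSR/JSR together with
// the host code location to resume at; dispatcher_return compares the
// RTS target against the top entry and, on a match, jumps straight to
// the cached host address. A mismatch (e.g. the guest rewrote PR)
// falls back to sh2_drc_dispatcher.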
  4669. #endif
  4670. // sh2_drc_test_irq(void)
4671. // assumes it's called from the main function (it may jump to the dispatcher instead of returning)
  4672. sh2_drc_test_irq = (void *)tcache_ptr;
  4673. emith_ctx_read(arg1, offsetof(SH2, pending_level));
  4674. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  4675. emith_lsr(arg0, sr, I_SHIFT);
  4676. emith_and_r_imm(arg0, 0x0f);
  4677. emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  4678. EMITH_SJMP_START(DCOND_GT);
  4679. emith_ret_c(DCOND_LE); // nope, return
  4680. EMITH_SJMP_END(DCOND_GT);
  4681. // adjust SP
  4682. tmp = rcache_get_reg(SHR_SP, RC_GR_RMW, NULL);
  4683. emith_sub_r_imm(tmp, 4*2);
  4684. rcache_clean();
  4685. // push SR
  4686. tmp = rcache_get_reg_arg(0, SHR_SP, &tmp2);
  4687. emith_add_r_r_imm(tmp, tmp2, 4);
  4688. tmp = rcache_get_reg_arg(1, SHR_SR, NULL);
  4689. emith_clear_msb(tmp, tmp, 22);
  4690. emith_move_r_r_ptr(arg2, CONTEXT_REG);
  4691. rcache_invalidate_tmp();
  4692. emith_call(p32x_sh2_write32); // XXX: use sh2_drc_write32?
  4693. // push PC
  4694. rcache_get_reg_arg(0, SHR_SP, NULL);
  4695. rcache_get_reg_arg(1, SHR_PC, NULL);
  4696. emith_move_r_r_ptr(arg2, CONTEXT_REG);
  4697. rcache_invalidate_tmp();
  4698. emith_call(p32x_sh2_write32);
  4699. // update I, cycles, do callback
  4700. emith_ctx_read(arg1, offsetof(SH2, pending_level));
  4701. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4702. emith_bic_r_imm(sr, I);
  4703. emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  4704. emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  4705. rcache_flush();
  4706. emith_move_r_r_ptr(arg0, CONTEXT_REG);
  4707. emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
  4708. // obtain new PC
  4709. tmp = rcache_get_reg_arg(1, SHR_VBR, &tmp2);
  4710. emith_add_r_r_r_lsl(arg0, tmp2, RET_REG, 2);
  4711. emith_call(sh2_drc_read32);
  4712. if (arg0 != RET_REG)
  4713. emith_move_r_r(arg0, RET_REG);
  4714. emith_call_cleanup();
  4715. rcache_invalidate();
  4716. emith_jump(sh2_drc_dispatcher);
  4717. emith_flush();
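// IRQ acceptance above follows the hardware sequence, roughly:
//   if (pending_level <= SR.I) return;    // masked, nothing to do
//   SP -= 8; write32(SP+4, SR); write32(SP, PC);
//   SR.I = pending_level;                 // raise the interrupt mask
//   cycles -= 13;                         // minimum acceptance latency
//   vector = sh2->irq_callback(sh2, level);
//   PC = read32(VBR + vector * 4);        // vector fetch, then dispatch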
  4718. // sh2_drc_entry(SH2 *sh2)
  4719. sh2_drc_entry = (void *)tcache_ptr;
  4720. emith_sh2_drc_entry();
  4721. emith_move_r_r_ptr(CONTEXT_REG, arg0); // move ctx, arg0
  4722. emit_do_static_regs(0, arg2);
  4723. emith_call(sh2_drc_test_irq);
  4724. emith_ctx_read(arg0, SHR_PC * 4);
  4725. emith_jump(sh2_drc_dispatcher);
  4726. emith_flush();
  4727. #ifdef DRC_SR_REG
  4728. // sh2_drc_save_sr(SH2 *sh2)
  4729. sh2_drc_save_sr = (void *)tcache_ptr;
  4730. tmp = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  4731. emith_write_r_r_offs(tmp, arg0, SHR_SR * 4);
  4732. rcache_invalidate();
  4733. emith_ret();
  4734. emith_flush();
  4735. // sh2_drc_restore_sr(SH2 *sh2)
  4736. sh2_drc_restore_sr = (void *)tcache_ptr;
  4737. tmp = rcache_get_reg(SHR_SR, RC_GR_WRITE, NULL);
  4738. emith_read_r_r_offs(tmp, arg0, SHR_SR * 4);
  4739. rcache_flush();
  4740. emith_ret();
  4741. emith_flush();
  4742. #endif
  4743. #ifdef PDB_NET
  4744. // debug
  4745. #define MAKE_READ_WRAPPER(func) { \
  4746. void *tmp = (void *)tcache_ptr; \
  4747. emith_push_ret(); \
  4748. emith_call(func); \
  4749. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  4750. emith_addf_r_r(arg2, arg0); \
  4751. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  4752. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  4753. emith_adc_r_imm(arg2, 0x01000000); \
  4754. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  4755. emith_pop_and_ret(); \
  4756. emith_flush(); \
  4757. func = tmp; \
  4758. }
  4759. #define MAKE_WRITE_WRAPPER(func) { \
  4760. void *tmp = (void *)tcache_ptr; \
  4761. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  4762. emith_addf_r_r(arg2, arg1); \
  4763. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  4764. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  4765. emith_adc_r_imm(arg2, 0x01000000); \
  4766. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  4767. emith_move_r_r_ptr(arg2, CONTEXT_REG); \
  4768. emith_jump(func); \
  4769. emith_flush(); \
  4770. func = tmp; \
  4771. }
  4772. MAKE_READ_WRAPPER(sh2_drc_read8);
  4773. MAKE_READ_WRAPPER(sh2_drc_read16);
  4774. MAKE_READ_WRAPPER(sh2_drc_read32);
  4775. MAKE_WRITE_WRAPPER(sh2_drc_write8);
  4776. MAKE_WRITE_WRAPPER(sh2_drc_write16);
  4777. MAKE_WRITE_WRAPPER(sh2_drc_write32);
  4778. MAKE_READ_WRAPPER(sh2_drc_read8_poll);
  4779. MAKE_READ_WRAPPER(sh2_drc_read16_poll);
  4780. MAKE_READ_WRAPPER(sh2_drc_read32_poll);
  4781. #endif
  4782. emith_pool_commit(0);
  4783. rcache_invalidate();
  4784. #if (DRC_DEBUG & 4)
  4785. host_dasm_new_symbol(sh2_drc_entry);
  4786. host_dasm_new_symbol(sh2_drc_dispatcher);
  4787. #if CALL_STACK
  4788. host_dasm_new_symbol(sh2_drc_dispatcher_call);
  4789. host_dasm_new_symbol(sh2_drc_dispatcher_return);
  4790. #endif
  4791. host_dasm_new_symbol(sh2_drc_exit);
  4792. host_dasm_new_symbol(sh2_drc_test_irq);
  4793. host_dasm_new_symbol(sh2_drc_write8);
  4794. host_dasm_new_symbol(sh2_drc_write16);
  4795. host_dasm_new_symbol(sh2_drc_write32);
  4796. host_dasm_new_symbol(sh2_drc_read8);
  4797. host_dasm_new_symbol(sh2_drc_read16);
  4798. host_dasm_new_symbol(sh2_drc_read32);
  4799. host_dasm_new_symbol(sh2_drc_read8_poll);
  4800. host_dasm_new_symbol(sh2_drc_read16_poll);
  4801. host_dasm_new_symbol(sh2_drc_read32_poll);
  4802. #ifdef DRC_SR_REG
  4803. host_dasm_new_symbol(sh2_drc_save_sr);
  4804. host_dasm_new_symbol(sh2_drc_restore_sr);
  4805. #endif
  4806. #endif
  4807. #if DRC_DEBUG
  4808. host_insn_count = hic;
  4809. #endif
  4810. }
  4811. static void sh2_smc_rm_blocks(u32 a, int len, int tcache_id, u32 shift)
  4812. {
  4813. struct block_list **blist, *entry, *next;
  4814. u32 mask = RAM_SIZE(tcache_id) - 1;
  4815. u32 wtmask = ~0x20000000; // writethrough area mask
  4816. u32 start_addr, end_addr;
  4817. u32 start_lit, end_lit;
  4818. struct block_desc *block;
  4819. #if (DRC_DEBUG & 2)
  4820. int removed = 0;
  4821. #endif
  4822. // ignore cache-through
  4823. a &= wtmask;
  4824. blist = &inval_lookup[tcache_id][(a & mask) / INVAL_PAGE_SIZE];
  4825. entry = *blist;
  4826. // go through the block list for this range
  4827. while (entry != NULL) {
  4828. next = entry->next;
  4829. block = entry->block;
  4830. start_addr = block->addr & wtmask;
  4831. end_addr = start_addr + block->size;
  4832. start_lit = block->addr_lit & wtmask;
  4833. end_lit = start_lit + block->size_lit;
  4834. // disable/delete block if it covers the modified address
  4835. if ((start_addr < a+len && a < end_addr) ||
  4836. (start_lit < a+len && a < end_lit))
  4837. {
  4838. dbg(2, "smc remove @%08x", a);
  4839. end_addr = (start_lit < a+len && block->size_lit ? a : 0);
  4840. dr_rm_block_entry(block, tcache_id, end_addr, 0);
  4841. #if (DRC_DEBUG & 2)
  4842. removed = 1;
  4843. #endif
  4844. }
  4845. entry = next;
  4846. }
  4847. #if (DRC_DEBUG & 2)
  4848. if (!removed)
  4849. dbg(2, "rm_blocks called @%08x, no work?", a);
  4850. #endif
  4851. #if BRANCH_CACHE
  4852. if (tcache_id)
  4853. memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  4854. else {
  4855. memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  4856. memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
  4857. }
  4858. #endif
  4859. #if CALL_STACK
  4860. if (tcache_id) {
  4861. memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
  4862. sh2s[tcache_id-1].rts_cache_idx = 0;
  4863. } else {
  4864. memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
  4865. memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
  4866. sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  4867. }
  4868. #endif
  4869. }
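// The range test above is the usual half-open interval overlap check,
//   [start, end) intersects [a, a+len)  iff  start < a+len && a < end,
// applied to both the code area and the literal pool of each block,
// with bit 29 masked out so cached and cache-through writes to the
// same RAM invalidate the same blocks.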
  4870. void sh2_drc_wcheck_ram(u32 a, unsigned len, SH2 *sh2)
  4871. {
  4872. sh2_smc_rm_blocks(a, len, 0, SH2_DRCBLK_RAM_SHIFT);
  4873. }
  4874. void sh2_drc_wcheck_da(u32 a, unsigned len, SH2 *sh2)
  4875. {
  4876. sh2_smc_rm_blocks(a, len, 1 + sh2->is_slave, SH2_DRCBLK_DA_SHIFT);
  4877. }
  4878. int sh2_execute_drc(SH2 *sh2c, int cycles)
  4879. {
  4880. int ret_cycles;
4881. // cycles are kept in the unused upper 20 bits of SHR_SR
4882. // bit 11 holds T saved for the delay slot
4883. // the remaining bits are the usual SH2 flags
  4884. sh2c->sr &= 0x3f3;
  4885. sh2c->sr |= cycles << 12;
  4886. sh2_drc_entry(sh2c);
  4887. // TODO: irq cycles
  4888. ret_cycles = (int32_t)sh2c->sr >> 12;
  4889. if (ret_cycles > 0)
  4890. dbg(1, "warning: drc returned with cycles: %d, pc %08x", ret_cycles, sh2c->pc);
  4891. sh2c->sr &= 0x3f3;
  4892. return ret_cycles;
  4893. }
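// Worked example of the SR cycle packing: entering with cycles = 1000
// and flag bits 0x0e1 gives sr = 0x0e1 | (1000 << 12) = 0x003e80e1;
// on exit (int32_t)sr >> 12 recovers the remaining budget, and the
// arithmetic shift keeps a negative (overrun) count intact.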
  4894. static void block_stats(void)
  4895. {
  4896. #if (DRC_DEBUG & 2)
  4897. int c, b, i;
  4898. long total = 0;
  4899. printf("block stats:\n");
  4900. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  4901. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  4902. if (block_tables[b][i].addr != 0)
  4903. total += block_tables[b][i].refcount;
  4904. }
  4905. printf("total: %ld\n",total);
  4906. for (c = 0; c < 20; c++) {
  4907. struct block_desc *blk, *maxb = NULL;
  4908. int max = 0;
  4909. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  4910. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  4911. if ((blk = &block_tables[b][i])->addr != 0 && blk->refcount > max) {
  4912. max = blk->refcount;
  4913. maxb = blk;
  4914. }
  4915. }
  4916. if (maxb == NULL)
  4917. break;
  4918. printf("%08x %p %9d %2.3f%%\n", maxb->addr, maxb->tcache_ptr, maxb->refcount,
  4919. (double)maxb->refcount / total * 100.0);
  4920. maxb->refcount = 0;
  4921. }
  4922. for (b = 0; b < ARRAY_SIZE(block_tables); b++)
  4923. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  4924. block_tables[b][i].refcount = 0;
  4925. #endif
  4926. }
  4927. void entry_stats(void)
  4928. {
  4929. #if (DRC_DEBUG & 32)
  4930. int c, b, i, j;
  4931. long total = 0;
  4932. printf("block entry stats:\n");
  4933. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  4934. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  4935. for (j = 0; j < block_tables[b][i].entry_count; j++)
  4936. total += block_tables[b][i].entryp[j].entry_count;
  4937. }
  4938. printf("total: %ld\n",total);
  4939. for (c = 0; c < 20; c++) {
  4940. struct block_desc *blk;
  4941. struct block_entry *maxb = NULL;
  4942. int max = 0;
  4943. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  4944. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size) {
  4945. blk = &block_tables[b][i];
  4946. for (j = 0; j < blk->entry_count; j++)
  4947. if (blk->entryp[j].entry_count > max) {
  4948. max = blk->entryp[j].entry_count;
  4949. maxb = &blk->entryp[j];
  4950. }
  4951. }
  4952. }
  4953. if (maxb == NULL)
  4954. break;
  4955. printf("%08x %p %9d %2.3f%%\n", maxb->pc, maxb->tcache_ptr, maxb->entry_count,
  4956. (double)100 * maxb->entry_count / total);
  4957. maxb->entry_count = 0;
  4958. }
  4959. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  4960. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  4961. for (j = 0; j < block_tables[b][i].entry_count; j++)
  4962. block_tables[b][i].entryp[j].entry_count = 0;
  4963. }
  4964. #endif
  4965. }
  4966. static void backtrace(void)
  4967. {
  4968. #if (DRC_DEBUG & 1024)
  4969. int i;
  4970. printf("backtrace master:\n");
  4971. for (i = 0; i < ARRAY_SIZE(csh2[0]); i++)
  4972. SH2_DUMP(&csh2[0][i], "bt msh2");
  4973. printf("backtrace slave:\n");
  4974. for (i = 0; i < ARRAY_SIZE(csh2[1]); i++)
  4975. SH2_DUMP(&csh2[1][i], "bt ssh2");
  4976. #endif
  4977. }
  4978. static void state_dump(void)
  4979. {
  4980. #if (DRC_DEBUG & 2048)
  4981. int i;
  4982. SH2_DUMP(&sh2s[0], "master");
  4983. printf("VBR msh2: %x\n", sh2s[0].vbr);
  4984. for (i = 0; i < 0x60; i++) {
  4985. printf("%08x ",p32x_sh2_read32(sh2s[0].vbr + i*4, &sh2s[0]));
  4986. if ((i+1) % 8 == 0) printf("\n");
  4987. }
  4988. printf("stack msh2: %x\n", sh2s[0].r[15]);
  4989. for (i = -0x30; i < 0x30; i++) {
  4990. printf("%08x ",p32x_sh2_read32(sh2s[0].r[15] + i*4, &sh2s[0]));
  4991. if ((i+1) % 8 == 0) printf("\n");
  4992. }
  4993. SH2_DUMP(&sh2s[1], "slave");
  4994. printf("VBR ssh2: %x\n", sh2s[1].vbr);
  4995. for (i = 0; i < 0x60; i++) {
  4996. printf("%08x ",p32x_sh2_read32(sh2s[1].vbr + i*4, &sh2s[1]));
  4997. if ((i+1) % 8 == 0) printf("\n");
  4998. }
  4999. printf("stack ssh2: %x\n", sh2s[1].r[15]);
  5000. for (i = -0x30; i < 0x30; i++) {
  5001. printf("%08x ",p32x_sh2_read32(sh2s[1].r[15] + i*4, &sh2s[1]));
  5002. if ((i+1) % 8 == 0) printf("\n");
  5003. }
  5004. #endif
  5005. }
  5006. static void bcache_stats(void)
  5007. {
  5008. #if (DRC_DEBUG & 128)
  5009. int i;
  5010. #if CALL_STACK
  5011. for (i = 1; i < ARRAY_SIZE(sh2s->rts_cache); i++)
  5012. if (sh2s[0].rts_cache[i].pc == -1 && sh2s[1].rts_cache[i].pc == -1) break;
  5013. printf("return cache hits:%d misses:%d depth: %d index: %d/%d\n", rchit, rcmiss, i,sh2s[0].rts_cache_idx,sh2s[1].rts_cache_idx);
  5014. for (i = 0; i < ARRAY_SIZE(sh2s[0].rts_cache); i++) {
  5015. printf("%08x ",sh2s[0].rts_cache[i].pc);
  5016. if ((i+1) % 8 == 0) printf("\n");
  5017. }
  5018. for (i = 0; i < ARRAY_SIZE(sh2s[1].rts_cache); i++) {
  5019. printf("%08x ",sh2s[1].rts_cache[i].pc);
  5020. if ((i+1) % 8 == 0) printf("\n");
  5021. }
  5022. #endif
  5023. #if BRANCH_CACHE
  5024. printf("branch cache hits:%d misses:%d\n", bchit, bcmiss);
  5025. printf("branch cache master:\n");
  5026. for (i = 0; i < ARRAY_SIZE(sh2s[0].branch_cache); i++) {
  5027. printf("%08x ",sh2s[0].branch_cache[i].pc);
  5028. if ((i+1) % 8 == 0) printf("\n");
  5029. }
  5030. printf("branch cache slave:\n");
  5031. for (i = 0; i < ARRAY_SIZE(sh2s[1].branch_cache); i++) {
  5032. printf("%08x ",sh2s[1].branch_cache[i].pc);
  5033. if ((i+1) % 8 == 0) printf("\n");
  5034. }
  5035. #endif
  5036. #endif
  5037. }
  5038. void sh2_drc_flush_all(void)
  5039. {
  5040. backtrace();
  5041. state_dump();
  5042. block_stats();
  5043. entry_stats();
  5044. bcache_stats();
  5045. dr_flush_tcache(0);
  5046. dr_flush_tcache(1);
  5047. dr_flush_tcache(2);
  5048. Pico32x.emu_flags &= ~P32XF_DRC_ROM_C;
  5049. }
  5050. void sh2_drc_mem_setup(SH2 *sh2)
  5051. {
  5052. // fill the DRC-only convenience pointers
  5053. sh2->p_drcblk_da = Pico32xMem->drcblk_da[!!sh2->is_slave];
  5054. sh2->p_drcblk_ram = Pico32xMem->drcblk_ram;
  5055. }
  5056. int sh2_drc_init(SH2 *sh2)
  5057. {
  5058. int i;
  5059. if (block_tables[0] == NULL)
  5060. {
  5061. for (i = 0; i < TCACHE_BUFFERS; i++) {
  5062. block_tables[i] = calloc(BLOCK_MAX_COUNT(i), sizeof(*block_tables[0]));
  5063. if (block_tables[i] == NULL)
  5064. goto fail;
  5065. entry_tables[i] = calloc(ENTRY_MAX_COUNT(i), sizeof(*entry_tables[0]));
  5066. if (entry_tables[i] == NULL)
  5067. goto fail;
  5068. block_link_pool[i] = calloc(BLOCK_LINK_MAX_COUNT(i),
  5069. sizeof(*block_link_pool[0]));
  5070. if (block_link_pool[i] == NULL)
  5071. goto fail;
  5072. inval_lookup[i] = calloc(RAM_SIZE(i) / INVAL_PAGE_SIZE,
  5073. sizeof(inval_lookup[0]));
  5074. if (inval_lookup[i] == NULL)
  5075. goto fail;
  5076. hash_tables[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*hash_tables[0]));
  5077. if (hash_tables[i] == NULL)
  5078. goto fail;
  5079. unresolved_links[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*unresolved_links[0]));
  5080. if (unresolved_links[i] == NULL)
  5081. goto fail;
  5082. //atexit(sh2_drc_finish);
  5083. RING_INIT(&block_ring[i], block_tables[i], BLOCK_MAX_COUNT(i));
  5084. RING_INIT(&entry_ring[i], entry_tables[i], ENTRY_MAX_COUNT(i));
  5085. }
  5086. block_list_pool = calloc(BLOCK_LIST_MAX_COUNT, sizeof(*block_list_pool));
  5087. if (block_list_pool == NULL)
  5088. goto fail;
  5089. block_list_pool_count = 0;
  5090. blist_free = NULL;
  5091. memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
  5092. memset(blink_free, 0, sizeof(blink_free));
  5093. drc_cmn_init();
  5094. rcache_init();
  5095. tcache_ptr = tcache;
  5096. sh2_generate_utils();
  5097. host_instructions_updated(tcache, tcache_ptr);
  5098. emith_update_cache();
  5099. i = tcache_ptr - tcache;
  5100. RING_INIT(&tcache_ring[0], tcache_ptr, tcache_sizes[0] - i);
  5101. for (i = 1; i < ARRAY_SIZE(tcache_ring); i++) {
  5102. RING_INIT(&tcache_ring[i], tcache_ring[i-1].base + tcache_ring[i-1].size,
  5103. tcache_sizes[i]);
  5104. }
  5105. #if (DRC_DEBUG & 4)
  5106. for (i = 0; i < ARRAY_SIZE(block_tables); i++)
  5107. tcache_dsm_ptrs[i] = tcache_ring[i].base;
  5108. // disasm the utils
  5109. tcache_dsm_ptrs[0] = tcache;
  5110. do_host_disasm(0);
  5111. fflush(stdout);
  5112. #endif
  5113. #if (DRC_DEBUG & 1)
  5114. hash_collisions = 0;
  5115. #endif
  5116. }
  5117. memset(sh2->branch_cache, -1, sizeof(sh2->branch_cache));
  5118. memset(sh2->rts_cache, -1, sizeof(sh2->rts_cache));
  5119. sh2->rts_cache_idx = 0;
  5120. return 0;
  5121. fail:
  5122. sh2_drc_finish(sh2);
  5123. return -1;
  5124. }
  5125. void sh2_drc_finish(SH2 *sh2)
  5126. {
  5127. int i;
  5128. if (block_tables[0] == NULL)
  5129. return;
  5130. #if (DRC_DEBUG & (256|512))
  5131. if (trace[0]) fclose(trace[0]);
  5132. if (trace[1]) fclose(trace[1]);
  5133. trace[0] = trace[1] = NULL;
  5134. #endif
  5135. #if (DRC_DEBUG & 4)
  5136. for (i = 0; i < TCACHE_BUFFERS; i++) {
  5137. printf("~~~ tcache %d\n", i);
  5138. #if 0
  5139. if (tcache_ring[i].first < tcache_ring[i].next) {
  5140. tcache_dsm_ptrs[i] = tcache_ring[i].first;
  5141. tcache_ptr = tcache_ring[i].next;
  5142. do_host_disasm(i);
  5143. } else if (tcache_ring[i].used) {
  5144. tcache_dsm_ptrs[i] = tcache_ring[i].first;
  5145. tcache_ptr = tcache_ring[i].base + tcache_ring[i].size;
  5146. do_host_disasm(i);
  5147. tcache_dsm_ptrs[i] = tcache_ring[i].base;
  5148. tcache_ptr = tcache_ring[i].next;
  5149. do_host_disasm(i);
  5150. }
  5151. #endif
  5152. printf("max links: %d\n", block_link_pool_counts[i]);
  5153. }
  5154. printf("max block list: %d\n", block_list_pool_count);
  5155. #endif
  5156. sh2_drc_flush_all();
  5157. for (i = 0; i < TCACHE_BUFFERS; i++) {
  5158. if (block_tables[i] != NULL)
  5159. free(block_tables[i]);
  5160. block_tables[i] = NULL;
  5161. if (entry_tables[i] != NULL)
  5162. free(entry_tables[i]);
  5163. entry_tables[i] = NULL;
  5164. if (block_link_pool[i] != NULL)
  5165. free(block_link_pool[i]);
  5166. block_link_pool[i] = NULL;
  5167. blink_free[i] = NULL;
  5168. if (inval_lookup[i] != NULL)
  5169. free(inval_lookup[i]);
  5170. inval_lookup[i] = NULL;
  5171. if (hash_tables[i] != NULL) {
  5172. free(hash_tables[i]);
  5173. hash_tables[i] = NULL;
  5174. }
  5175. }
  5176. if (block_list_pool != NULL)
  5177. free(block_list_pool);
  5178. block_list_pool = NULL;
  5179. blist_free = NULL;
  5180. drc_cmn_cleanup();
  5181. }
  5182. #endif /* DRC_SH2 */
  5183. static void *dr_get_pc_base(u32 pc, SH2 *sh2)
  5184. {
  5185. void *ret;
  5186. u32 mask = 0;
  5187. ret = p32x_sh2_get_mem_ptr(pc, &mask, sh2);
  5188. if (ret == (void *)-1)
  5189. return ret;
  5190. return (char *)ret - (pc & ~mask);
  5191. }
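// The returned pointer is biased so that indexing with the full PC
// lands on the right word: with base = ptr - (pc & ~mask),
//   base + pc == ptr + (pc & mask)
// so the fetch macros can use pc directly, without masking each fetch.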
  5192. u16 scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc_out,
  5193. u32 *base_literals_out, u32 *end_literals_out)
  5194. {
  5195. u16 *dr_pc_base;
  5196. u32 pc, op, tmp;
  5197. u32 end_pc, end_literals = 0;
  5198. u32 lowest_literal = 0;
  5199. u32 lowest_mova = 0;
  5200. struct op_data *opd;
  5201. int next_is_delay = 0;
  5202. int end_block = 0;
  5203. int i, i_end;
  5204. u32 crc = 0;
5205. // state for the 2nd pass (loop detection, T propagation)
  5206. int last_btarget; // loop detector
  5207. enum { T_UNKNOWN, T_CLEAR, T_SET } t; // T propagation state
  5208. memset(op_flags, 0, sizeof(*op_flags) * BLOCK_INSN_LIMIT);
  5209. op_flags[0] |= OF_BTARGET; // block start is always a target
  5210. dr_pc_base = dr_get_pc_base(base_pc, &sh2s[!!is_slave]);
  5211. // 1st pass: disassemble
  5212. for (i = 0, pc = base_pc; ; i++, pc += 2) {
  5213. // we need an ops[] entry after the last one initialized,
  5214. // so do it before end_block checks
  5215. opd = &ops[i];
  5216. opd->op = OP_UNHANDLED;
  5217. opd->rm = -1;
  5218. opd->source = opd->dest = 0;
  5219. opd->cycles = 1;
  5220. opd->imm = 0;
  5221. if (next_is_delay) {
  5222. op_flags[i] |= OF_DELAY_OP;
  5223. next_is_delay = 0;
  5224. }
  5225. else if (end_block || i >= BLOCK_INSN_LIMIT - 2)
  5226. break;
  5227. else if ((lowest_mova && lowest_mova <= pc) ||
  5228. (lowest_literal && lowest_literal <= pc))
  5229. break; // text area collides with data area
  5230. op = FETCH_OP(pc);
  5231. switch ((op & 0xf000) >> 12)
  5232. {
  5233. /////////////////////////////////////////////
  5234. case 0x00:
  5235. switch (op & 0x0f)
  5236. {
  5237. case 0x02:
  5238. switch (GET_Fx())
  5239. {
  5240. case 0: // STC SR,Rn 0000nnnn00000010
  5241. tmp = BITMASK2(SHR_SR, SHR_T);
  5242. break;
  5243. case 1: // STC GBR,Rn 0000nnnn00010010
  5244. tmp = BITMASK1(SHR_GBR);
  5245. break;
  5246. case 2: // STC VBR,Rn 0000nnnn00100010
  5247. tmp = BITMASK1(SHR_VBR);
  5248. break;
  5249. default:
  5250. goto undefined;
  5251. }
  5252. opd->op = OP_MOVE;
  5253. opd->source = tmp;
  5254. opd->dest = BITMASK1(GET_Rn());
  5255. break;
  5256. case 0x03:
  5257. CHECK_UNHANDLED_BITS(0xd0, undefined);
  5258. // BRAF Rm 0000mmmm00100011
  5259. // BSRF Rm 0000mmmm00000011
  5260. opd->op = OP_BRANCH_RF;
  5261. opd->rm = GET_Rn();
  5262. opd->source = BITMASK2(SHR_PC, opd->rm);
  5263. opd->dest = BITMASK1(SHR_PC);
  5264. if (!(op & 0x20))
  5265. opd->dest |= BITMASK1(SHR_PR);
  5266. opd->cycles = 2;
  5267. next_is_delay = 1;
  5268. if (!(opd->dest & BITMASK1(SHR_PR)))
  5269. end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
  5270. else
  5271. op_flags[i+1+next_is_delay] |= OF_BTARGET;
  5272. break;
  5273. case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
  5274. case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
  5275. case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
  5276. opd->source = BITMASK3(GET_Rm(), SHR_R0, GET_Rn());
  5277. opd->dest = BITMASK1(SHR_MEM);
  5278. break;
  5279. case 0x07:
  5280. // MUL.L Rm,Rn 0000nnnnmmmm0111
  5281. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5282. opd->dest = BITMASK1(SHR_MACL);
  5283. opd->cycles = 2;
  5284. break;
  5285. case 0x08:
  5286. CHECK_UNHANDLED_BITS(0xf00, undefined);
  5287. switch (GET_Fx())
  5288. {
  5289. case 0: // CLRT 0000000000001000
  5290. opd->op = OP_SETCLRT;
  5291. opd->dest = BITMASK1(SHR_T);
  5292. opd->imm = 0;
  5293. break;
  5294. case 1: // SETT 0000000000011000
  5295. opd->op = OP_SETCLRT;
  5296. opd->dest = BITMASK1(SHR_T);
  5297. opd->imm = 1;
  5298. break;
  5299. case 2: // CLRMAC 0000000000101000
  5300. opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
  5301. break;
  5302. default:
  5303. goto undefined;
  5304. }
  5305. break;
  5306. case 0x09:
  5307. switch (GET_Fx())
  5308. {
  5309. case 0: // NOP 0000000000001001
  5310. CHECK_UNHANDLED_BITS(0xf00, undefined);
  5311. break;
  5312. case 1: // DIV0U 0000000000011001
  5313. CHECK_UNHANDLED_BITS(0xf00, undefined);
  5314. opd->source = BITMASK1(SHR_SR);
  5315. opd->dest = BITMASK2(SHR_SR, SHR_T);
  5316. break;
  5317. case 2: // MOVT Rn 0000nnnn00101001
  5318. opd->source = BITMASK1(SHR_T);
  5319. opd->dest = BITMASK1(GET_Rn());
  5320. break;
  5321. default:
  5322. goto undefined;
  5323. }
  5324. break;
  5325. case 0x0a:
  5326. switch (GET_Fx())
  5327. {
  5328. case 0: // STS MACH,Rn 0000nnnn00001010
  5329. tmp = SHR_MACH;
  5330. break;
  5331. case 1: // STS MACL,Rn 0000nnnn00011010
  5332. tmp = SHR_MACL;
  5333. break;
  5334. case 2: // STS PR,Rn 0000nnnn00101010
  5335. tmp = SHR_PR;
  5336. break;
  5337. default:
  5338. goto undefined;
  5339. }
  5340. opd->op = OP_MOVE;
  5341. opd->source = BITMASK1(tmp);
  5342. opd->dest = BITMASK1(GET_Rn());
  5343. break;
  5344. case 0x0b:
  5345. CHECK_UNHANDLED_BITS(0xf00, undefined);
  5346. switch (GET_Fx())
  5347. {
  5348. case 0: // RTS 0000000000001011
  5349. opd->op = OP_BRANCH_R;
  5350. opd->rm = SHR_PR;
  5351. opd->source = BITMASK1(opd->rm);
  5352. opd->dest = BITMASK1(SHR_PC);
  5353. opd->cycles = 2;
  5354. next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        case 1: // SLEEP 0000000000011011
          opd->op = OP_SLEEP;
          end_block = 1;
          break;
        case 2: // RTE 0000000000101011
          opd->op = OP_RTE;
          opd->source = BITMASK1(SHR_SP);
          opd->dest = BITMASK4(SHR_SP, SHR_SR, SHR_T, SHR_PC);
          opd->cycles = 4;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(GET_Rn());
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x01:
      // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), GET_Rn());
      opd->dest = BITMASK1(SHR_MEM);
      opd->imm = (op & 0x0f) * 4;
      break;
    /////////////////////////////////////////////
    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
        opd->source = BITMASK3(SHR_SR, GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_SR, SHR_T);
        break;
      case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
      case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
      case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
        opd->source = BITMASK4(GET_Rm(), GET_Rn(), SHR_SR, SHR_T);
        opd->dest = BITMASK3(GET_Rn(), SHR_SR, SHR_T);
        break;
      case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
      case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
        opd->cycles = 2;
        break;
      case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
      case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn 0100nnnn00000000
        case 2: // SHAL Rn 0100nnnn00100000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // DT Rn 0100nnnn00010000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
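          // DT is tagged as a "delaying" insn for the loop detector below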
          op_flags[i] |= OF_DELAY_INSN;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn 0100nnnn00000001
        case 2: // SHAR Rn 0100nnnn00100001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // CMP/PZ Rn 0100nnnn00010001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->cycles = 2;
          break;
        case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
          tmp = BITMASK1(SHR_GBR);
          opd->cycles = 2;
          break;
        case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
          tmp = BITMASK1(SHR_VBR);
          opd->cycles = 2;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn()) | tmp;
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn 0100nnnn00000100
        case 0x05: // ROTR Rn 0100nnnn00000101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x24: // ROTCL Rn 0100nnnn00100100
        case 0x25: // ROTCR Rn 0100nnnn00100101
          opd->source = BITMASK2(GET_Rn(), SHR_T);
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x15: // CMP/PL Rn 0100nnnn00010101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
          tmp = BITMASK1(SHR_GBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
          tmp = BITMASK1(SHR_VBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), SHR_MEM);
        opd->dest = BITMASK1(GET_Rn()) | tmp;
        break;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0:
          // SHLL2 Rn 0100nnnn00001000
          // SHLR2 Rn 0100nnnn00001001
          break;
        case 1:
          // SHLL8 Rn 0100nnnn00011000
          // SHLR8 Rn 0100nnnn00011001
          break;
        case 2:
          // SHLL16 Rn 0100nnnn00101000
          // SHLR16 Rn 0100nnnn00101001
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp = SHR_MACL;
          break;
        case 2: // LDS Rm,PR 0100mmmm00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0b:
        switch (GET_Fx())
        {
        case 0: // JSR @Rm 0100mmmm00001011
          opd->dest = BITMASK1(SHR_PR);
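          // fallthrough: JSR shares the JMP path below, only adding PR as a dest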
        case 2: // JMP @Rm 0100mmmm00101011
          opd->op = OP_BRANCH_R;
          opd->rm = GET_Rn();
          opd->source = BITMASK1(opd->rm);
          opd->dest |= BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
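          // JMP may end the block; for JSR the return address (the insn
          // after the delay slot) becomes a branch target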
          if (!(opd->dest & BITMASK1(SHR_PR)))
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          else
            op_flags[i+1+next_is_delay] |= OF_BTARGET;
          break;
        case 1: // TAS.B @Rn 0100nnnn00011011
          opd->source = BITMASK2(GET_Rn(), SHR_MEM);
          opd->dest = BITMASK2(SHR_T, SHR_MEM);
          opd->cycles = 4;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR 0100mmmm00001110
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_LDC;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = tmp;
        break;
      case 0x0f:
        // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x05:
      // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (op & 0x0f) * 4;
      op_flags[i] |= OF_POLL_INSN;
      break;
    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
        opd->dest = BITMASK2(GET_Rm(), GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        break;
      case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
        opd->dest = BITMASK1(GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
        opd->source = BITMASK2(GET_Rm(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
        opd->op = OP_MOVE;
        goto arith_rmrn;
      case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
      case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
      case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
      case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
      case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
      case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
      case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
      case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
      arith_rmrn:
        opd->source = BITMASK1(GET_Rm());
        opd->dest = BITMASK1(GET_Rn());
        break;
      }
      break;
    /////////////////////////////////////////////
    case 0x07:
      // ADD #imm,Rn 0111nnnniiiiiiii
      opd->source = opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;
    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
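      // note: in the @(disp,Rn)/@(disp,Rm) forms below the register field
      // occupies the mmmm bit position, hence GET_Rm() even where the
      // mnemonic names Rn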
      case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f);
        break;
      case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f) * 2;
        break;
      case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f) * 2;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = (s8)op;
        break;
      case 0x0d00: // BT/S label 10001101dddddddd
      case 0x0f00: // BF/S label 10001111dddddddd
        next_is_delay = 1;
        // fallthrough
      case 0x0900: // BT label 10001001dddddddd
      case 0x0b00: // BF label 10001011dddddddd
        opd->op = (op & 0x0200) ? OP_BRANCH_CF : OP_BRANCH_CT;
        opd->source = BITMASK2(SHR_PC, SHR_T);
        opd->dest = BITMASK1(SHR_PC);
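        // sign-extend the 8-bit displacement and double it in one step:
        // (s32)(op << 24) >> 23 == ((s8)op) * 2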
        opd->imm = ((signed int)(op << 24) >> 23);
        opd->imm += pc + 4;
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x09:
      // MOV.W @(disp,PC),Rn 1001nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = tmp + 2 + (op & 0xff) * 2;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 1;
      break;
    /////////////////////////////////////////////
    case 0x0b:
      // BSR label 1011dddddddddddd
      opd->dest = BITMASK1(SHR_PR);
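      // fallthrough: BSR shares the BRA path below, only adding PR as a dest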
    case 0x0a:
      // BRA label 1010dddddddddddd
      opd->op = OP_BRANCH;
      opd->source = BITMASK1(SHR_PC);
      opd->dest |= BITMASK1(SHR_PC);
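      // sign-extend the 12-bit displacement and double it in one step:
      // (s32)(op << 20) >> 19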
      opd->imm = ((signed int)(op << 20) >> 19);
      opd->imm += pc + 4;
      opd->cycles = 2;
      next_is_delay = 1;
      if (!(opd->dest & BITMASK1(SHR_PR))) {
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2) {
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
          if (opd->imm <= pc)
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        } else
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
      } else
        op_flags[i+1+next_is_delay] |= OF_BTARGET;
      break;
    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
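        // size encodes the access width (0=byte, 1=word, 2=long);
        // the displacement is scaled accordingly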
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        break;
      case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0300: // TRAPA #imm 11000011iiiiiiii
        opd->op = OP_TRAPA;
        opd->source = BITMASK4(SHR_SP, SHR_PC, SHR_SR, SHR_T);
        opd->dest = BITMASK2(SHR_SP, SHR_PC);
        opd->imm = (op & 0xff);
        opd->cycles = 8;
        op_flags[i+1] |= OF_BTARGET;
        break;
      case 0x0700: // MOVA @(disp,PC),R0 11000111dddddddd
        opd->op = OP_MOVA;
        tmp = pc + 2;
        if (op_flags[i] & OF_DELAY_OP) {
          if (ops[i-1].op == OP_BRANCH)
            tmp = ops[i-1].imm;
          else if (ops[i-1].op != OP_BRANCH_N)
            tmp = 0;
        }
        opd->dest = BITMASK1(SHR_R0);
        if (tmp) {
          opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
          if (opd->imm >= base_pc) {
            if (lowest_mova == 0 || opd->imm < lowest_mova)
              lowest_mova = opd->imm;
          }
        }
        break;
      case 0x0800: // TST #imm,R0 11001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        break;
      case 0x0900: // AND #imm,R0 11001001iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0b00: // OR #imm,R0 11001011iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        op_flags[i] |= OF_POLL_INSN;
        opd->cycles = 3;
        break;
      case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
      case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
      case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = op & 0xff;
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x0d:
      // MOV.L @(disp,PC),Rn 1101nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 2;
      break;
    /////////////////////////////////////////////
    case 0x0e:
      // MOV #imm,Rn 1110nnnniiiiiiii
      opd->op = OP_LOAD_CONST;
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;
    default:
    undefined:
      opd->op = OP_UNDEFINED;
      // an unhandled instruction is probably not code if it's not the 1st insn
      if (!(op_flags[i] & OF_DELAY_OP) && pc != base_pc)
        goto end;
      break;
    }
    if (op_flags[i] & OF_DELAY_OP) {
      switch (opd->op) {
      case OP_BRANCH:
      case OP_BRANCH_N:
      case OP_BRANCH_CT:
      case OP_BRANCH_CF:
      case OP_BRANCH_R:
      case OP_BRANCH_RF:
        elprintf(EL_ANOMALY, "%csh2 drc: branch in DS @ %08x",
          is_slave ? 's' : 'm', pc);
        opd->op = OP_UNDEFINED;
        op_flags[i] |= OF_B_IN_DS;
        next_is_delay = 0;
        break;
      }
    }
  }
end:
  i_end = i;
  end_pc = pc;
  // 2nd pass: some analysis
  lowest_literal = end_literals = lowest_mova = 0;
  t = T_UNKNOWN;
  last_btarget = 0;
  op = 0; // delay/poll insns counter
  for (i = 0, pc = base_pc; i < i_end; i++, pc += 2) {
    opd = &ops[i];
    crc += FETCH_OP(pc);
    // propagate T (TODO: DIV0U)
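    // while T is statically known, a conditional branch degenerates:
    // known-taken becomes OP_BRANCH, known-not-taken becomes OP_BRANCH_N;
    // falling through a BT implies T was clear, through a BF that it was set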
    if ((op_flags[i] & OF_BTARGET) || (opd->dest & BITMASK1(SHR_T)))
      t = T_UNKNOWN;
    if ((opd->op == OP_BRANCH_CT && t == T_SET) ||
        (opd->op == OP_BRANCH_CF && t == T_CLEAR)) {
      opd->op = OP_BRANCH;
      opd->cycles = (op_flags[i + 1] & OF_DELAY_OP) ? 2 : 3;
    } else if ((opd->op == OP_BRANCH_CT && t == T_CLEAR) ||
               (opd->op == OP_BRANCH_CF && t == T_SET))
      opd->op = OP_BRANCH_N;
    else if ((opd->op == OP_SETCLRT && !opd->imm) || opd->op == OP_BRANCH_CT)
      t = T_CLEAR;
    else if ((opd->op == OP_SETCLRT && opd->imm) || opd->op == OP_BRANCH_CF)
      t = T_SET;
    // "overscan" detection: unreachable code after unconditional branch
    // this can happen if the insn after a forward branch isn't a local target
    if (OP_ISBRAUC(opd->op)) {
      if (op_flags[i + 1] & OF_DELAY_OP) {
        if (i_end > i + 2 && !(op_flags[i + 2] & OF_BTARGET))
          i_end = i + 2;
      } else {
        if (i_end > i + 1 && !(op_flags[i + 1] & OF_BTARGET))
          i_end = i + 1;
      }
    }
    // literal pool size detection
    if (opd->op == OP_MOVA && opd->imm >= base_pc)
      if (lowest_mova == 0 || opd->imm < lowest_mova)
        lowest_mova = opd->imm;
    if (opd->op == OP_LOAD_POOL) {
      if (opd->imm >= base_pc && opd->imm < end_pc + MAX_LITERAL_OFFSET) {
        if (end_literals < opd->imm + opd->size * 2)
          end_literals = opd->imm + opd->size * 2;
        if (lowest_literal == 0 || lowest_literal > opd->imm)
          lowest_literal = opd->imm;
        if (opd->size == 2) {
          // tweak for NFL: treat a 32bit literal as an address and check if it
          // points to the literal space. In that case handle it like MOVA.
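          // (bit 29 presumably selects the SH-2 cache-through mirror, so it
          // is masked off to compare plain addresses)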
          tmp = FETCH32(opd->imm) & ~0x20000000; // MUST ignore wt bit here
          if (tmp >= end_pc && tmp < end_pc + MAX_LITERAL_OFFSET)
            if (lowest_mova == 0 || tmp < lowest_mova)
              lowest_mova = tmp;
        }
      }
    }
#if LOOP_DETECTION
    // inner loop detection
    // 1. a loop always starts with a branch target (for the backwards jump)
    // 2. it doesn't contain more than one polling and/or delaying insn
    // 3. it doesn't contain unconditional jumps
    // 4. no overlapping of loops
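    // e.g. a polling loop such as "loop: mov.l @r4,r0; tst r0,r0; bt loop"
    // starts at a branch target, counts one polling insn and contains no
    // unconditional jump, so 'loop' would be marked OF_LOOP here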
    if (op_flags[i] & OF_BTARGET) {
      last_btarget = i; // possible loop starting point
      op = 0;
    }
    // XXX let's hope nobody is putting a delay or poll insn in a delay slot :-/
    if (OP_ISBRAIMM(opd->op)) {
      // BSR, BRA, BT, BF with immediate target
      int i_tmp = (opd->imm - base_pc) / 2; // branch target, index in ops
      if (i_tmp == last_btarget) // candidate for basic loop optimizer
        op_flags[i_tmp] |= OF_BASIC_LOOP;
      if (i_tmp == last_btarget && op <= 1) {
        op_flags[i_tmp] |= OF_LOOP; // conditions met -> mark loop
        last_btarget = i+1; // condition 4
      } else if (opd->op == OP_BRANCH)
        last_btarget = i+1; // condition 3
    }
    else if (OP_ISBRAIND(opd->op))
      // BRAF, BSRF, JMP, JSR: register-indirect, treat as an off-limits jump
      last_btarget = i+1; // condition 3
    else if (op_flags[i] & (OF_POLL_INSN|OF_DELAY_INSN))
      op++; // condition 2
#endif
  }
  end_pc = pc;
  // end_literals is used to decide whether to inline a literal or not
  // XXX: need better detection of whether this is actually used in a write
  if (lowest_literal >= base_pc) {
    if (lowest_literal < end_pc) {
      dbg(1, "warning: lowest_literal=%08x < end_pc=%08x", lowest_literal, end_pc);
      // TODO: does this always mean end_pc covers data?
    }
  }
  if (lowest_mova >= base_pc) {
    if (lowest_mova < end_literals) {
      dbg(1, "warning: mova=%08x < end_literals=%08x", lowest_mova, end_literals);
      end_literals = lowest_mova;
    }
    if (lowest_mova < end_pc) {
      dbg(1, "warning: mova=%08x < end_pc=%08x", lowest_mova, end_pc);
      end_literals = end_pc;
    }
  }
  if (lowest_literal >= end_literals)
    lowest_literal = end_literals;
  if (lowest_literal && end_literals)
    for (pc = lowest_literal; pc < end_literals; pc += 2)
      crc += FETCH_OP(pc);
  *end_pc_out = end_pc;
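  // note: "x ?: y" below is the GNU C conditional with omitted middle
  // operand, equivalent to x ? x : y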
  if (base_literals_out != NULL)
    *base_literals_out = (lowest_literal ?: end_pc);
  if (end_literals_out != NULL)
    *end_literals_out = (end_literals ?: end_pc);
  // crc overflow handling, twice to collect all overflows
  crc = (crc & 0xffff) + (crc >> 16);
  crc = (crc & 0xffff) + (crc >> 16);
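  // e.g. crc = 0x2fffe: 1st fold -> 0xfffe + 0x2 = 0x10000,
  // 2nd fold -> 0x0000 + 0x1 = 0x1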
  return crc;
}
// vim:shiftwidth=2:ts=2:expandtab