/*
 * SH2 recompiler
 * (C) notaz, 2009,2010,2013
 * (C) kub, 2018,2019,2020
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 *
 * notes:
 * - tcache, block descriptor, block entry buffer overflows result in oldest
 *   blocks being deleted until enough space is available
 * - link and list element buffer overflows result in failure and exit
 * - jumps between blocks are tracked for SMC handling (in block_entry->links),
 *   except jumps from global to CPU-local tcaches
 *
 * implemented:
 * - static register allocation
 * - remaining register caching and tracking in temporaries
 * - block-local branch linking
 * - block linking
 * - some constant propagation
 * - call stack caching for host block entry address
 * - delay, poll, and idle loop detection and handling
 * - some T/M flag optimizations where the value is known or isn't used
 *
 * TODO:
 * - better constant propagation
 * - bug fixing
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include <pico/pico_int.h>
#include <pico/arm_features.h>
#include "sh2.h"
#include "compiler.h"
#include "../drc/cmn.h"
#include "../debug.h"

// features
#define PROPAGATE_CONSTANTS     1
#define LINK_BRANCHES           1
#define BRANCH_CACHE            1
#define CALL_STACK              1
#define ALIAS_REGISTERS         1
#define REMAP_REGISTER          1
#define LOOP_DETECTION          1
#define LOOP_OPTIMIZER          1
#define T_OPTIMIZER             1
#define DIV_OPTIMIZER           0

#define MAX_LITERAL_OFFSET      0x200  // max. MOVA, MOV @(PC) offset
#define MAX_LOCAL_TARGETS       (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES      (BLOCK_INSN_LIMIT / 2)

// debug stuff
// 01 - warnings/errors
// 02 - block info/smc
// 04 - asm
// 08 - runtime block entry log
// 10 - smc self-check
// 20 - runtime block entry counter
// 40 - rcache checking
// 80 - branch cache statistics
// 100 - write trace
// 200 - compare trace
// 400 - block entry backtrace on exit
// 800 - state dump on exit
// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0//x847
#endif

#if DRC_DEBUG
#define dbg(l,...) { \
  if ((l) & DRC_DEBUG) \
    elprintf(EL_STATUS, ##__VA_ARGS__); \
}
#include "mame/sh2dasm.h"
#include <platform/libpicofe/linux/host_dasm.h>

static int insns_compiled, hash_collisions, host_insn_count;
#define COUNT_OP \
  host_insn_count++
#else // !DRC_DEBUG
#define COUNT_OP
#define dbg(...)
#endif

///
#define FETCH_OP(pc) \
  dr_pc_base[(pc) / 2]

#define FETCH32(a) \
  ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])

#define CHECK_UNHANDLED_BITS(mask, label) { \
  if ((op & (mask)) != 0) \
    goto label; \
}

#define GET_Fx() \
  ((op >> 4) & 0x0f)
#define GET_Rm GET_Fx

#define GET_Rn() \
  ((op >> 8) & 0x0f)

#define T       0x00000001
#define S       0x00000002
#define I       0x000000f0
#define Q       0x00000100
#define M       0x00000200
#define T_save  0x00000800

#define I_SHIFT 4
#define Q_SHIFT 8
#define M_SHIFT 9
#define T_SHIFT 11
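
// T, S, I, Q and M sit at their architectural SR bit positions; T_save at
// bit 11 is recompiler-private (the guest-visible SR is only the low 12 bits,
// cf. the "sr & 0xfff" uses below), and the bits above it hold the remaining
// cycle count (cf. the "(signed int)sr >> 12" in the entry logging).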
static struct op_data {
  u8 op;
  u8 cycles;
  u8 size;     // 0, 1, 2 - byte, word, long
  s8 rm;       // branch or load/store data reg
  u32 source;  // bitmask of src regs
  u32 dest;    // bitmask of dest regs
  u32 imm;     // immediate/io address/branch target
               // (for literal - address, not value)
} ops[BLOCK_INSN_LIMIT];
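
// the decode pass fills one ops[] entry per SH2 insn while scanning a block;
// the emit pass then works from this array instead of decoding again.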
enum op_types {
  OP_UNHANDLED = 0,
  OP_BRANCH,
  OP_BRANCH_N,  // conditional known not to be taken
  OP_BRANCH_CT, // conditional, branch if T set
  OP_BRANCH_CF, // conditional, branch if T clear
  OP_BRANCH_R,  // indirect
  OP_BRANCH_RF, // indirect far (PC + Rm)
  OP_SETCLRT,   // T flag set/clear
  OP_MOVE,      // register move
  OP_LOAD_CONST,// load const to register
  OP_LOAD_POOL, // literal pool load, imm is address
  OP_MOVA,      // MOVA instruction
  OP_SLEEP,     // SLEEP instruction
  OP_RTE,       // RTE instruction
  OP_TRAPA,     // TRAPA instruction
  OP_LDC,       // LDC instruction
  OP_DIV0,      // DIV0[US] instruction
  OP_UNDEFINED,
};

struct div {
  u32 state:1;          // 0: expect DIV1/ROTCL, 1: expect DIV1
  u32 rn:5, rm:5, ro:5; // rn and rm for DIV1, ro for ROTCL
  u32 div1:8, rotcl:8;  // DIV1 count, ROTCL count
};
union _div { u32 imm; struct div div; }; // XXX tut-tut type punning...
#define div(opd) ((union _div *)&((opd)->imm))->div
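// this packs the description of a whole DIV0..DIV1/ROTCL sequence into the
// 32-bit imm field of an op_data entry, so e.g. div(opd).div1 yields the
// DIV1 count straight out of opd->imm.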

// XXX consider trap insns: OP_TRAPA, OP_UNDEFINED?
#define OP_ISBRANCH(op) ((BITRANGE(OP_BRANCH, OP_BRANCH_RF) | BITMASK1(OP_RTE)) \
                & BITMASK1(op))
#define OP_ISBRAUC(op) (BITMASK4(OP_BRANCH, OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                & BITMASK1(op))
#define OP_ISBRACND(op) (BITMASK2(OP_BRANCH_CT, OP_BRANCH_CF) \
                & BITMASK1(op))
#define OP_ISBRAIMM(op) (BITMASK3(OP_BRANCH, OP_BRANCH_CT, OP_BRANCH_CF) \
                & BITMASK1(op))
#define OP_ISBRAIND(op) (BITMASK3(OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                & BITMASK1(op))
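
// these classify an op_types value with a single AND: BITMASK1(op) is the
// one-hot bit for the type, tested against a constant mask of the types in
// each class (any branch, unconditional, conditional, immediate, indirect).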

#ifdef DRC_SH2

#if (DRC_DEBUG & 4)
static u8 *tcache_dsm_ptrs[3];
static char sh2dasm_buff[64];
#define do_host_disasm(tcid) \
  host_dasm(tcache_dsm_ptrs[tcid], emith_insn_ptr() - tcache_dsm_ptrs[tcid]); \
  tcache_dsm_ptrs[tcid] = emith_insn_ptr()
#else
#define do_host_disasm(x)
#endif

#define SH2_DUMP(sh2, reason) { \
  char ms = (sh2)->is_slave ? 's' : 'm'; \
  printf("%csh2 %s %08lx\n", ms, reason, (ulong)(sh2)->pc); \
  printf("%csh2 r0-7  %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", ms, \
    (ulong)(sh2)->r[0], (ulong)(sh2)->r[1], (ulong)(sh2)->r[2], (ulong)(sh2)->r[3], \
    (ulong)(sh2)->r[4], (ulong)(sh2)->r[5], (ulong)(sh2)->r[6], (ulong)(sh2)->r[7]); \
  printf("%csh2 r8-15 %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", ms, \
    (ulong)(sh2)->r[8], (ulong)(sh2)->r[9], (ulong)(sh2)->r[10], (ulong)(sh2)->r[11], \
    (ulong)(sh2)->r[12], (ulong)(sh2)->r[13], (ulong)(sh2)->r[14], (ulong)(sh2)->r[15]); \
  printf("%csh2 pc-ml %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", ms, \
    (ulong)(sh2)->pc, (ulong)(sh2)->ppc, (ulong)(sh2)->pr, (ulong)(sh2)->sr&0xfff, \
    (ulong)(sh2)->gbr, (ulong)(sh2)->vbr, (ulong)(sh2)->mach, (ulong)(sh2)->macl); \
  printf("%csh2 tmp-p %08x %08x %08x %08x %08x %08lx %08x %08x\n", ms, \
    (sh2)->drc_tmp, (sh2)->irq_cycles, \
    (sh2)->pdb_io_csum[0], (sh2)->pdb_io_csum[1], (sh2)->state, \
    (ulong)(sh2)->poll_addr, (sh2)->poll_cycles, (sh2)->poll_cnt); \
}

#if (DRC_DEBUG & (8|256|512|1024)) || defined(PDB)
#if (DRC_DEBUG & (256|512|1024))
static SH2 csh2[2][8];
static FILE *trace[2];
static int topen[2];
#endif
static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
{
  if (block != NULL) {
    dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
      sh2->pc, block, (signed int)sr >> 12);
#if defined PDB
    pdb_step(sh2, sh2->pc);
#elif (DRC_DEBUG & 256)
    {
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0] && !topen[0]++) {
        trace[0] = fopen("pico.trace0", "wb");
        trace[1] = fopen("pico.trace1", "wb");
      }
      if (trace[idx] && csh2[idx][0].pc != sh2->pc) {
        fwrite(sh2, offsetof(SH2, read8_map), 1, trace[idx]);
        fwrite(&sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx]);
        memcpy(&csh2[idx][0], sh2, offsetof(SH2, poll_cnt)+4);
        csh2[idx][0].is_slave = idx;
      }
    }
#elif (DRC_DEBUG & 512)
    {
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0] && !topen[0]++) {
        trace[0] = fopen("pico.trace0", "rb");
        trace[1] = fopen("pico.trace1", "rb");
      }
      if (trace[idx] && csh2[idx][0].pc != sh2->pc) {
        if (!fread(&fsh2, offsetof(SH2, read8_map), 1, trace[idx]) ||
            !fread(&fsh2.pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx])) {
          printf("trace eof at %08lx\n", ftell(trace[idx]));
          exit(1);
        }
        fsh2.sr = (fsh2.sr & 0xfff) | (sh2->sr & ~0xfff);
        fsh2.is_slave = idx;
        if (memcmp(&fsh2, sh2, offsetof(SH2, read8_map)) ||
            0)//memcmp(&fsh2.pdb_io_csum, &sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum)))
        {
          printf("difference at %08lx!\n", ftell(trace[idx]));
          SH2_DUMP(&fsh2, "file");
          SH2_DUMP(sh2, "current");
          SH2_DUMP(&csh2[idx][0], "previous");
          char *ps = (char *)sh2, *pf = (char *)&fsh2;
          for (idx = 0; idx < offsetof(SH2, read8_map); idx += sizeof(u32))
            if (*(u32 *)(ps+idx) != *(u32 *)(pf+idx))
              printf("diff reg %ld\n", (long)idx/sizeof(u32));
          exit(1);
        }
        csh2[idx][0] = fsh2;
      }
    }
#elif (DRC_DEBUG & 1024)
    {
      int x = sh2->is_slave, i;
      for (i = 0; i < ARRAY_SIZE(csh2[x])-1; i++)
        memcpy(&csh2[x][i], &csh2[x][i+1], offsetof(SH2, poll_cnt)+4);
      memcpy(&csh2[x][ARRAY_SIZE(csh2[x])-1], sh2, offsetof(SH2, poll_cnt)+4);
      csh2[x][0].is_slave = x;
    }
#endif
  }
  return block;
}
#endif

// we have 3 translation cache buffers, split from one drc/cmn buffer.
// BIOS shares tcache with data array because it's only used for init
// and can be discarded early
#define TCACHE_BUFFERS 3

struct ring_buffer {
  u8 *base;          // ring buffer memory
  unsigned item_sz;  // size of one buffer item
  unsigned size;     // number of items in ring
  int first, next;   // read and write pointers
  int used;          // number of used items in ring
};

enum { BL_JMP=1, BL_LDJMP, BL_JCCBLX };
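// link types: BL_JMP patches a jump insn in place, BL_LDJMP overwrites a
// load-pc+jump sequence, and BL_JCCBLX retargets a short conditional jump,
// going via its blx stub when the target is out of range (see dr_block_link).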
struct block_link {
  short tcache_id;
  short type;                // BL_JMP et al
  u32 target_pc;
  void *jump;                // insn address
  void *blx;                 // block link/exit area if any
  u8 jdisp[12];              // jump backup buffer
  struct block_link *next;   // either in block_entry->links or unresolved
  struct block_link *o_next; // ...in block_entry->o_links
  struct block_link *prev;
  struct block_link *o_prev;
  struct block_entry *target;// target block this is linked in (be->links)
};

struct block_entry {
  u32 pc;
  u8 *tcache_ptr;            // translated block for above PC
  struct block_entry *next;  // chain in hash_table with same pc hash
  struct block_entry *prev;
  struct block_link *links;  // incoming links to this entry
  struct block_link *o_links;// outgoing links from this entry
#if (DRC_DEBUG & 2)
  struct block_desc *block;
#endif
#if (DRC_DEBUG & 32)
  int entry_count;
#endif
};

struct block_desc {
  u32 addr;                  // block start SH2 PC address
  u32 addr_lit;              // block start SH2 literal pool addr
  int size;                  // ..of recompiled insns
  int size_lit;              // ..of (insns+)literal pool
  u8 *tcache_ptr;            // start address of block in cache
  u16 crc;                   // crc of insns and literals
  u16 active;                // actively used or deactivated?
  struct block_list *list;
#if (DRC_DEBUG & 2)
  int refcount;
#endif
  int entry_count;
  struct block_entry *entryp;
};

struct block_list {
  struct block_desc *block;  // block reference
  struct block_list *next;   // pointers for doubly linked list
  struct block_list *prev;
  struct block_list **head;  // list head (for removing from list)
  struct block_list *l_next;
};

static u8 *tcache_ptr;       // ptr for code emitters

// XXX: need to tune sizes
static struct ring_buffer tcache_ring[TCACHE_BUFFERS];
static const int tcache_sizes[TCACHE_BUFFERS] = {
  DRC_TCACHE_SIZE * 30 / 32, // ROM (rarely used), DRAM
  DRC_TCACHE_SIZE / 32,      // BIOS, data array in master sh2
  DRC_TCACHE_SIZE / 32,      // ... slave
};

#define BLOCK_MAX_COUNT(tcid)     ((tcid) ? 256 : 32*256)
static struct ring_buffer block_ring[TCACHE_BUFFERS];
static struct block_desc *block_tables[TCACHE_BUFFERS];

#define ENTRY_MAX_COUNT(tcid)     ((tcid) ? 8*512 : 256*512)
static struct ring_buffer entry_ring[TCACHE_BUFFERS];
static struct block_entry *entry_tables[TCACHE_BUFFERS];

// we have block_link_pool to avoid using mallocs
#define BLOCK_LINK_MAX_COUNT(tcid) ((tcid) ? 512 : 32*512)
static struct block_link *block_link_pool[TCACHE_BUFFERS];
static int block_link_pool_counts[TCACHE_BUFFERS];
static struct block_link **unresolved_links[TCACHE_BUFFERS];
static struct block_link *blink_free[TCACHE_BUFFERS];

// used for invalidation
#define RAM_SIZE(tcid)            ((tcid) ? 0x1000 : 0x40000)
#define INVAL_PAGE_SIZE 0x100
static struct block_list *inactive_blocks[TCACHE_BUFFERS];

// array of pointers to block_lists for RAM and 2 data arrays
// each array has len: sizeof(mem) / INVAL_PAGE_SIZE
static struct block_list **inval_lookup[TCACHE_BUFFERS];

#define HASH_TABLE_SIZE(tcid)     ((tcid) ? 512 : 32*512)
static struct block_entry **hash_tables[TCACHE_BUFFERS];

#define HASH_FUNC(hash_tab, addr, mask) \
  (hash_tab)[((addr) >> 1) & (mask)]
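
// SH2 code addresses are always even, so bit 0 of the pc carries no
// information; it is shifted out before masking so all hash slots are usable.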

#define BLOCK_LIST_MAX_COUNT      (64*1024)
static struct block_list *block_list_pool;
static int block_list_pool_count;
static struct block_list *blist_free;

#if (DRC_DEBUG & 128)
#if BRANCH_CACHE
int bchit, bcmiss;
#endif
#if CALL_STACK
int rchit, rcmiss;
#endif
#endif

// host register tracking
enum cache_reg_htype {
  HRT_TEMP   = 1, // is for temps and args
  HRT_REG    = 2, // is for sh2 regs
};

enum cache_reg_flags {
  HRF_DIRTY  = 1 << 0, // has "dirty" value to be written to ctx
  HRF_PINNED = 1 << 1, // has a pinned mapping
  HRF_S16    = 1 << 2, // has a sign extended 16 bit value
  HRF_U16    = 1 << 3, // has a zero extended 16 bit value
};

enum cache_reg_type {
  HR_FREE,
  HR_CACHED, // vreg has sh2_reg_e
  HR_TEMP,   // reg used for temp storage
};

typedef struct {
  u8 hreg:6;   // "host" reg
  u8 htype:2;  // TEMP or REG?
  u8 flags:4;  // DIRTY, PINNED?
  u8 type:2;   // CACHED or TEMP?
  u8 locked:2; // LOCKED reference counter
  u16 stamp;   // kind of a timestamp
  u32 gregs;   // "guest" reg mask
} cache_reg_t;

// guest register tracking
enum guest_reg_flags {
  GRF_DIRTY  = 1 << 0, // reg has "dirty" value to be written to ctx
  GRF_CONST  = 1 << 1, // reg has a constant
  GRF_CDIRTY = 1 << 2, // constant not yet written to ctx
  GRF_STATIC = 1 << 3, // reg has static mapping to vreg
  GRF_PINNED = 1 << 4, // reg has pinned mapping to vreg
};

typedef struct {
  u8 flags; // guest flags: is constant, is dirty?
  s8 sreg;  // cache reg for static mapping
  s8 vreg;  // cache_reg this is currently mapped to, -1 if not mapped
  s8 cnst;  // const index if this is constant
} guest_reg_t;
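
// a guest reg is mapped to at most one cache reg (vreg indexes cache_regs),
// while a cache reg may hold several guest regs at once (e.g. after a reg-reg
// move); the cache reg's gregs mask keeps the reverse mapping.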

// possibly needed in code emitter
static int rcache_get_tmp(void);
static void rcache_free_tmp(int hr);

// Note: Register assignment goes by ABI convention. Caller save registers are
// TEMPORARY, callee save registers are PRESERVED. Unusable regs are omitted.
// There must be at least the free (not context or statically mapped) number of
// PRESERVED/TEMPORARY registers used by handlers in the worst case (currently 4).
// There must be at least 3 PARAM, and PARAM+TEMPORARY must be at least 4.
// SR must be statically mapped, and R0 should be by all means.
// XXX the static definition of SR MUST match that in compiler.h
#if defined(__arm__) || defined(_M_ARM)
#include "../drc/emit_arm.c"
#elif defined(__aarch64__) || defined(_M_ARM64)
#include "../drc/emit_arm64.c"
#elif defined(__mips__)
#include "../drc/emit_mips.c"
#elif defined(__riscv__) || defined(__riscv)
#include "../drc/emit_riscv.c"
#elif defined(__powerpc__) || defined(_M_PPC)
#include "../drc/emit_ppc.c"
#elif defined(__i386__) || defined(_M_X86)
#include "../drc/emit_x86.c"
#elif defined(__x86_64__) || defined(_M_X64)
#include "../drc/emit_x86.c"
#else
#error unsupported arch
#endif

static const signed char hregs_param[] = PARAM_REGS;
static const signed char hregs_temp [] = TEMPORARY_REGS;
static const signed char hregs_saved[] = PRESERVED_REGS;
static const signed char regs_static[] = STATIC_SH2_REGS;

#define CACHE_REGS \
    (ARRAY_SIZE(hregs_param)+ARRAY_SIZE(hregs_temp)+ARRAY_SIZE(hregs_saved)-1)
static cache_reg_t cache_regs[CACHE_REGS];

static signed char reg_map_host[HOST_REGS];

static guest_reg_t guest_regs[SH2_REGS];

static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void REGPARM(1) (*sh2_drc_dispatcher)(u32 pc);
#if CALL_STACK
static u32  REGPARM(2) (*sh2_drc_dispatcher_call)(u32 pc);
static void REGPARM(1) (*sh2_drc_dispatcher_return)(u32 pc);
#endif
static void REGPARM(1) (*sh2_drc_exit)(u32 pc);
static void            (*sh2_drc_test_irq)(void);

static u32  REGPARM(1) (*sh2_drc_read8)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read8_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32_poll)(u32 a);
static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write32)(u32 a, u32 d);
#ifdef DRC_SR_REG
void REGPARM(1) (*sh2_drc_save_sr)(SH2 *sh2);
void REGPARM(1) (*sh2_drc_restore_sr)(SH2 *sh2);
#endif

// flags for memory access
#define MF_SIZEMASK 0x03        // size of access
#define MF_POSTINCR 0x10        // post increment (for read_rr)
#define MF_PREDECR  MF_POSTINCR // pre decrement (for write_rr)
#define MF_POLLING  0x20        // include polling check in read

// address space stuff
static int dr_is_rom(u32 a)
{
  // tweak for WWF Raw which writes data to some high ROM addresses
  return (a & 0xc6000000) == 0x02000000 && (a & 0x3f0000) < 0x3e0000;
}
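
// the first test matches the 4 MB ROM bank at 0x02000000 and its mirrors
// (e.g. the cache-through image at 0x22000000); the second one excludes the
// bank's last 128 KB, which is where WWF Raw writes its data.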

static int dr_ctx_get_mem_ptr(SH2 *sh2, u32 a, u32 *mask)
{
  void *memptr;
  int poffs = -1;

  // check if region is mapped memory
  memptr = p32x_sh2_get_mem_ptr(a, mask, sh2);
  if (memptr == NULL)
    return poffs;

  if (memptr == sh2->p_bios)        // BIOS
    poffs = offsetof(SH2, p_bios);
  else if (memptr == sh2->p_da)     // data array
    poffs = offsetof(SH2, p_da);
  else if (memptr == sh2->p_sdram)  // SDRAM
    poffs = offsetof(SH2, p_sdram);
  else if (memptr == sh2->p_rom)    // ROM
    poffs = offsetof(SH2, p_rom);

  return poffs;
}

static int dr_get_tcache_id(u32 pc, int is_slave)
{
  u32 tcid = 0;

  if ((pc & 0xe0000000) == 0xc0000000)
    tcid = 1 + is_slave; // data array
  if ((pc & ~0xfff) == 0)
    tcid = 1 + is_slave; // BIOS

  return tcid;
}
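
// tcid 0 is the large shared cache for all code outside BIOS and data array
// (ROM, SDRAM, ...); tcid 1/2 hold BIOS and data array code of the master
// resp. slave SH2 (cf. tcache_sizes above).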

static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
{
  struct block_entry *be;

  *tcache_id = dr_get_tcache_id(pc, is_slave);

  be = HASH_FUNC(hash_tables[*tcache_id], pc, HASH_TABLE_SIZE(*tcache_id) - 1);
  if (be != NULL) // don't ask... gcc code generation hint
    for (; be != NULL; be = be->next)
      if (be->pc == pc)
        return be;

  return NULL;
}

// ---------------------------------------------------------------

// ring buffer management
#define RING_INIT(r,m,n)    *(r) = (struct ring_buffer) { .base = (u8 *)m, \
                                        .item_sz = sizeof(*(m)), .size = n };

static void *ring_alloc(struct ring_buffer *rb, int count)
{
  // allocate space in ring buffer
  void *p;

  p = rb->base + rb->next * rb->item_sz;
  if (rb->next+count > rb->size) {
    rb->used += rb->size - rb->next;
    p = rb->base; // wrap if overflow at end
    rb->next = count;
  } else {
    rb->next += count;
    if (rb->next == rb->size) rb->next = 0;
  }
  rb->used += count;

  return p;
}

static void ring_wrap(struct ring_buffer *rb)
{
  // insufficient space at end of buffer memory, wrap around
  rb->used += rb->size - rb->next;
  rb->next = 0;
}

static void ring_free(struct ring_buffer *rb, int count)
{
  // free oldest space in ring buffer
  rb->first += count;
  if (rb->first >= rb->size) rb->first -= rb->size;
  rb->used -= count;
}

static void ring_free_p(struct ring_buffer *rb, void *p)
{
  // free ring buffer space up to given pointer
  rb->first = ((u8 *)p - rb->base) / rb->item_sz;
  rb->used = rb->next - rb->first;
  if (rb->used < 0) rb->used += rb->size;
}

static void *ring_reset(struct ring_buffer *rb)
{
  // reset to initial state
  rb->first = rb->next = rb->used = 0;
  return rb->base + rb->next * rb->item_sz;
}

static void *ring_first(struct ring_buffer *rb)
{
  return rb->base + rb->first * rb->item_sz;
}

static void *ring_next(struct ring_buffer *rb)
{
  return rb->base + rb->next * rb->item_sz;
}
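
// usage sketch: writers reserve space with ring_alloc(), which wraps to the
// buffer start if the tail is too small (see also ring_wrap()); the oldest
// items are then released with ring_free(), or with ring_free_p() up to a
// given item, e.g. when evicting old blocks until enough space is free again.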
  560. // block management
  561. static void add_to_block_list(struct block_list **blist, struct block_desc *block)
  562. {
  563. struct block_list *added;
  564. if (blist_free) {
  565. added = blist_free;
  566. blist_free = added->next;
  567. } else if (block_list_pool_count >= BLOCK_LIST_MAX_COUNT) {
  568. printf( "block list overflow\n");
  569. exit(1);
  570. } else {
  571. added = block_list_pool + block_list_pool_count;
  572. block_list_pool_count ++;
  573. }
  574. added->block = block;
  575. added->l_next = block->list;
  576. block->list = added;
  577. added->head = blist;
  578. added->prev = NULL;
  579. if (*blist)
  580. (*blist)->prev = added;
  581. added->next = *blist;
  582. *blist = added;
  583. }
  584. static void rm_from_block_lists(struct block_desc *block)
  585. {
  586. struct block_list *entry;
  587. entry = block->list;
  588. while (entry != NULL) {
  589. if (entry->prev != NULL)
  590. entry->prev->next = entry->next;
  591. else
  592. *(entry->head) = entry->next;
  593. if (entry->next != NULL)
  594. entry->next->prev = entry->prev;
  595. entry->next = blist_free;
  596. blist_free = entry;
  597. entry = entry->l_next;
  598. }
  599. block->list = NULL;
  600. }
  601. static void discard_block_list(struct block_list **blist)
  602. {
  603. struct block_list *next, *current = *blist;
  604. while (current != NULL) {
  605. next = current->next;
  606. current->next = blist_free;
  607. blist_free = current;
  608. current = next;
  609. }
  610. *blist = NULL;
  611. }
  612. static void add_to_hashlist(struct block_entry *be, int tcache_id)
  613. {
  614. u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  615. struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);
  616. be->prev = NULL;
  617. if (*head)
  618. (*head)->prev = be;
  619. be->next = *head;
  620. *head = be;
  621. #if (DRC_DEBUG & 2)
  622. if (be->next != NULL) {
  623. printf(" %08lx@%p: entry hash collision with %08lx@%p\n",
  624. (ulong)be->pc, be->tcache_ptr, (ulong)be->next->pc, be->next->tcache_ptr);
  625. hash_collisions++;
  626. }
  627. #endif
  628. }
  629. static void rm_from_hashlist(struct block_entry *be, int tcache_id)
  630. {
  631. u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  632. struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);
  633. #if DRC_DEBUG & 1
  634. struct block_entry *current = be;
  635. while (current->prev != NULL)
  636. current = current->prev;
  637. if (current != *head)
  638. dbg(1, "rm_from_hashlist @%p: be %p %08x missing?", head, be, be->pc);
  639. #endif
  640. if (be->prev != NULL)
  641. be->prev->next = be->next;
  642. else
  643. *head = be->next;
  644. if (be->next != NULL)
  645. be->next->prev = be->prev;
  646. }
  647. static void add_to_hashlist_unresolved(struct block_link *bl, int tcache_id)
  648. {
  649. u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  650. struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);
  651. #if DRC_DEBUG & 1
  652. struct block_link *current = *head;
  653. while (current != NULL && current != bl)
  654. current = current->next;
  655. if (current == bl)
  656. dbg(1, "add_to_hashlist_unresolved @%p: bl %p %p %08x already in?", head, bl, bl->target, bl->target_pc);
  657. #endif
  658. bl->target = NULL; // marker for not resolved
  659. bl->prev = NULL;
  660. if (*head)
  661. (*head)->prev = bl;
  662. bl->next = *head;
  663. *head = bl;
  664. }
  665. static void rm_from_hashlist_unresolved(struct block_link *bl, int tcache_id)
  666. {
  667. u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  668. struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);
  669. #if DRC_DEBUG & 1
  670. struct block_link *current = bl;
  671. while (current->prev != NULL)
  672. current = current->prev;
  673. if (current != *head)
  674. dbg(1, "rm_from_hashlist_unresolved @%p: bl %p %p %08x missing?", head, bl, bl->target, bl->target_pc);
  675. #endif
  676. if (bl->prev != NULL)
  677. bl->prev->next = bl->next;
  678. else
  679. *head = bl->next;
  680. if (bl->next != NULL)
  681. bl->next->prev = bl->prev;
  682. }
  683. #if LINK_BRANCHES
  684. static void dr_block_link(struct block_entry *be, struct block_link *bl, int emit_jump)
  685. {
  686. dbg(2, "- %slink from %p to pc %08x entry %p", emit_jump ? "":"early ",
  687. bl->jump, bl->target_pc, be->tcache_ptr);
  688. if (emit_jump) {
  689. u8 *jump = bl->jump;
  690. int jsz = emith_jump_patch_size();
  691. if (bl->type == BL_JMP) { // patch: jump @entry
  692. // inlined: @jump far jump to target
  693. emith_jump_patch(jump, be->tcache_ptr, &jump);
  694. } else if (bl->type == BL_LDJMP) { // write: jump @entry
  695. // inlined: @jump far jump to target
  696. emith_jump_at(jump, be->tcache_ptr);
  697. jsz = emith_jump_at_size();
  698. } else if (bl->type == BL_JCCBLX) { // patch: jump cond -> jump @entry
  699. if (emith_jump_patch_inrange(bl->jump, be->tcache_ptr)) {
  700. // inlined: @jump near jumpcc to target
  701. emith_jump_patch(jump, be->tcache_ptr, &jump);
  702. } else { // dispatcher cond immediate
  703. // via blx: @jump near jumpcc to blx; @blx far jump
  704. emith_jump_patch(jump, bl->blx, &jump);
  705. emith_jump_at(bl->blx, be->tcache_ptr);
  706. host_instructions_updated(bl->blx, (char *)bl->blx + emith_jump_at_size(),
  707. ((uintptr_t)bl->blx & 0x1f) + emith_jump_at_size()-1 > 0x1f);
  708. }
  709. } else {
  710. printf("unknown BL type %d\n", bl->type);
  711. exit(1);
  712. }
  713. host_instructions_updated(jump, jump + jsz, ((uintptr_t)jump & 0x1f) + jsz-1 > 0x1f);
  714. }
  715. // move bl to block_entry
  716. bl->target = be;
  717. bl->prev = NULL;
  718. if (be->links)
  719. be->links->prev = bl;
  720. bl->next = be->links;
  721. be->links = bl;
  722. }
  723. static void dr_block_unlink(struct block_link *bl, int emit_jump)
  724. {
  725. dbg(2,"- unlink from %p to pc %08x", bl->jump, bl->target_pc);
  726. if (bl->target) {
  727. if (emit_jump) {
  728. u8 *jump = bl->jump;
  729. int jsz = emith_jump_patch_size();
  730. if (bl->type == BL_JMP) { // jump_patch @dispatcher
  731. // inlined: @jump far jump to dispatcher
  732. emith_jump_patch(jump, sh2_drc_dispatcher, &jump);
  733. } else if (bl->type == BL_LDJMP) { // restore: load pc, jump @dispatcher
  734. // inlined: @jump load target_pc, far jump to dispatcher
  735. memcpy(jump, bl->jdisp, emith_jump_at_size());
  736. jsz = emith_jump_at_size();
  737. } else if (bl->type == BL_JCCBLX) { // jump cond @blx; @blx: load pc, jump
  738. // via blx: @jump near jumpcc to blx; @blx load target_pc, far jump
  739. emith_jump_patch(bl->jump, bl->blx, &jump);
  740. memcpy(bl->blx, bl->jdisp, emith_jump_at_size());
  741. host_instructions_updated(bl->blx, (char *)bl->blx + emith_jump_at_size(), 1);
  742. } else {
  743. printf("unknown BL type %d\n", bl->type);
  744. exit(1);
  745. }
  746. // update cpu caches since the previous jump target doesn't exist anymore
  747. host_instructions_updated(jump, jump + jsz, 1);
  748. }
  749. if (bl->prev)
  750. bl->prev->next = bl->next;
  751. else
  752. bl->target->links = bl->next;
  753. if (bl->next)
  754. bl->next->prev = bl->prev;
  755. bl->target = NULL;
  756. }
  757. }
  758. #endif
static struct block_link *dr_prepare_ext_branch(struct block_entry *owner, u32 pc, int is_slave, int tcache_id)
{
#if LINK_BRANCHES
  struct block_link *bl = block_link_pool[tcache_id];
  int cnt = block_link_pool_counts[tcache_id];
  int target_tcache_id;

  // get the target block entry
  target_tcache_id = dr_get_tcache_id(pc, is_slave);
  if (target_tcache_id && target_tcache_id != tcache_id)
    return NULL;

  // get a block link
  if (blink_free[tcache_id] != NULL) {
    bl = blink_free[tcache_id];
    blink_free[tcache_id] = bl->next;
  } else if (cnt >= BLOCK_LINK_MAX_COUNT(tcache_id)) {
    dbg(1, "bl overflow for tcache %d", tcache_id);
    return NULL;
  } else {
    bl += cnt;
    block_link_pool_counts[tcache_id] = cnt+1;
  }

  // prepare link and add to outgoing list of owner
  bl->tcache_id = tcache_id;
  bl->target_pc = pc;
  bl->jump = tcache_ptr;
  bl->blx = NULL;
  bl->o_next = owner->o_links;
  owner->o_links = bl;

  add_to_hashlist_unresolved(bl, tcache_id);
  return bl;
#else
  return NULL;
#endif
}
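// update the per-page counters tracking compiled code in SH2 RAM: mark > 0
// when a block goes into service, mark < 0 on removal. also flags pages with
// overwritten literals (nolit) and maintains the invalidation lookup lists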
static void dr_mark_memory(int mark, struct block_desc *block, int tcache_id, u32 nolit)
{
  u8 *drc_ram_blk = NULL, *lit_ram_blk = NULL;
  u32 addr, end, mask = 0, shift = 0, idx;

  // mark memory blocks as containing compiled code
  if ((block->addr & 0xc7fc0000) == 0x06000000
      || (block->addr & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      drc_ram_blk = Pico32xMem->drcblk_da[tcache_id-1];
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      drc_ram_blk = Pico32xMem->drcblk_ram;
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    // mark recompiled insns
    addr = block->addr & ~((1 << shift) - 1);
    end = block->addr + block->size;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark literal pool
    if (addr < (block->addr_lit & ~((1 << shift) - 1)))
      addr = block->addr_lit & ~((1 << shift) - 1);
    end = block->addr_lit + block->size_lit;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark for literals disabled
    if (nolit) {
      addr = nolit & ~((1 << shift) - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
        lit_ram_blk[idx++] = 1;
    }

    if (mark < 0)
      rm_from_block_lists(block);
    else {
      // add to invalidation lookup lists
      addr = block->addr & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr + block->size;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
      if (addr < (block->addr_lit & ~(INVAL_PAGE_SIZE - 1)))
        addr = block->addr_lit & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
    }
  }
}
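// check whether literals are disabled somewhere in [start,end): returns the
// address of the first page marked in the literal-disabled map, clamped to
// the range, or end if literal inlining is allowed for the whole range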
static u32 dr_check_nolit(u32 start, u32 end, int tcache_id)
{
  u8 *lit_ram_blk = NULL;
  u32 mask = 0, shift = 0, addr, idx;

  if ((start & 0xc7fc0000) == 0x06000000
      || (start & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    addr = start & ~((1 << shift) - 1);
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      if (lit_ram_blk[idx++])
        break;

    return (addr < start ? start : addr > end ? end : addr);
  }

  return end;
}
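// take a block out of service: unhash its entries, turn incoming links back
// into unresolved ones, and unmark its memory. if free is set (or literals
// were overwritten) also revoke outgoing links and release the descriptor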
static void dr_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit, int free)
{
  struct block_link *bl;
  u32 i;

  free = free || nolit; // block is invalid if literals are overwritten
  dbg(2," %sing block %08x-%08x,%08x-%08x, blkid %d,%d", free?"delet":"disabl",
    bd->addr, bd->addr + bd->size, bd->addr_lit, bd->addr_lit + bd->size_lit,
    tcache_id, bd - block_tables[tcache_id]);
  if (bd->addr == 0 || bd->entry_count == 0) {
    dbg(1, " killing dead block!? %08x", bd->addr);
    return;
  }

#if LINK_BRANCHES
  // remove from hash table, make incoming links unresolved
  if (bd->active) {
    for (i = 0; i < bd->entry_count; i++) {
      rm_from_hashlist(&bd->entryp[i], tcache_id);

      while ((bl = bd->entryp[i].links) != NULL) {
        dr_block_unlink(bl, 1);
        add_to_hashlist_unresolved(bl, tcache_id);
      }
    }

    dr_mark_memory(-1, bd, tcache_id, nolit);
    add_to_block_list(&inactive_blocks[tcache_id], bd);
  }
  bd->active = 0;
#endif

  if (free) {
#if LINK_BRANCHES
    // revoke outgoing links
    for (bl = bd->entryp[0].o_links; bl != NULL; bl = bl->o_next) {
      if (bl->target)
        dr_block_unlink(bl, 0);
      else
        rm_from_hashlist_unresolved(bl, tcache_id);
      bl->jump = NULL;
      bl->next = blink_free[bl->tcache_id];
      blink_free[bl->tcache_id] = bl;
    }
    bd->entryp[0].o_links = NULL;
#endif

    // invalidate block
    rm_from_block_lists(bd);
    bd->addr = bd->size = bd->addr_lit = bd->size_lit = 0;
    bd->entry_count = 0;
    bd->entryp = NULL;
  }
  emith_update_cache();
}

static struct block_desc *dr_find_inactive_block(int tcache_id, u16 crc,
  u32 addr, int size, u32 addr_lit, int size_lit)
{
  struct block_list **head = &inactive_blocks[tcache_id];
  struct block_list *current;

  for (current = *head; current != NULL; current = current->next) {
    struct block_desc *block = current->block;
    if (block->crc == crc && block->addr == addr && block->size == size &&
        block->addr_lit == addr_lit && block->size_lit == size_lit)
    {
      rm_from_block_lists(block);
      return block;
    }
  }
  return NULL;
}

static struct block_desc *dr_add_block(int entries, u32 addr, int size,
  u32 addr_lit, int size_lit, u16 crc, int is_slave, int *blk_id)
{
  struct block_entry *be;
  struct block_desc *bd;
  int tcache_id;

  // do a lookup to get tcache_id and override check
  be = dr_get_entry(addr, is_slave, &tcache_id);
  if (be != NULL)
    dbg(1, "block override for %08x", addr);

  if (block_ring[tcache_id].used + 1 > block_ring[tcache_id].size ||
      entry_ring[tcache_id].used + entries > entry_ring[tcache_id].size) {
    dbg(1, "bd overflow for tcache %d", tcache_id);
    return NULL;
  }

  *blk_id = block_ring[tcache_id].next;
  bd = ring_alloc(&block_ring[tcache_id], 1);
  bd->entryp = ring_alloc(&entry_ring[tcache_id], entries);

  bd->addr = addr;
  bd->size = size;
  bd->addr_lit = addr_lit;
  bd->size_lit = size_lit;
  bd->tcache_ptr = tcache_ptr;
  bd->crc = crc;
  bd->active = 0;
  bd->list = NULL;
  bd->entry_count = 0;
#if (DRC_DEBUG & 2)
  bd->refcount = 0;
#endif

  return bd;
}

static void dr_link_blocks(struct block_entry *be, int tcache_id)
{
#if LINK_BRANCHES
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  u32 pc = be->pc;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], pc, tcmask);
  struct block_link *bl = *head, *next;

  while (bl != NULL) {
    next = bl->next;
    if (bl->target_pc == pc && (!bl->tcache_id || bl->tcache_id == tcache_id)) {
      rm_from_hashlist_unresolved(bl, bl->tcache_id);
      dr_block_link(be, bl, 1);
    }
    bl = next;
  }
#endif
}
static void dr_link_outgoing(struct block_entry *be, int tcache_id, int is_slave)
{
#if LINK_BRANCHES
  struct block_link *bl;
  int target_tcache_id;

  for (bl = be->o_links; bl; bl = bl->o_next) {
    if (bl->target == NULL) {
      be = dr_get_entry(bl->target_pc, is_slave, &target_tcache_id);
      if (be != NULL && (!target_tcache_id || target_tcache_id == tcache_id)) {
        // remove bl from unresolved_links (it must be there, since target was NULL)
        rm_from_hashlist_unresolved(bl, bl->tcache_id);
        dr_block_link(be, bl, 1);
      }
    }
  }
#endif
}
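// put a compiled block into service: hash in its entry points, resolve
// incoming and outgoing branch links, and mark its memory pages so that
// writes to them can be detected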
static void dr_activate_block(struct block_desc *bd, int tcache_id, int is_slave)
{
  int i;

  // connect branches
  for (i = 0; i < bd->entry_count; i++) {
    struct block_entry *entry = &bd->entryp[i];
    add_to_hashlist(entry, tcache_id);
    // incoming branches
    dr_link_blocks(entry, tcache_id);
    if (!tcache_id)
      dr_link_blocks(entry, is_slave?2:1);
    // outgoing branches
    dr_link_outgoing(entry, tcache_id, is_slave);
  }

  // mark memory for overwrite detection
  dr_mark_memory(1, bd, tcache_id, 0);
  bd->active = 1;
}

static void REGPARM(3) *dr_lookup_block(u32 pc, SH2 *sh2, int *tcache_id)
{
  struct block_entry *be = NULL;
  void *block = NULL;

  be = dr_get_entry(pc, sh2->is_slave, tcache_id);
  if (be != NULL)
    block = be->tcache_ptr;

#if (DRC_DEBUG & 2)
  if (be != NULL)
    be->block->refcount++;
#endif
  return block;
}

static void dr_free_oldest_block(int tcache_id)
{
  struct block_desc *bf;

  bf = ring_first(&block_ring[tcache_id]);
  if (bf->addr && bf->entry_count)
    dr_rm_block_entry(bf, tcache_id, 0, 1);
  ring_free(&block_ring[tcache_id], 1);

  if (block_ring[tcache_id].used) {
    bf = ring_first(&block_ring[tcache_id]);
    ring_free_p(&entry_ring[tcache_id], bf->entryp);
    ring_free_p(&tcache_ring[tcache_id], bf->tcache_ptr);
  } else {
    // reset since size of code block isn't known if no successor block exists
    ring_reset(&block_ring[tcache_id]);
    ring_reset(&entry_ring[tcache_id]);
    ring_reset(&tcache_ring[tcache_id]);
  }
}
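// ensure a ring buffer has room for count new units, freeing oldest blocks
// as needed; wraps the allocation point first if the buffer tail is too small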
static inline void dr_reserve_cache(int tcache_id, struct ring_buffer *rb, int count)
{
  // not enough space in the remainder of the buffer?
  if (rb->next + count >= rb->size){
    // not enough space in rest of buffer -> wrap around
    while (rb->first >= rb->next && rb->used)
      dr_free_oldest_block(tcache_id);
    if (rb->first == 0 && rb->used)
      dr_free_oldest_block(tcache_id);
    ring_wrap(rb);
  }
  while (rb->first >= rb->next && rb->next + count > rb->first && rb->used)
    dr_free_oldest_block(tcache_id);
}
static u8 *dr_prepare_cache(int tcache_id, int insn_count, int entry_count)
{
  int bf = block_ring[tcache_id].first;

  // reserve one block desc
  if (block_ring[tcache_id].used >= block_ring[tcache_id].size)
    dr_free_oldest_block(tcache_id);
  // reserve block entries
  dr_reserve_cache(tcache_id, &entry_ring[tcache_id], entry_count);
  // reserve cache space
  dr_reserve_cache(tcache_id, &tcache_ring[tcache_id], insn_count*128);

  if (bf != block_ring[tcache_id].first) {
    // deleted some block(s), clear branch cache and return stack
#if BRANCH_CACHE
    if (tcache_id)
      memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    else {
      memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
      memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
    }
#endif
#if CALL_STACK
    if (tcache_id) {
      memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      sh2s[tcache_id-1].rts_cache_idx = 0;
    } else {
      memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
      sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
    }
#endif
  }

  return ring_next(&tcache_ring[tcache_id]);
}
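// throw away all translated code and metadata of one tcache: reset the rings
// and link pools, clear hash tables and page marks, and invalidate the SH2
// branch/rts caches which may still point into the discarded code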
static void dr_flush_tcache(int tcid)
{
  int i;

#if (DRC_DEBUG & 1)
  elprintf(EL_STATUS, "tcache #%d flush! (%d/%d, bds %d/%d bes %d/%d)", tcid,
    tcache_ring[tcid].used, tcache_ring[tcid].size, block_ring[tcid].used,
    block_ring[tcid].size, entry_ring[tcid].used, entry_ring[tcid].size);
#endif

  ring_reset(&tcache_ring[tcid]);
  ring_reset(&block_ring[tcid]);
  ring_reset(&entry_ring[tcid]);

  block_link_pool_counts[tcid] = 0;
  blink_free[tcid] = NULL;
  memset(unresolved_links[tcid], 0, sizeof(*unresolved_links[0]) * HASH_TABLE_SIZE(tcid));
  memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * HASH_TABLE_SIZE(tcid));

  if (tcid == 0) { // ROM, RAM
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache));
    memset(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    memset(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache));
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  } else {
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[tcid - 1]));
    memset(Pico32xMem->drclit_da[tcid - 1], 0, sizeof(Pico32xMem->drclit_da[tcid - 1]));
    memset(sh2s[tcid - 1].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[tcid - 1].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    sh2s[tcid - 1].rts_cache_idx = 0;
  }
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcid] = tcache_ring[tcid].base;
#endif

  for (i = 0; i < RAM_SIZE(tcid) / INVAL_PAGE_SIZE; i++)
    discard_block_list(&inval_lookup[tcid][i]);
  discard_block_list(&inactive_blocks[tcid]);
}

static void *dr_failure(void)
{
  printf("recompilation failed\n");
  exit(1);
}

// ---------------------------------------------------------------

// NB rcache allocation dependencies:
// - get_reg_arg/get_tmp_arg first (might evict other regs just allocated)
// - get_reg(..., NULL) before get_reg(..., &hr) if it might get the same reg
// - get_reg(..., RC_GR_READ/RMW, ...) before WRITE (might evict needed reg)
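// an illustrative sketch of the ordering (not lifted from an emitter below):
//   arg0 = rcache_get_tmp_arg(0);                     // args/temps first
//   rm = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);  // reads before writes
//   rn = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL); // write target last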
// register cache / constant propagation stuff
typedef enum {
  RC_GR_READ,
  RC_GR_WRITE,
  RC_GR_RMW,
} rc_gr_mode;

typedef struct {
  u32 gregs;
  u32 val;
} gconst_t;

gconst_t gconsts[ARRAY_SIZE(guest_regs)];

static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr);
static inline int rcache_is_cached(sh2_reg_e r);
static void rcache_add_vreg_alias(int x, sh2_reg_e r);
static void rcache_remove_vreg_alias(int x, sh2_reg_e r);
static void rcache_evict_vreg(int x);
static void rcache_remap_vreg(int x);

static void rcache_set_x16(int hr, int s16_, int u16_)
{
  int x = reg_map_host[hr];
  if (x >= 0) {
    cache_regs[x].flags &= ~(HRF_S16|HRF_U16);
    if (s16_) cache_regs[x].flags |= HRF_S16;
    if (u16_) cache_regs[x].flags |= HRF_U16;
  }
}

static void rcache_copy_x16(int hr, int hr2)
{
  int x = reg_map_host[hr], y = reg_map_host[hr2];
  if (x >= 0 && y >= 0) {
    cache_regs[x].flags = (cache_regs[x].flags & ~(HRF_S16|HRF_U16)) |
        (cache_regs[y].flags & (HRF_S16|HRF_U16));
  }
}

static int rcache_is_s16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_S16 : 0);
}

static int rcache_is_u16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_U16 : 0);
}

#define RCACHE_DUMP(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i; \
  printf("cache dump %s:\n",msg); \
  printf(" cache_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->type != HR_FREE || cp->gregs || cp->locked || cp->flags) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%lx\n", i, cp->hreg, cp->type, cp->flags, cp->locked, (ulong)cp->gregs); \
  } \
  printf(" guest_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 || gp->sreg >= 0 || gp->flags) \
      printf(" %d: v=%d f=%x s=%d c=%d\n", i, gp->vreg, gp->flags, gp->sreg, gp->cnst); \
  } \
  printf(" gconsts:\n"); \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    if (gconsts[i].gregs) \
      printf(" %d: m=%lx v=%lx\n", i, (ulong)gconsts[i].gregs, (ulong)gconsts[i].val); \
  } \
}

#define RCACHE_CHECK(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i, x, m = 0, d = 0; \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->flags & HRF_PINNED) m |= (1 << i); \
    if (cp->type == HR_FREE || cp->type == HR_TEMP) continue; \
    /* check connectivity greg->vreg */ \
    FOR_ALL_BITS_SET_DO(cp->gregs, x, \
      if (guest_regs[x].vreg != i) \
        { d = 1; printf("cache check v=%d r=%d not connected?\n",i,x); } \
    ) \
  } \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 && !(cache_regs[gp->vreg].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d v=%d not connected?\n", i, gp->vreg); }\
    if (gp->vreg != -1 && cache_regs[gp->vreg].type != HR_CACHED) \
      { d = 1; printf("cache check r=%d v=%d wrong type?\n", i, gp->vreg); }\
    if ((gp->flags & GRF_CONST) && !(gconsts[gp->cnst].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d c=%d not connected?\n", i, gp->cnst); }\
    if ((gp->flags & GRF_CDIRTY) && (gp->vreg != -1 || !(gp->flags & GRF_CONST)))\
      { d = 1; printf("cache check r=%d CDIRTY?\n", i); } \
    if (gp->flags & (GRF_STATIC|GRF_PINNED)) { \
      if (gp->sreg == -1 || !(cache_regs[gp->sreg].flags & HRF_PINNED))\
        { d = 1; printf("cache check r=%d v=%d not pinned?\n", i, gp->vreg); } \
      else m &= ~(1 << gp->sreg); \
    } \
  } \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    FOR_ALL_BITS_SET_DO(gconsts[i].gregs, x, \
      if (guest_regs[x].cnst != i || !(guest_regs[x].flags & GRF_CONST)) \
        { d = 1; printf("cache check c=%d v=%d not connected?\n",i,x); } \
    ) \
  } \
  if (m) \
    { d = 1; printf("cache check m=%x pinning wrong?\n",m); } \
  if (d) RCACHE_DUMP(msg) \
/* else { \
  printf("locked regs %s:\n",msg); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->locked) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
  } \
} */ \
}

#if PROPAGATE_CONSTANTS
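// get a constant slot for r: drop r from whatever slot it occupied and claim
// the first slot no other guest reg is using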
static inline int gconst_alloc(sh2_reg_e r)
{
  int i, n = -1;

  for (i = 0; i < ARRAY_SIZE(gconsts); i++) {
    gconsts[i].gregs &= ~(1 << r);
    if (gconsts[i].gregs == 0 && n < 0)
      n = i;
  }
  if (n >= 0)
    gconsts[n].gregs = (1 << r);
  else {
    printf("all gconst buffers in use, aborting\n");
    exit(1); // cannot happen - more constants than guest regs?
  }
  return n;
}

static void gconst_set(sh2_reg_e r, u32 val)
{
  int i = gconst_alloc(r);

  guest_regs[r].flags |= GRF_CONST;
  guest_regs[r].cnst = i;
  gconsts[i].val = val;
}

static void gconst_new(sh2_reg_e r, u32 val)
{
  gconst_set(r, val);
  guest_regs[r].flags |= GRF_CDIRTY;

  // throw away old r that we might have cached
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
}
#endif

static int gconst_get(sh2_reg_e r, u32 *val)
{
  if (guest_regs[r].flags & GRF_CONST) {
    *val = gconsts[guest_regs[r].cnst].val;
    return 1;
  }
  *val = 0;
  return 0;
}

static int gconst_check(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    return 1;
  return 0;
}

// update hr if dirty, else do nothing
static int gconst_try_read(int vreg, sh2_reg_e r)
{
  int i, x;
  u32 v;

  if (guest_regs[r].flags & GRF_CDIRTY) {
    x = guest_regs[r].cnst;
    v = gconsts[x].val;
    emith_move_r_imm(cache_regs[vreg].hreg, v);
    rcache_set_x16(cache_regs[vreg].hreg, v == (s16)v, v == (u16)v);
    FOR_ALL_BITS_SET_DO(gconsts[x].gregs, i,
      {
        if (guest_regs[i].vreg >= 0 && guest_regs[i].vreg != vreg)
          rcache_remove_vreg_alias(guest_regs[i].vreg, i);
        if (guest_regs[i].vreg < 0)
          rcache_add_vreg_alias(vreg, i);
        guest_regs[i].flags &= ~GRF_CDIRTY;
        guest_regs[i].flags |= GRF_DIRTY;
      });
    cache_regs[vreg].type = HR_CACHED;
    cache_regs[vreg].flags |= HRF_DIRTY;
    return 1;
  }
  return 0;
}

static u32 gconst_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY)
      mask |= (1 << i);
  return mask;
}

static void gconst_kill(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    gconsts[guest_regs[r].cnst].gregs &= ~(1 << r);
  guest_regs[r].flags &= ~(GRF_CONST|GRF_CDIRTY);
}

static void gconst_copy(sh2_reg_e rd, sh2_reg_e rs)
{
  gconst_kill(rd);
  if (guest_regs[rs].flags & GRF_CONST) {
    guest_regs[rd].flags |= GRF_CONST;
    if (guest_regs[rd].vreg < 0)
      guest_regs[rd].flags |= GRF_CDIRTY;
    guest_regs[rd].cnst = guest_regs[rs].cnst;
    gconsts[guest_regs[rd].cnst].gregs |= (1 << rd);
  }
}

static void gconst_clean(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY) {
      // using RC_GR_READ here: it will call gconst_try_read,
      // cache the reg and mark it dirty.
      rcache_get_reg_(i, RC_GR_READ, 0, NULL);
    }
}

static void gconst_invalidate(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_CONST|GRF_CDIRTY))
      gconsts[guest_regs[i].cnst].gregs &= ~(1 << i);
    guest_regs[i].flags &= ~(GRF_CONST|GRF_CDIRTY);
  }
}
static u16 rcache_counter;
// SH2 register usage bitmasks
static u32 rcache_vregs_reg;     // regs of type HRT_REG (for pinning)
static u32 rcache_regs_static;   // statically allocated regs
static u32 rcache_regs_pinned;   // pinned regs
static u32 rcache_regs_now;      // regs used in current insn
static u32 rcache_regs_soon;     // regs used in the next few insns
static u32 rcache_regs_late;     // regs used in later insns
static u32 rcache_regs_discard;  // regs overwritten without being used
static u32 rcache_regs_clean;    // regs needing cleaning

static void rcache_lock_vreg(int x)
{
  if (x >= 0) {
    cache_regs[x].locked ++;
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("locking free vreg %x, aborting\n", x);
      exit(1);
    }
    if (!cache_regs[x].locked) {
      printf("locking overflow vreg %x, aborting\n", x);
      exit(1);
    }
#endif
  }
}

static void rcache_unlock_vreg(int x)
{
  if (x >= 0) {
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("unlocking free vreg %x, aborting\n", x);
      exit(1);
    }
#endif
    if (cache_regs[x].locked)
      cache_regs[x].locked --;
  }
}

static void rcache_free_vreg(int x)
{
  cache_regs[x].type = cache_regs[x].locked ? HR_TEMP : HR_FREE;
  cache_regs[x].flags &= HRF_PINNED;
  cache_regs[x].gregs = 0;
}

static void rcache_unmap_vreg(int x)
{
  int i;

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, i,
      if (guest_regs[i].flags & GRF_DIRTY) {
        // if a dirty reg is unmapped save its value to context
        if ((~rcache_regs_discard | rcache_regs_now) & (1 << i))
          emith_ctx_write(cache_regs[x].hreg, i * 4);
        guest_regs[i].flags &= ~GRF_DIRTY;
      }
      guest_regs[i].vreg = -1);
  rcache_free_vreg(x);
}

static void rcache_move_vreg(int d, int x)
{
  int i;

  cache_regs[d].type = HR_CACHED;
  cache_regs[d].gregs = cache_regs[x].gregs;
  cache_regs[d].flags &= HRF_PINNED;
  cache_regs[d].flags |= cache_regs[x].flags & ~HRF_PINNED;
  cache_regs[d].locked = 0;
  cache_regs[d].stamp = cache_regs[x].stamp;
  emith_move_r_r(cache_regs[d].hreg, cache_regs[x].hreg);
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].vreg == x)
      guest_regs[i].vreg = d;
  rcache_free_vreg(x);
}
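// write back a dirty vreg to the context: statics/pinned regs are moved back
// to their reserved sreg if possible, everything else is stored to its
// context slot; regs marked discard (and not needed now) skip the store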
static void rcache_clean_vreg(int x)
{
  u32 rns = rcache_regs_now | rcache_regs_soon;
  int r;

  if (cache_regs[x].flags & HRF_DIRTY) { // writeback
    cache_regs[x].flags &= ~HRF_DIRTY;
    rcache_lock_vreg(x);
    FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, r,
        if (guest_regs[r].flags & GRF_DIRTY) {
          if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) {
            if (guest_regs[r].vreg != guest_regs[r].sreg &&
                !cache_regs[guest_regs[r].sreg].locked &&
                ((~rcache_regs_discard | rcache_regs_now) & (1 << r)) &&
                !(rns & cache_regs[guest_regs[r].sreg].gregs)) {
              // statically mapped reg not in its sreg. move back to sreg
              rcache_evict_vreg(guest_regs[r].sreg);
              emith_move_r_r(cache_regs[guest_regs[r].sreg].hreg,
                  cache_regs[guest_regs[r].vreg].hreg);
              rcache_copy_x16(cache_regs[guest_regs[r].sreg].hreg,
                  cache_regs[guest_regs[r].vreg].hreg);
              rcache_remove_vreg_alias(x, r);
              rcache_add_vreg_alias(guest_regs[r].sreg, r);
              cache_regs[guest_regs[r].sreg].flags |= HRF_DIRTY;
            } else
              // cannot remap. keep dirty for writeback in unmap
              cache_regs[x].flags |= HRF_DIRTY;
          } else {
            if ((~rcache_regs_discard | rcache_regs_now) & (1 << r))
              emith_ctx_write(cache_regs[x].hreg, r * 4);
            guest_regs[r].flags &= ~GRF_DIRTY;
          }
          rcache_regs_clean &= ~(1 << r);
        })
    rcache_unlock_vreg(x);
  }

#if DRC_DEBUG & 64
  RCACHE_CHECK("after clean");
#endif
}

static void rcache_add_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs |= (1 << r);
  guest_regs[r].vreg = x;
  cache_regs[x].type = HR_CACHED;
}

static void rcache_remove_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs &= ~(1 << r);
  if (!cache_regs[x].gregs) {
    // no reg mapped -> free vreg
    if (cache_regs[x].locked)
      cache_regs[x].type = HR_TEMP;
    else
      rcache_free_vreg(x);
  }
  guest_regs[r].vreg = -1;
}

static void rcache_evict_vreg(int x)
{
  rcache_remap_vreg(x);
  rcache_unmap_vreg(x);
}

static void rcache_evict_vreg_aliases(int x, sh2_reg_e r)
{
  rcache_remove_vreg_alias(x, r);
  rcache_evict_vreg(x);
  rcache_add_vreg_alias(x, r);
}
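// pick a vreg to hand out: what > 0 wants a REG, what == 0 excludes TEMPs,
// what < 0 wants a TEMP. a free reg is taken outright; otherwise the reg
// whose contents are needed furthest in the future is evicted (oldest stamp
// breaking ties), provided its priority reaches minprio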
static int rcache_allocate(int what, int minprio)
{
  // evict reg with oldest stamp (only for HRT_REG, no temps)
  int i, i_prio, oldest = -1, prio = 0;
  u16 min_stamp = (u16)-1;

  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--) {
    // consider only non-static, unpinned, unlocked REG or TEMP
    if ((cache_regs[i].flags & HRF_PINNED) || cache_regs[i].locked)
      continue;
    if ((what > 0 && !(cache_regs[i].htype & HRT_REG)) ||   // get a REG
        (what == 0 && (cache_regs[i].htype & HRT_TEMP)) ||  // get a non-TEMP
        (what < 0 && !(cache_regs[i].htype & HRT_TEMP)))    // get a TEMP
      continue;
    if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP) {
      // REG is free
      prio = 10;
      oldest = i;
      break;
    }
    if (cache_regs[i].type == HR_CACHED) {
      if (rcache_regs_now & cache_regs[i].gregs)
        // REGs needed for the current insn
        i_prio = 0;
      else if (rcache_regs_soon & cache_regs[i].gregs)
        // REGs needed in the next insns
        i_prio = 2;
      else if (rcache_regs_late & cache_regs[i].gregs)
        // REGs needed in some future insn
        i_prio = 4;
      else if (~rcache_regs_discard & cache_regs[i].gregs)
        // REGs not needed in the foreseeable future
        i_prio = 6;
      else
        // REGs soon overwritten anyway
        i_prio = 8;
      if (!(cache_regs[i].flags & HRF_DIRTY)) i_prio ++;

      if (prio < i_prio || (prio == i_prio && cache_regs[i].stamp < min_stamp)) {
        min_stamp = cache_regs[i].stamp;
        oldest = i;
        prio = i_prio;
      }
    }
  }

  if (prio < minprio || oldest == -1)
    return -1;

  if (cache_regs[oldest].type == HR_CACHED)
    rcache_evict_vreg(oldest);
  else
    rcache_free_vreg(oldest);

  return oldest;
}

static int rcache_allocate_vreg(int needed)
{
  int x;

  x = rcache_allocate(1, needed ? 0 : 4);
  if (x < 0)
    x = rcache_allocate(-1, 0);
  return x;
}

static int rcache_allocate_nontemp(void)
{
  int x = rcache_allocate(0, 4);
  return x;
}

static int rcache_allocate_temp(void)
{
  int x = rcache_allocate(-1, 0);
  if (x < 0)
    x = rcache_allocate(0, 0);
  return x;
}
// maps a host register to a REG
static int rcache_map_reg(sh2_reg_e r, int hr)
{
#if REMAP_REGISTER
  int i;

  gconst_kill(r);

  // lookup the TEMP hr maps to
  i = reg_map_host[hr];
  if (i < 0) {
    // must not happen
    printf("invalid host register %d\n", hr);
    exit(1);
  }

  // remove old mappings of r and i if one exists
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);

  // set new mapping
  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 1 << r;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  cache_regs[i].flags |= HRF_DIRTY;
  rcache_lock_vreg(i);
  guest_regs[r].flags |= GRF_DIRTY;
  guest_regs[r].vreg = i;
#if DRC_DEBUG & 64
  RCACHE_CHECK("after map");
#endif
  return cache_regs[i].hreg;
#else
  return rcache_get_reg(r, RC_GR_WRITE, NULL);
#endif
}
// remap vreg from a TEMP to a REG if it will be used (upcoming TEMP invalidation)
static void rcache_remap_vreg(int x)
{
#if REMAP_REGISTER
  u32 rsl_d = rcache_regs_soon | rcache_regs_late;
  int d;

  // x must be a cached vreg
  if (cache_regs[x].type != HR_CACHED || cache_regs[x].locked)
    return;
  // don't do it if x isn't used
  if (!(rsl_d & cache_regs[x].gregs)) {
    // clean here to avoid data loss on invalidation
    rcache_clean_vreg(x);
    return;
  }

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, d,
    if ((guest_regs[d].flags & (GRF_STATIC|GRF_PINNED)) &&
        !cache_regs[guest_regs[d].sreg].locked &&
        !((rsl_d|rcache_regs_now) & cache_regs[guest_regs[d].sreg].gregs)) {
      // STATIC not in its sreg and sreg is available
      rcache_evict_vreg(guest_regs[d].sreg);
      rcache_move_vreg(guest_regs[d].sreg, x);
      return;
    }
  )

  // allocate a non-TEMP vreg
  rcache_lock_vreg(x); // lock to avoid evicting x
  d = rcache_allocate_nontemp();
  rcache_unlock_vreg(x);
  if (d < 0) {
    rcache_clean_vreg(x);
    return;
  }

  // move vreg to new location
  rcache_move_vreg(d, x);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after remap");
#endif
#else
  rcache_clean_vreg(x);
#endif
}

static void rcache_alias_vreg(sh2_reg_e rd, sh2_reg_e rs)
{
#if ALIAS_REGISTERS
  int x;

  // if s isn't constant, it must be in cache for aliasing
  if (!gconst_check(rs))
    rcache_get_reg_(rs, RC_GR_READ, 0, NULL);

  // if d and s are not already aliased
  x = guest_regs[rs].vreg;
  if (guest_regs[rd].vreg != x) {
    // remove possible old mapping of dst
    if (guest_regs[rd].vreg >= 0)
      rcache_remove_vreg_alias(guest_regs[rd].vreg, rd);
    // make dst an alias of src
    if (x >= 0)
      rcache_add_vreg_alias(x, rd);
    // if d is now in cache, it must be dirty
    if (guest_regs[rd].vreg >= 0) {
      x = guest_regs[rd].vreg;
      cache_regs[x].flags |= HRF_DIRTY;
      guest_regs[rd].flags |= GRF_DIRTY;
    }
  }

  gconst_copy(rd, rs);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after alias");
#endif
#else
  int hr_s = rcache_get_reg(rs, RC_GR_READ, NULL);
  int hr_d = rcache_get_reg(rd, RC_GR_WRITE, NULL);

  emith_move_r_r(hr_d, hr_s);
  gconst_copy(rd, rs);
#endif
}
// note: must not be called when doing conditional code
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr)
{
  int src, dst, ali;
  cache_reg_t *tr;
  u32 rsp_d = (rcache_regs_soon | rcache_regs_static | rcache_regs_pinned) &
      ~rcache_regs_discard;

  dst = src = guest_regs[r].vreg;
  rcache_lock_vreg(src); // lock to avoid evicting src
  // good opportunity to relocate a remapped STATIC?
  if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
      src != guest_regs[r].sreg && (src < 0 || mode != RC_GR_READ) &&
      !cache_regs[guest_regs[r].sreg].locked &&
      !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[r].sreg].gregs)) {
    dst = guest_regs[r].sreg;
    rcache_evict_vreg(dst);
  } else if (dst < 0) {
    // allocate a cache register
    if ((dst = rcache_allocate_vreg(rsp_d & (1 << r))) < 0) {
      printf("no registers to evict, aborting\n");
      exit(1);
    }
  }
  tr = &cache_regs[dst];
  tr->stamp = rcache_counter;
  // remove r from src
  if (src >= 0 && src != dst)
    rcache_remove_vreg_alias(src, r);
  rcache_unlock_vreg(src);

  // if r has a constant it may have aliases
  if (mode != RC_GR_WRITE && gconst_try_read(dst, r))
    src = dst;

  // if r will be modified, check for aliases being needed soon
  ali = tr->gregs & ~(1 << r);
  if (mode != RC_GR_READ && src == dst && ali) {
    int x = -1;
    if ((rsp_d|rcache_regs_now) & ali) {
      if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
          guest_regs[r].sreg == dst && !tr->locked) {
        // split aliases if r is STATIC in sreg and dst isn't already locked
        int t;
        FOR_ALL_BITS_SET_DO(ali, t,
          if ((guest_regs[t].flags & (GRF_STATIC|GRF_PINNED)) &&
              !(ali & ~(1 << t)) &&
              !cache_regs[guest_regs[t].sreg].locked &&
              !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[t].sreg].gregs)) {
            // alias is a single STATIC and its sreg is available
            x = guest_regs[t].sreg;
            rcache_evict_vreg(x);
          } else {
            rcache_lock_vreg(dst); // lock to avoid evicting dst
            x = rcache_allocate_vreg(rsp_d & ali);
            rcache_unlock_vreg(dst);
          }
          break;
        )
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          src = dst;
          rcache_move_vreg(x, dst);
        }
      } else {
        // split r
        rcache_lock_vreg(src); // lock to avoid evicting src
        x = rcache_allocate_vreg(rsp_d & (1 << r));
        rcache_unlock_vreg(src);
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          dst = x;
          tr = &cache_regs[dst];
          tr->stamp = rcache_counter;
        }
      }
    }
    if (x < 0)
      // aliases not needed or no vreg available, remove them
      rcache_evict_vreg_aliases(dst, r);
  }

  // assign r to dst
  rcache_add_vreg_alias(dst, r);

  // handle dst register transfer
  if (src < 0 && mode != RC_GR_WRITE)
    emith_ctx_read(tr->hreg, r * 4);
  if (hr) {
    *hr = (src >= 0 ? cache_regs[src].hreg : tr->hreg);
    rcache_lock_vreg(src >= 0 ? src : dst);
  } else if (src >= 0 && mode != RC_GR_WRITE && cache_regs[src].hreg != tr->hreg)
    emith_move_r_r(tr->hreg, cache_regs[src].hreg);

  // housekeeping
  if (do_locking)
    rcache_lock_vreg(dst);
  if (mode != RC_GR_READ) {
    tr->flags |= HRF_DIRTY;
    guest_regs[r].flags |= GRF_DIRTY;
    gconst_kill(r);
    rcache_set_x16(tr->hreg, 0, 0);
  } else if (src >= 0 && cache_regs[src].hreg != tr->hreg)
    rcache_copy_x16(tr->hreg, cache_regs[src].hreg);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getreg");
#endif
  return tr->hreg;
}
static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr)
{
  return rcache_get_reg_(r, mode, 1, hr);
}

static void rcache_pin_reg(sh2_reg_e r)
{
  int hr, x;

  // don't pin if static or already pinned
  if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED))
    return;

  rcache_regs_soon |= (1 << r); // kludge to prevent allocation of a temp
  hr = rcache_get_reg_(r, RC_GR_RMW, 0, NULL);
  x = reg_map_host[hr];

  // can only pin non-TEMPs
  if (!(cache_regs[x].htype & HRT_TEMP)) {
    guest_regs[r].flags |= GRF_PINNED;
    cache_regs[x].flags |= HRF_PINNED;
    guest_regs[r].sreg = x;
    rcache_regs_pinned |= (1 << r);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after pin");
#endif
}

static int rcache_get_tmp(void)
{
  int i;

  i = rcache_allocate_temp();
  if (i < 0) {
    printf("cannot allocate temp\n");
    exit(1);
  }

  cache_regs[i].type = HR_TEMP;
  rcache_lock_vreg(i);

  return cache_regs[i].hreg;
}

static int rcache_get_vreg_hr(int hr)
{
  int i;

  i = reg_map_host[hr];
  if (i < 0 || cache_regs[i].locked) {
    printf("host register %d is locked\n", hr);
    exit(1);
  }

  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);
  else if (cache_regs[i].type == HR_TEMP && cache_regs[i].locked) {
    printf("host reg %d already used, aborting\n", hr);
    exit(1);
  }

  return i;
}

static int rcache_get_vreg_arg(int arg)
{
  int hr = 0;

  host_arg2reg(hr, arg);
  return rcache_get_vreg_hr(hr);
}

// get a reg to be used as function arg
static int rcache_get_tmp_arg(int arg)
{
  int x = rcache_get_vreg_arg(arg);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}

// ... as return value after a call
static int rcache_get_tmp_ret(void)
{
  int x = rcache_get_vreg_hr(RET_REG);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}
// same but caches a reg if access is readonly (announced by hr being NULL)
static int rcache_get_reg_arg(int arg, sh2_reg_e r, int *hr)
{
  int i, srcr, dstr, dstid, keep;
  u32 val;
  host_arg2reg(dstr, arg);

  i = guest_regs[r].vreg;
  if (i >= 0 && cache_regs[i].type == HR_CACHED && cache_regs[i].hreg == dstr)
    // r is already in arg, avoid evicting
    dstid = i;
  else
    dstid = rcache_get_vreg_arg(arg);
  dstr = cache_regs[dstid].hreg;

  if (rcache_is_cached(r)) {
    // r is needed later on anyway
    srcr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
    keep = 1;
  } else if ((guest_regs[r].flags & GRF_CDIRTY) && gconst_get(r, &val)) {
    // r has an uncommitted const - load into arg, but keep constant uncommitted
    srcr = dstr;
    emith_move_r_imm(srcr, val);
    keep = 0;
  } else {
    // must read from ctx
    srcr = dstr;
    emith_ctx_read(srcr, r * 4);
    keep = 1;
  }

  if (cache_regs[dstid].type == HR_CACHED)
    rcache_evict_vreg(dstid);

  cache_regs[dstid].type = HR_TEMP;
  if (hr == NULL) {
    if (dstr != srcr)
      // arg is a copy of cached r
      emith_move_r_r(dstr, srcr);
    else if (keep && guest_regs[r].vreg < 0)
      // keep arg as vreg for r
      rcache_add_vreg_alias(dstid, r);
  } else {
    *hr = srcr;
    if (dstr != srcr) // must lock srcr if not copied here
      rcache_lock_vreg(reg_map_host[srcr]);
  }

  cache_regs[dstid].stamp = ++rcache_counter;
  rcache_lock_vreg(dstid);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getarg");
#endif
  return dstr;
}
static void rcache_free_tmp(int hr)
{
  int i = reg_map_host[hr];

  if (i < 0 || cache_regs[i].type != HR_TEMP) {
    printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, cache_regs[i].type);
    exit(1);
  }

  rcache_unlock_vreg(i);
}

// saves temporary result either in REG or in drctmp
static int rcache_save_tmp(int hr)
{
  int i;

  // find REG, either free or unlocked temp or oldest non-hinted cached
  i = rcache_allocate_nontemp();
  if (i < 0) {
    // if none is available, store in drctmp
    emith_ctx_write(hr, offsetof(SH2, drc_tmp));
    rcache_free_tmp(hr);
    return -1;
  }

  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 0; // not storing any guest register
  cache_regs[i].flags &= HRF_PINNED;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  rcache_lock_vreg(i);
  emith_move_r_r(cache_regs[i].hreg, hr);
  rcache_free_tmp(hr);
  return i;
}

static int rcache_restore_tmp(int x)
{
  int hr;

  // find REG with tmp store: cached but with no gregs
  if (x >= 0) {
    if (cache_regs[x].type != HR_CACHED || cache_regs[x].gregs) {
      printf("invalid tmp storage %d\n", x);
      exit(1);
    }
    // found, transform to a TEMP
    cache_regs[x].type = HR_TEMP;
    return cache_regs[x].hreg;
  }

  // if not available, create a TEMP store and fetch from drctmp
  hr = rcache_get_tmp();
  emith_ctx_read(hr, offsetof(SH2, drc_tmp));

  return hr;
}

static void rcache_free(int hr)
{
  int x = reg_map_host[hr];
  rcache_unlock_vreg(x);
}

static void rcache_unlock(int x)
{
  if (x >= 0)
    cache_regs[x].locked = 0;
}

static void rcache_unlock_all(void)
{
  int i;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    cache_regs[i].locked = 0;
}

static void rcache_unpin_all(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & GRF_PINNED) {
      guest_regs[i].flags &= ~GRF_PINNED;
      cache_regs[guest_regs[i].sreg].flags &= ~HRF_PINNED;
      guest_regs[i].sreg = -1;
      rcache_regs_pinned &= ~(1 << i);
    }
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after unpin");
#endif
}

static void rcache_save_pinned(void)
{
  int i;

  // save pinned regs to context
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if ((guest_regs[i].flags & GRF_PINNED) && guest_regs[i].vreg >= 0)
      emith_ctx_write(cache_regs[guest_regs[i].vreg].hreg, i * 4);
}

static inline void rcache_set_usage_now(u32 mask)
{
  rcache_regs_now = mask;
}

static inline void rcache_set_usage_soon(u32 mask)
{
  rcache_regs_soon = mask;
}

static inline void rcache_set_usage_late(u32 mask)
{
  rcache_regs_late = mask;
}

static inline void rcache_set_usage_discard(u32 mask)
{
  rcache_regs_discard = mask;
}
static inline int rcache_is_cached(sh2_reg_e r)
{
  // is r in cache or needed soon?
  u32 rsc = rcache_regs_soon | rcache_regs_clean;
  return (guest_regs[r].vreg >= 0 || (rsc & (1 << r)));
}

static inline int rcache_is_hreg_used(int hr)
{
  int x = reg_map_host[hr];
  // is hr in use?
  return cache_regs[x].type != HR_FREE &&
      (cache_regs[x].type != HR_TEMP || cache_regs[x].locked);
}
static inline u32 rcache_used_hregs_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if ((cache_regs[i].htype & HRT_TEMP) && cache_regs[i].type != HR_FREE &&
        (cache_regs[i].type != HR_TEMP || cache_regs[i].locked))
      mask |= 1 << cache_regs[i].hreg;

  return mask;
}

static inline u32 rcache_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_DIRTY)
      mask |= 1 << i;
  mask |= gconst_dirty_mask();

  return mask;
}

static inline u32 rcache_cached_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED)
      mask |= cache_regs[i].gregs;

  return mask;
}

static void rcache_clean_tmp(void)
{
  int i;

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED && (cache_regs[i].htype & HRT_TEMP)) {
      rcache_unlock(i);
      rcache_remap_vreg(i);
    }
  rcache_regs_clean = 0;
}

static void rcache_clean_masked(u32 mask)
{
  int i, r, hr;
  u32 m;

  rcache_regs_clean |= mask;
  mask = rcache_regs_clean;

  // clean constants where all aliases are covered by the mask, exempt statics
  // to avoid flushing them to context if sreg isn't available
  m = mask & ~(rcache_regs_static | rcache_regs_pinned);
  for (i = 0; i < ARRAY_SIZE(gconsts); i++)
    if ((gconsts[i].gregs & m) && !(gconsts[i].gregs & ~mask)) {
      FOR_ALL_BITS_SET_DO(gconsts[i].gregs, r,
          if (guest_regs[r].flags & GRF_CDIRTY) {
            hr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
            rcache_clean_vreg(reg_map_host[hr]);
            break;
          });
    }

  // clean vregs where all aliases are covered by the mask
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED &&
        (cache_regs[i].gregs & mask) && !(cache_regs[i].gregs & ~mask))
      rcache_clean_vreg(i);
}

static void rcache_clean(void)
{
  int i;
  gconst_clean();

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--)
    if (cache_regs[i].type == HR_CACHED)
      rcache_clean_vreg(i);

  // relocate statics to their sregs (necessary before conditional jumps)
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if ((guest_regs[i].flags & (GRF_STATIC|GRF_PINNED)) &&
        guest_regs[i].vreg != guest_regs[i].sreg) {
      rcache_lock_vreg(guest_regs[i].vreg);
      rcache_evict_vreg(guest_regs[i].sreg);
      rcache_unlock_vreg(guest_regs[i].vreg);

      if (guest_regs[i].vreg < 0)
        emith_ctx_read(cache_regs[guest_regs[i].sreg].hreg, i*4);
      else {
        emith_move_r_r(cache_regs[guest_regs[i].sreg].hreg,
            cache_regs[guest_regs[i].vreg].hreg);
        rcache_copy_x16(cache_regs[guest_regs[i].sreg].hreg,
            cache_regs[guest_regs[i].vreg].hreg);
        rcache_remove_vreg_alias(guest_regs[i].vreg, i);
      }
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_regs_clean = 0;
}
static void rcache_invalidate_tmp(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype & HRT_TEMP) {
      rcache_unlock(i);
      if (cache_regs[i].type == HR_CACHED)
        rcache_evict_vreg(i);
      else
        rcache_free_vreg(i);
    }
  }
}

static void rcache_invalidate(void)
{
  int i;

  gconst_invalidate();
  rcache_unlock_all();

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    rcache_free_vreg(i);

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i].flags &= GRF_STATIC;
    if (!(guest_regs[i].flags & GRF_STATIC))
      guest_regs[i].vreg = -1;
    else {
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_counter = 0;
  rcache_regs_now = rcache_regs_soon = rcache_regs_late = 0;
  rcache_regs_discard = rcache_regs_clean = 0;
}

static void rcache_flush(void)
{
  rcache_clean();
  rcache_invalidate();
}

static void rcache_create(void)
{
  int x = 0, i;

  // create cache_regs as host register representation
  // RET_REG/params should be first TEMPs to avoid allocation conflicts in calls
  cache_regs[x++] = (cache_reg_t) {.hreg = RET_REG, .htype = HRT_TEMP};
  for (i = 0; i < ARRAY_SIZE(hregs_param); i++)
    if (hregs_param[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_param[i],.htype = HRT_TEMP};

  for (i = 0; i < ARRAY_SIZE(hregs_temp); i++)
    if (hregs_temp[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_temp[i], .htype = HRT_TEMP};

  for (i = ARRAY_SIZE(hregs_saved)-1; i >= 0; i--)
    if (hregs_saved[i] != CONTEXT_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_saved[i], .htype = HRT_REG};

  if (x != ARRAY_SIZE(cache_regs)) {
    printf("rcache_create failed (conflicting register count)\n");
    exit(1);
  }

  // mapping from host_register to cache regs index
  memset(reg_map_host, -1, sizeof(reg_map_host));
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype)
      reg_map_host[cache_regs[i].hreg] = i;
    if (cache_regs[i].htype == HRT_REG)
      rcache_vregs_reg |= (1 << i);
  }

  // create static host register mapping for SH2 regs
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i] = (guest_reg_t){.sreg = -1};
  }
  for (i = 0; i < ARRAY_SIZE(regs_static); i += 2) {
    for (x = ARRAY_SIZE(cache_regs)-1; x >= 0; x--)
      if (cache_regs[x].hreg == regs_static[i+1]) break;
    if (x >= 0) {
      guest_regs[regs_static[i]] = (guest_reg_t){.flags = GRF_STATIC,.sreg = x};
      rcache_regs_static |= (1 << regs_static[i]);
      rcache_vregs_reg &= ~(1 << x);
    }
  }

  printf("DRC registers created, %ld host regs (%d REG, %d STATIC, 1 CTX)\n",
    CACHE_REGS+1L, count_bits(rcache_vregs_reg),count_bits(rcache_regs_static));
}

static void rcache_init(void)
{
  // create DRC data structures
  rcache_create();
  rcache_invalidate();
#if DRC_DEBUG & 64
  RCACHE_CHECK("after init");
#endif
}
// ---------------------------------------------------------------

// swap 32 bit value read from mem in generated code (same as CPU_BE2)
static void emit_le_swap(int cond, int r)
{
#if CPU_IS_LE
  if (cond == -1)
    emith_ror(r, r, 16);
  else
    emith_ror_c(cond, r, r, 16);
#endif
}

// fix memory byte ptr in generated code (same as MEM_BE2)
static void emit_le_ptr8(int cond, int r)
{
#if CPU_IS_LE
  if (cond == -1)
    emith_eor_r_imm_ptr(r, 1);
  else
    emith_eor_r_imm_ptr_c(cond, r, 1);
#endif
}

// split address by mask, in base part (upper) and offset (lower, signed!)
static uptr split_address(uptr la, uptr mask, s32 *offs)
{
  uptr sign = (mask>>1) + 1; // sign bit in offset
  *offs = (la & mask) | (la & sign ? ~mask : 0); // offset part, sign extended
  la = (la & ~mask) + ((la & sign) << 1); // base part, corrected for offs sign
  if (~mask && la == ~mask && !(*offs & sign)) { // special case la=-1 & offs>0
    *offs = -*offs;
    la = 0;
  }
  return la;
}
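// worked example (assuming mask = 0xff, so sign = 0x80):
//   la = 0x1234 -> offs = 0x34,  base = 0x1200 (0x1200 + 0x34 = 0x1234)
//   la = 0x12f0 -> offs = -0x10, base = 0x1300 (0x1300 - 0x10 = 0x12f0)
// i.e. the low bits become a signed offset and the base absorbs the carry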

// NB may return either REG or TEMP
static int emit_get_rbase_and_offs(SH2 *sh2, sh2_reg_e r, int rmode, s32 *offs)
{
  uptr omask = emith_rw_offs_max(); // offset mask
  u32 mask = 0;
  u32 a;
  int poffs;
  int hr, hr2;
  uptr la;

  // is r constant and points to a memory region?
  if (! gconst_get(r, &a))
    return -1;
  poffs = dr_ctx_get_mem_ptr(sh2, a, &mask);
  if (poffs == -1)
    return -1;

  if (mask < 0x20000) {
    // data array, BIOS, DRAM, can't safely access directly since host addr may
    // change (BIOS/da code may run on either core, DRAM may be switched)
    hr = rcache_get_tmp();
    a = (a + *offs) & mask;
    if (poffs == offsetof(SH2, p_da)) {
      // access sh2->data_array directly
      a = split_address(a + offsetof(SH2, data_array), omask, offs);
      emith_add_r_r_ptr_imm(hr, CONTEXT_REG, a);
    } else {
      a = split_address(a, omask, offs);
      emith_ctx_read_ptr(hr, poffs);
      if (a)
        emith_add_r_r_ptr_imm(hr, hr, a);
    }
    return hr;
  }

  // ROM, SDRAM. Host address should be mmapped to be equal to SH2 address.
  la = (uptr)*(void **)((char *)sh2 + poffs);

  // if r is in rcache or needed soon anyway, and offs is relative to region,
  // and address translation fits in add_ptr_imm (s32), then use rcached const
  if (la == (s32)la && !(((a & mask) + *offs) & ~mask) && rcache_is_cached(r)) {
#if CPU_IS_LE // need to fix odd address for correct byte addressing
    if (a & 1) *offs += (*offs&1) ? 2 : -2;
#endif
    la -= (s32)((a & ~mask) - *offs); // diff between reg and memory
    hr = hr2 = rcache_get_reg(r, rmode, NULL);
    if ((s32)a < 0) emith_uext_ptr(hr2);
    la = split_address(la, omask, offs);
    if (la) {
      hr = rcache_get_tmp();
      emith_add_r_r_ptr_imm(hr, hr2, la);
      rcache_free(hr2);
    }
  } else {
    // known fixed host address
    la = split_address(la + ((a + *offs) & mask), omask, offs);
    hr = rcache_get_tmp();
    emith_move_r_ptr_imm(hr, la);
  }
  return hr;
}
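
/* Sketch of the two strategies (addresses illustrative): for ROM/SDRAM that
 * is mmapped 1:1, a cached guest register holding e.g. 0x06001000 can itself
 * serve as the host base, adjusted by at most one pointer add; for the small
 * regions (data array, BIOS, DRAM) the base is always rebuilt from
 * CONTEXT_REG or a region pointer in the context, since the host address may
 * change at runtime.
 */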

// read const data from const ROM address
static int emit_get_rom_data(SH2 *sh2, sh2_reg_e r, s32 offs, int size, u32 *val)
{
  u32 a, mask;

  *val = 0;
  if (gconst_get(r, &a)) {
    a += offs;
    // check if rom is memory mapped (not bank switched), and address is in rom
    if (dr_is_rom(a) && p32x_sh2_get_mem_ptr(a, &mask, sh2) == sh2->p_rom) {
      switch (size & MF_SIZEMASK) {
      case 0: *val = (s8)p32x_sh2_read8(a, sh2s); break;   // 8
      case 1: *val = (s16)p32x_sh2_read16(a, sh2s); break; // 16
      case 2: *val = p32x_sh2_read32(a, sh2s); break;      // 32
      }
      return 1;
    }
  }
  return 0;
}
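
/* Usage note: this lets constant-address ROM reads be folded at translation
 * time, e.g. a MOV.L @(disp,Rm),Rn with Rm known to point into mmapped ROM
 * becomes an immediate load of the fetched value instead of a memhandler
 * call (see the PROPAGATE_CONSTANTS path in emit_memhandler_read_rr below).
 */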

static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
{
#if PROPAGATE_CONSTANTS
  gconst_new(dst, imm);
#else
  int hr = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  emith_move_r_imm(hr, imm);
#endif
}

static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
{
  if (gconst_check(src) || rcache_is_cached(src))
    rcache_alias_vreg(dst, src);
  else {
    int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
    emith_ctx_read(hr_d, src * 4);
  }
}

static void emit_add_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_add_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val + imm);
  } else
    gconst_new(r, val + imm);
}

static void emit_sub_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_sub_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val - imm);
  } else
    gconst_new(r, val - imm);
}

static void emit_sync_t_to_sr(void)
{
  // avoid reloading SR from context if there's nothing to do
  if (emith_get_t_cond() >= 0) {
    int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    emith_sync_t(sr);
  }
}

// rd = @(arg0)
static int emit_memhandler_read(int size)
{
  int hr;

  emit_sync_t_to_sr();
  rcache_clean_tmp();
#ifndef DRC_SR_REG
  // must writeback cycles for poll detection stuff
  if (guest_regs[SHR_SR].vreg != -1)
    rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
#endif
  rcache_invalidate_tmp();

  if (size & MF_POLLING)
    switch (size & MF_SIZEMASK) {
    case 0: emith_call(sh2_drc_read8_poll); break;  // 8
    case 1: emith_call(sh2_drc_read16_poll); break; // 16
    case 2: emith_call(sh2_drc_read32_poll); break; // 32
    }
  else
    switch (size & MF_SIZEMASK) {
    case 0: emith_call(sh2_drc_read8); break;  // 8
    case 1: emith_call(sh2_drc_read16); break; // 16
    case 2: emith_call(sh2_drc_read32); break; // 32
    }

  hr = rcache_get_tmp_ret();
  rcache_set_x16(hr, (size & MF_SIZEMASK) < 2, 0);
  return hr;
}

// @(arg0) = arg1
static void emit_memhandler_write(int size)
{
  emit_sync_t_to_sr();
  rcache_clean_tmp();
#ifndef DRC_SR_REG
  if (guest_regs[SHR_SR].vreg != -1)
    rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
#endif
  rcache_invalidate_tmp();

  switch (size & MF_SIZEMASK) {
  case 0: emith_call(sh2_drc_write8); break;  // 8
  case 1: emith_call(sh2_drc_write16); break; // 16
  case 2: emith_call(sh2_drc_write32); break; // 32
  }
}

// rd = @(Rs,#offs); rd < 0 -> return a temp
static int emit_memhandler_read_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, s32 offs, int size)
{
  int hr, hr2;
  u32 val;

#if PROPAGATE_CONSTANTS
  if (emit_get_rom_data(sh2, rs, offs, size, &val)) {
    if (rd == SHR_TMP) {
      hr2 = rcache_get_tmp();
      emith_move_r_imm(hr2, val);
    } else {
      emit_move_r_imm32(rd, val);
      hr2 = rcache_get_reg(rd, RC_GR_RMW, NULL);
    }
    rcache_set_x16(hr2, val == (s16)val, val == (u16)val);
    if (size & MF_POSTINCR)
      emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
    return hr2;
  }

  val = size & MF_POSTINCR;
  hr = emit_get_rbase_and_offs(sh2, rs, val ? RC_GR_RMW : RC_GR_READ, &offs);
  if (hr != -1) {
    if (rd == SHR_TMP)
      hr2 = rcache_get_tmp();
    else
      hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
    switch (size & MF_SIZEMASK) {
    case 0: emith_read8s_r_r_offs(hr2, hr, MEM_BE2(offs)); break; // 8
    case 1: emith_read16s_r_r_offs(hr2, hr, offs); break;         // 16
    case 2: emith_read_r_r_offs(hr2, hr, offs); emit_le_swap(-1, hr2); break;
    }
    rcache_free(hr);
    if (size & MF_POSTINCR)
      emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
    return hr2;
  }
#endif

  if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
    hr = rcache_get_tmp_arg(0);
    emith_move_r_imm(hr, val + offs);
    if (size & MF_POSTINCR)
      gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  } else if (size & MF_POSTINCR) {
    hr = rcache_get_tmp_arg(0);
    hr2 = rcache_get_reg(rs, RC_GR_RMW, NULL);
    emith_add_r_r_imm(hr, hr2, offs);
    emith_add_r_imm(hr2, 1 << (size & MF_SIZEMASK));
    if (gconst_get(rs, &val))
      gconst_set(rs, val + (1 << (size & MF_SIZEMASK)));
  } else {
    hr = rcache_get_reg_arg(0, rs, &hr2);
    if (offs || hr != hr2)
      emith_add_r_r_imm(hr, hr2, offs);
  }

  hr = emit_memhandler_read(size);

  if (rd == SHR_TMP)
    hr2 = hr;
  else
    hr2 = rcache_map_reg(rd, hr);

  if (hr != hr2) {
    emith_move_r_r(hr2, hr);
    rcache_free_tmp(hr);
  }
  return hr2;
}

// @(Rs,#offs) = rd; rd < 0 -> write arg1
static void emit_memhandler_write_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, s32 offs, int size)
{
  int hr, hr2;
  u32 val;

  if (rd == SHR_TMP) {
    host_arg2reg(hr2, 1); // already locked and prepared by caller
  } else if ((size & MF_PREDECR) && rd == rs) { // must avoid caching rd in arg1
    hr2 = rcache_get_reg_arg(1, rd, &hr);
    if (hr != hr2) {
      emith_move_r_r(hr2, hr);
      rcache_free(hr2);
    }
  } else
    hr2 = rcache_get_reg_arg(1, rd, NULL);
  if (rd != SHR_TMP)
    rcache_unlock(guest_regs[rd].vreg); // unlock in case rd is in arg0

  if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
    hr = rcache_get_tmp_arg(0);
    if (size & MF_PREDECR) {
      val -= 1 << (size & MF_SIZEMASK);
      gconst_new(rs, val);
    }
    emith_move_r_imm(hr, val + offs);
  } else if (offs || (size & MF_PREDECR)) {
    if (size & MF_PREDECR)
      emit_sub_r_imm(rs, 1 << (size & MF_SIZEMASK));
    rcache_unlock(guest_regs[rs].vreg); // unlock in case rs is in arg0
    hr = rcache_get_reg_arg(0, rs, &hr2);
    if (offs || hr != hr2)
      emith_add_r_r_imm(hr, hr2, offs);
  } else
    hr = rcache_get_reg_arg(0, rs, NULL);

  emit_memhandler_write(size);
}

// rd = @(Rx,Ry); rd < 0 -> return a temp
static int emit_indirect_indexed_read(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
{
  int hr, hr2;
  int tx, ty;
#if PROPAGATE_CONSTANTS
  u32 offs;

  // if offs is larger than 0x01000000, it's most probably the base address part
  if (gconst_get(ry, &offs) && offs < 0x01000000)
    return emit_memhandler_read_rr(sh2, rd, rx, offs, size);
  if (gconst_get(rx, &offs) && offs < 0x01000000)
    return emit_memhandler_read_rr(sh2, rd, ry, offs, size);
#endif
  hr = rcache_get_reg_arg(0, rx, &tx);
  ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  emith_add_r_r_r(hr, tx, ty);
  hr = emit_memhandler_read(size);

  if (rd == SHR_TMP)
    hr2 = hr;
  else
    hr2 = rcache_map_reg(rd, hr);

  if (hr != hr2) {
    emith_move_r_r(hr2, hr);
    rcache_free_tmp(hr);
  }
  return hr2;
}

// @(Rx,Ry) = rd; rd < 0 -> write arg1
static void emit_indirect_indexed_write(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
{
  int hr, tx, ty;
#if PROPAGATE_CONSTANTS
  u32 offs;

  // if offs is larger than 0x01000000, it's most probably the base address part
  if (gconst_get(ry, &offs) && offs < 0x01000000)
    return emit_memhandler_write_rr(sh2, rd, rx, offs, size);
  if (gconst_get(rx, &offs) && offs < 0x01000000)
    return emit_memhandler_write_rr(sh2, rd, ry, offs, size);
#endif
  if (rd != SHR_TMP)
    rcache_get_reg_arg(1, rd, NULL);
  hr = rcache_get_reg_arg(0, rx, &tx);
  ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  emith_add_r_r_r(hr, tx, ty);
  emit_memhandler_write(size);
}

// @Rn+,@Rm+
static void emit_indirect_read_double(SH2 *sh2, int *rnr, int *rmr, sh2_reg_e rn, sh2_reg_e rm, int size)
{
  int tmp;

  // unlock rn, rm here to avoid REG shortage in MAC operation
  tmp = emit_memhandler_read_rr(sh2, SHR_TMP, rn, 0, size | MF_POSTINCR);
  rcache_unlock(guest_regs[rn].vreg);
  tmp = rcache_save_tmp(tmp);
  *rmr = emit_memhandler_read_rr(sh2, SHR_TMP, rm, 0, size | MF_POSTINCR);
  rcache_unlock(guest_regs[rm].vreg);
  *rnr = rcache_restore_tmp(tmp);
}

static void emit_do_static_regs(int is_write, int tmpr)
{
  int i, r, count;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_STATIC|GRF_PINNED))
      r = cache_regs[guest_regs[i].vreg].hreg;
    else
      continue;

    for (count = 1; i < ARRAY_SIZE(guest_regs) - 1; i++, r++) {
      if ((guest_regs[i + 1].flags & (GRF_STATIC|GRF_PINNED)) &&
          cache_regs[guest_regs[i + 1].vreg].hreg == r + 1)
        count++;
      else
        break;
    }

    if (count > 1) {
      // i, r point to last item
      if (is_write)
        emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
      else
        emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
    } else {
      if (is_write)
        emith_ctx_write(r, i * 4);
      else
        emith_ctx_read(r, i * 4);
    }
  }
}
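
/* Example (register numbers invented for illustration): if guest R4..R7 are
 * statically mapped to consecutive host regs h8..h11, the loop above merges
 * the four context transfers into a single
 * emith_ctx_{read,write}_multiple(h8, 4*4, 4, tmpr), which hosts with
 * load/store-multiple instructions can emit as one insn.
 */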

#if DIV_OPTIMIZER
// divide operation replacement functions, called by compiled code. Only the
// 32:16 cases and the 64:32 cases described in the SH2 prog man are replaced.
static uint32_t REGPARM(2) sh2_drc_divu32(uint32_t dv, uint32_t ds)
{
  if (ds && ds >= dv) {
    // good case: no divide by 0, and no result overflow
    uint32_t quot = dv / (ds>>16), rem = dv - (quot * (ds>>16));
    if (~quot&1) rem -= ds>>16;
    return (uint16_t)quot | ((2*rem + (quot>>31)) << 16);
  } else {
    // bad case: use the sh2 algo to get the right result
    int q = 0, t = 0, s = 16;
    while (s--) {
      uint32_t v = dv>>31;
      dv = (dv<<1) | t;
      t = v;
      v = dv;
      if (q) dv += ds, q = dv < v;
      else   dv -= ds, q = !(dv < v);
      q ^= t, t = !q;
    }
    return (dv<<1) | t;
  }
}

static uint32_t REGPARM(3) sh2_drc_divu64(uint32_t dh, uint32_t *dl, uint32_t ds)
{
  if (ds > 1 && ds >= dh) {
    // good case: no divide by 0, and no result overflow
    uint64_t dv = *dl | ((uint64_t)dh << 32);
    uint32_t quot = dv / ds, rem = dv - (quot * ds);
    if (~quot&1) rem -= ds;
    *dl = quot;
    return rem;
  } else {
    // bad case: use the sh2 algo to get the right result
    uint64_t dv = *dl | ((uint64_t)dh << 32);
    int q = 0, t = 0, s = 32;
    while (s--) {
      uint64_t v = dv>>63;
      dv = (dv<<1) | t;
      t = v;
      v = dv;
      if (q) dv += ((uint64_t)ds << 32), q = dv < v;
      else   dv -= ((uint64_t)ds << 32), q = !(dv < v);
      q ^= t, t = !q;
    }
    *dl = (dv<<1) | t;
    return (dv>>32);
  }
}

static uint32_t REGPARM(2) sh2_drc_divs32(int32_t dv, int32_t ds)
{
  uint32_t adv = abs(dv), ads = abs(ds)>>16;
  if (ads > 1 && ads > adv>>16 && (int32_t)ads > 0 && !(uint16_t)ds) {
    // good case: no divide by 0, and no result overflow
    uint32_t quot = adv / ads, rem = adv - (quot * ads);
    int m1 = (rem ? dv^ds : ds) < 0;
    if (rem && dv < 0) rem = (quot&1 ? -rem : +ads-rem);
    else               rem = (quot&1 ? +rem : -ads+rem);
    quot = ((dv^ds)<0 ? -quot : +quot) - m1;
    return (uint16_t)quot | ((2*rem + (quot>>31)) << 16);
  } else {
    // bad case: use the sh2 algo to get the right result
    int m = (uint32_t)ds>>31, q = (uint32_t)dv>>31, t = m^q, s = 16;
    while (s--) {
      uint32_t v = (uint32_t)dv>>31;
      dv = (dv<<1) | t;
      t = v;
      v = dv;
      if (m^q) dv += ds, q = (uint32_t)dv < v;
      else     dv -= ds, q = !((uint32_t)dv < v);
      q ^= m^t, t = !(m^q);
    }
    return (dv<<1) | t;
  }
}

static uint32_t REGPARM(3) sh2_drc_divs64(int32_t dh, uint32_t *dl, int32_t ds)
{
  int64_t _dv = *dl | ((int64_t)dh << 32);
  uint64_t adv = (_dv < 0 ? -_dv : _dv); // llabs isn't in older toolchains
  uint32_t ads = abs(ds);
  if (ads > 1 && ads > adv>>32 && (int64_t)adv > 0) {
    // good case: no divide by 0, and no result overflow
    uint32_t quot = adv / ads, rem = adv - ((uint64_t)quot * ads);
    int m1 = (rem ? dh^ds : ds) < 0;
    if (rem && dh < 0) rem = (quot&1 ? -rem : +ads-rem);
    else               rem = (quot&1 ? +rem : -ads+rem);
    quot = ((dh^ds)<0 ? -quot : +quot) - m1;
    *dl = quot;
    return rem;
  } else {
    // bad case: use the sh2 algo to get the right result
    uint64_t dv = *dl | ((uint64_t)dh << 32);
    int m = (uint32_t)ds>>31, q = (uint64_t)dv>>63, t = m^q, s = 32;
    while (s--) {
      int64_t v = (uint64_t)dv>>63;
      dv = (dv<<1) | t;
      t = v;
      v = dv;
      if (m^q) dv += ((uint64_t)ds << 32), q = dv < v;
      else     dv -= ((uint64_t)ds << 32), q = !(dv < v);
      q ^= m^t, t = !(m^q);
    }
    *dl = (dv<<1) | t;
    return (dv>>32);
  }
}
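
/* For reference, a sketch of the 32:16 idiom from the SH2 prog man that the
 * recognizer matches (exact instruction sequence may vary between games):
 *   shll16  Rm        ; divisor to upper 16 bits
 *   div0u
 *   div1    Rm,Rn     ; repeated 16 times
 *   rotcl   Rn        ; -> quotient bits
 * The DIV0U/DIV0S handlers in sh2_translate call the helpers above and then
 * use skip_op to step over the whole DIV1/ROTCL run.
 */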
#endif

// block local link stuff
struct linkage {
  u32 pc;
  void *ptr;
  struct block_link *bl;
  u32 mask;
};
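
/* Note (from usage in emit_branch_linkage_code below): mask bit 0x2 marks a
 * local branch whose target must be looked up in the target list; without it
 * the entry is a blx stub, where bit 0x1 selects sh2_drc_exit over
 * sh2_drc_dispatcher as the jump destination.
 */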

static inline int find_in_linkage(const struct linkage *array, int size, u32 pc)
{
  int i;

  for (i = 0; i < size; i++)
    if (pc == array[i].pc)
      return i;

  return -1;
}

static int find_in_sorted_linkage(const struct linkage *array, int size, u32 pc)
{
  // binary search in sorted array
  int left = 0, right = size-1;

  while (left <= right)
  {
    int middle = (left + right) / 2;
    if (array[middle].pc == pc)
      return middle;
    else if (array[middle].pc < pc)
      left = middle + 1;
    else
      right = middle - 1;
  }
  return -1;
}
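
/* This relies on the array being sorted by pc, which holds for the
 * branch_targets array in sh2_translate: targets are collected while pc
 * scans the block in ascending order. Typical use:
 *   v = find_in_sorted_linkage(branch_targets, branch_target_count, pc);
 * with v then indexing the slot whose ->ptr receives the code address.
 */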

static void emit_branch_linkage_code(SH2 *sh2, struct block_desc *block, int tcache_id,
    const struct linkage *targets, int target_count,
    const struct linkage *links, int link_count)
{
  struct block_link *bl;
  int u, v, tmp;

  emith_flush();
  for (u = 0; u < link_count; u++) {
    emith_pool_check();
    // look up local branch targets
    if (links[u].mask & 0x2) {
      v = find_in_sorted_linkage(targets, target_count, links[u].pc);
      if (v < 0 || ! targets[v].ptr) {
        // forward branch not yet resolved, prepare external linking
        emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
        bl = dr_prepare_ext_branch(block->entryp, links[u].pc, sh2->is_slave, tcache_id);
        if (bl)
          bl->type = BL_LDJMP;
        tmp = rcache_get_tmp_arg(0);
        emith_move_r_imm(tmp, links[u].pc);
        rcache_free_tmp(tmp);
        emith_jump_patchable(sh2_drc_dispatcher);
      } else if (emith_jump_patch_inrange(links[u].ptr, targets[v].ptr)) {
        // inrange local branch
        emith_jump_patch(links[u].ptr, targets[v].ptr, NULL);
      } else {
        // far local branch
        emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
        emith_jump(targets[v].ptr);
      }
    } else {
      // external or exit, emit blx area entry
      void *target = (links[u].mask & 0x1 ? sh2_drc_exit : sh2_drc_dispatcher);
      if (links[u].bl)
        links[u].bl->blx = tcache_ptr;
      emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
      tmp = rcache_get_tmp_arg(0);
      emith_move_r_imm(tmp, links[u].pc & ~1);
      rcache_free_tmp(tmp);
      emith_jump(target);
    }
  }
}

#define DELAY_SAVE_T(sr) { \
  int t_ = rcache_get_tmp(); \
  emith_bic_r_imm(sr, T_save); \
  emith_and_r_r_imm(t_, sr, 1); \
  emith_or_r_r_lsl(sr, t_, T_SHIFT); \
  rcache_free_tmp(t_); \
}
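
/* DELAY_SAVE_T keeps a copy of T in the separate T_save bit of SR, so that a
 * delay slot insn which overwrites T (a forward dependency) doesn't clobber
 * the value the branch condition still has to test; see the delay_dep_fw
 * handling in sh2_translate.
 */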

#define FLUSH_CYCLES(sr) \
  if (cycles > 0) { \
    emith_sub_r_imm(sr, cycles << 12); \
    cycles = 0; \
  }
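
/* The host SR register doubles as the cycle counter: the SH2 flag bits live
 * in the low bits, while bits 12 and up hold the remaining cycle budget.
 * FLUSH_CYCLES therefore subtracts cycles<<12, and the block entry code below
 * only needs a signed compare of SR against 0 to decide whether to exit.
 */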

static void *dr_get_pc_base(u32 pc, SH2 *sh2);

static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
{
  // branch targets in current block
  static struct linkage branch_targets[MAX_LOCAL_TARGETS];
  int branch_target_count = 0;
  // unresolved local or external targets with block link/exit area if needed
  static struct linkage blx_targets[MAX_LOCAL_BRANCHES];
  int blx_target_count = 0;

  static u8 op_flags[BLOCK_INSN_LIMIT];
  enum flg_states { FLG_UNKNOWN, FLG_UNUSED, FLG_0, FLG_1 };
  struct drcf {
    int delay_reg:8;
    u32 loop_type:8;
    u32 polling:8;
    u32 pinning:1;
    u32 test_irq:1;
    u32 pending_branch_direct:1;
    u32 pending_branch_indirect:1;
    u32 Tflag:2, Mflag:2;
  } drcf = { 0, };
#if LOOP_OPTIMIZER
  // loops with pinned registers for optimization
  // pinned regs are like statics and don't need saving/restoring inside a loop
  static struct linkage pinned_loops[MAX_LOCAL_TARGETS/16];
  int pinned_loop_count = 0;
#endif

  // PC of current, first, last SH2 insn
  u32 pc, base_pc, end_pc;
  u32 base_literals, end_literals;
  u8 *block_entry_ptr;
  struct block_desc *block;
  struct block_entry *entry;
  struct block_link *bl;
  u16 *dr_pc_base;
  struct op_data *opd;
  int blkid_main = 0;
  int skip_op = 0;
  int tmp, tmp2;
  int cycles;
  int i, v;
  u32 u, m1, m2, m3, m4;
  int op;
  u16 crc;

  base_pc = sh2->pc;

  // get base/validate PC
  dr_pc_base = dr_get_pc_base(base_pc, sh2);
  if (dr_pc_base == (void *)-1) {
    printf("invalid PC, aborting: %08lx\n", (long)base_pc);
    // FIXME: be less destructive
    exit(1);
  }

  // initial passes to disassemble and analyze the block
  crc = scan_block(base_pc, sh2->is_slave, op_flags, &end_pc, &base_literals, &end_literals);
  end_literals = dr_check_nolit(base_literals, end_literals, tcache_id);
  if (base_literals == end_literals) // map empty lit section to end of code
    base_literals = end_literals = end_pc;

  // if there is already a translated but inactive block, reuse it
  block = dr_find_inactive_block(tcache_id, crc, base_pc, end_pc - base_pc,
    base_literals, end_literals - base_literals);

  if (block) {
    dbg(2, "== %csh2 reuse block %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
      base_pc, end_pc, base_literals, end_literals, block->entryp->tcache_ptr);
    dr_activate_block(block, tcache_id, sh2->is_slave);
    emith_update_cache();
    return block->entryp[0].tcache_ptr;
  }

  // collect branch_targets that don't land on delay slots
  m1 = m2 = m3 = m4 = v = op = 0;
  for (pc = base_pc, i = 0; pc < end_pc; i++, pc += 2) {
    if (op_flags[i] & OF_DELAY_OP)
      op_flags[i] &= ~OF_BTARGET;
    if (op_flags[i] & OF_BTARGET) {
      if (branch_target_count < ARRAY_SIZE(branch_targets))
        branch_targets[branch_target_count++] = (struct linkage) { .pc = pc };
      else {
        printf("warning: linkage overflow\n");
        end_pc = pc;
        break;
      }
    }
    if (ops[i].op == OP_LDC && (ops[i].dest & BITMASK1(SHR_SR)) && pc+2 < end_pc)
      op_flags[i+1] |= OF_BTARGET; // RTE entrypoint in case of SR.IMASK change
    // unify T and SR since rcache doesn't know about "virtual" guest regs
    if (ops[i].source & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
    if (ops[i].dest & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
    if (ops[i].dest & BITMASK1(SHR_T)) ops[i].dest |= BITMASK1(SHR_SR);

#if LOOP_DETECTION
    // loop types detected:
    // 1. target: ... BRA target -> idle loop
    // 2. target: ... delay insn ... BF target -> delay loop
    // 3. target: ... poll insn ... BF/BT target -> poll loop
    // 4. target: ... poll insn ... BF/BT exit ... BRA target, exit: -> poll
    // conditions:
    // a. no further branch targets between target and back jump.
    // b. no unconditional branch insn inside the loop.
    // c. exactly one poll or delay insn is allowed inside a delay/poll loop
    // (scan_block marks loops only if they meet conditions a through c)
    // d. idle loops do not modify anything but PC,SR and contain no branches
    // e. delay/poll loops do not modify anything but the concerned reg,PC,SR
    // f. loading constants into registers inside the loop is allowed
    // g. a delay/poll loop must have a conditional branch somewhere
    // h. an idle loop must not have a conditional branch
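    // e.g. a typical type 3 poll loop as seen in 32x code (illustration only):
    //   target: mov.l @Rm,R0   ; poll insn
    //           tst   R0,R0
    //           bt    target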
    if (op_flags[i] & OF_BTARGET) {
      // possible loop entry point
      drcf.loop_type = op_flags[i] & OF_LOOP;
      drcf.pending_branch_direct = drcf.pending_branch_indirect = 0;
      op = OF_IDLE_LOOP; // loop type
      v = i;
      m1 = m2 = m3 = m4 = 0;
      if (!drcf.loop_type) // reset basic loop if it isn't recognized as loop
        op_flags[i] &= ~OF_BASIC_LOOP;
    }
    if (drcf.loop_type) {
      // calculate reg masks for loop pinning
      m4 |= ops[i].source & ~m3;
      m3 |= ops[i].dest;
      // detect loop type, and store poll/delay register
      if (op_flags[i] & OF_POLL_INSN) {
        op = OF_POLL_LOOP;
        m1 |= ops[i].dest; // loop poll/delay regs
      } else if (op_flags[i] & OF_DELAY_INSN) {
        op = OF_DELAY_LOOP;
        m1 |= ops[i].dest;
      } else if (ops[i].op != OP_LOAD_POOL && ops[i].op != OP_LOAD_CONST
              && (ops[i].op != OP_MOVE || op != OF_POLL_LOOP)) {
        // not (MOV @(PC) or MOV # or (MOV reg and poll)), condition f
        m2 |= ops[i].dest; // regs modified by other insns
      }
      // branch detector
      if (OP_ISBRAIMM(ops[i].op)) {
        if (ops[i].imm == base_pc + 2*v)
          drcf.pending_branch_direct = 1; // backward branch detected
        else
          op_flags[v] &= ~OF_BASIC_LOOP; // no basic loop
      }
      if (OP_ISBRACND(ops[i].op))
        drcf.pending_branch_indirect = 1; // conditions g,h - cond.branch
      // poll/idle loops terminate with their backwards branch to the loop start
      if (drcf.pending_branch_direct && !(op_flags[i+1] & OF_DELAY_OP)) {
        m2 &= ~(m1 | BITMASK3(SHR_PC, SHR_SR, SHR_T)); // conditions d,e + g,h
        if (m2 || ((op == OF_IDLE_LOOP) == (drcf.pending_branch_indirect)))
          op = 0; // conditions not met
        op_flags[v] = (op_flags[v] & ~OF_LOOP) | op; // set loop type

#if LOOP_OPTIMIZER
        if (op_flags[v] & OF_BASIC_LOOP) {
          m3 &= ~rcache_regs_static & ~BITMASK5(SHR_PC, SHR_PR, SHR_SR, SHR_T, SHR_MEM);
          if (m3 && count_bits(m3) < count_bits(rcache_vregs_reg) &&
              pinned_loop_count < ARRAY_SIZE(pinned_loops)-1) {
            pinned_loops[pinned_loop_count++] =
              (struct linkage) { .pc = base_pc + 2*v, .mask = m3 };
          } else
            op_flags[v] &= ~OF_BASIC_LOOP;
        }
#endif
      }
    }
#endif
  }

  tcache_ptr = dr_prepare_cache(tcache_id, (end_pc - base_pc) / 2, branch_target_count);
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcache_id] = tcache_ptr;
#endif

  block = dr_add_block(branch_target_count, base_pc, end_pc - base_pc,
    base_literals, end_literals-base_literals, crc, sh2->is_slave, &blkid_main);
  if (block == NULL)
    return NULL;

  block_entry_ptr = tcache_ptr;
  dbg(2, "== %csh2 block #%d,%d %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
    tcache_id, blkid_main, base_pc, end_pc, base_literals, end_literals, block_entry_ptr);

  // clear stale state after compile errors
  rcache_invalidate();
  emith_invalidate_t();
  drcf = (struct drcf) { 0 };
#if LOOP_OPTIMIZER
  pinned_loops[pinned_loop_count].pc = -1;
  pinned_loop_count = 0;
#endif

  // -------------------------------------------------
  // 3rd pass: actual compilation
  pc = base_pc;
  cycles = 0;
  for (i = 0; pc < end_pc; i++)
  {
    u32 delay_dep_fw = 0, delay_dep_bk = 0;
    int tmp3, tmp4;
    int sr;

    if (op_flags[i] & OF_BTARGET)
    {
      if (pc != base_pc)
      {
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        FLUSH_CYCLES(sr);
        emith_sync_t(sr);
        drcf.Mflag = FLG_UNKNOWN;
        rcache_flush();
        emith_flush();
      }

      // make block entry
      v = block->entry_count;
      entry = &block->entryp[v];
      if (v < branch_target_count)
      {
        entry = &block->entryp[v];
        entry->pc = pc;
        entry->tcache_ptr = tcache_ptr;
        entry->links = entry->o_links = NULL;
#if (DRC_DEBUG & 2)
        entry->block = block;
#endif
        block->entry_count++;

        dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p",
          sh2->is_slave ? 's' : 'm', tcache_id, blkid_main,
          pc, tcache_ptr);
      }
      else {
        dbg(1, "too many entryp for block #%d,%d pc=%08x",
          tcache_id, blkid_main, pc);
        break;
      }

      v = find_in_sorted_linkage(branch_targets, branch_target_count, pc);
      if (v >= 0)
        branch_targets[v].ptr = tcache_ptr;
#if LOOP_DETECTION
      drcf.loop_type = op_flags[i] & OF_LOOP;
      drcf.delay_reg = -1;
      drcf.polling = (drcf.loop_type == OF_POLL_LOOP ? MF_POLLING : 0);
#endif

      rcache_clean();

#if (DRC_DEBUG & 0x10)
      tmp = rcache_get_tmp_arg(0);
      emith_move_r_imm(tmp, pc);
      tmp = emit_memhandler_read(1);
      tmp2 = rcache_get_tmp();
      tmp3 = rcache_get_tmp();
      emith_move_r_imm(tmp2, (s16)FETCH_OP(pc));
      emith_move_r_imm(tmp3, 0);
      emith_cmp_r_r(tmp, tmp2);
      EMITH_SJMP_START(DCOND_EQ);
      emith_read_r_r_offs_c(DCOND_NE, tmp3, tmp3, 0); // crash
      EMITH_SJMP_END(DCOND_EQ);
      rcache_free_tmp(tmp);
      rcache_free_tmp(tmp2);
      rcache_free_tmp(tmp3);
#endif

      // check cycles
      sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);

#if LOOP_OPTIMIZER
      if (op_flags[i] & OF_BASIC_LOOP) {
        if (pinned_loops[pinned_loop_count].pc == pc) {
          // pin needed regs on loop entry
          FOR_ALL_BITS_SET_DO(pinned_loops[pinned_loop_count].mask, v, rcache_pin_reg(v));
          emith_flush();
          // store current PC as loop target
          pinned_loops[pinned_loop_count].ptr = tcache_ptr;
          drcf.pinning = 1;
        } else
          op_flags[i] &= ~OF_BASIC_LOOP;
      }

      if (op_flags[i] & OF_BASIC_LOOP) {
        // if exiting a pinned loop pinned regs must be written back to ctx
        // since they are reloaded in the loop entry code
        emith_cmp_r_imm(sr, 0);
        EMITH_JMP_START(DCOND_GT);
        rcache_save_pinned();

        if (blx_target_count < ARRAY_SIZE(blx_targets)) {
          // exit via stub in blx table (saves some 1-3 insns in the main flow)
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
          emith_jump_patchable(tcache_ptr);
        } else {
          // blx table full, must inline exit code
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, pc);
          emith_jump(sh2_drc_exit);
          rcache_free_tmp(tmp);
        }
        EMITH_JMP_END(DCOND_GT);
      } else
#endif
      {
        if (blx_target_count < ARRAY_SIZE(blx_targets)) {
          // exit via stub in blx table (saves some 1-3 insns in the main flow)
          emith_cmp_r_imm(sr, 0);
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
          emith_jump_cond_patchable(DCOND_LE, tcache_ptr);
        } else {
          // blx table full, must inline exit code
          tmp = rcache_get_tmp_arg(0);
          emith_cmp_r_imm(sr, 0);
          EMITH_SJMP_START(DCOND_GT);
          emith_move_r_imm_c(DCOND_LE, tmp, pc);
          emith_jump_cond(DCOND_LE, sh2_drc_exit);
          EMITH_SJMP_END(DCOND_GT);
          rcache_free_tmp(tmp);
        }
      }

#if (DRC_DEBUG & 32)
      // block hit counter
      tmp = rcache_get_tmp_arg(0);
      tmp2 = rcache_get_tmp_arg(1);
      emith_move_r_ptr_imm(tmp, (uptr)entry);
      emith_read_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
      emith_add_r_imm(tmp2, 1);
      emith_write_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
      rcache_free_tmp(tmp);
      rcache_free_tmp(tmp2);
#endif

#if (DRC_DEBUG & (8|256|512|1024))
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      emith_sync_t(sr);
      rcache_clean();
      tmp = rcache_used_hregs_mask();
      emith_save_caller_regs(tmp);
      emit_do_static_regs(1, 0);
      rcache_get_reg_arg(2, SHR_SR, NULL);
      tmp2 = rcache_get_tmp_arg(0);
      tmp3 = rcache_get_tmp_arg(1);
      tmp4 = rcache_get_tmp();
      emith_move_r_ptr_imm(tmp2, tcache_ptr);
      emith_move_r_r_ptr(tmp3, CONTEXT_REG);
      emith_move_r_imm(tmp4, pc);
      emith_ctx_write(tmp4, SHR_PC * 4);
      rcache_invalidate_tmp();
      emith_abicall(sh2_drc_log_entry);
      emith_restore_caller_regs(tmp);
#endif

      do_host_disasm(tcache_id);
      rcache_unlock_all();
    }

#ifdef DRC_CMP
    if (!(op_flags[i] & OF_DELAY_OP)) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      emit_move_r_imm32(SHR_PC, pc);
      rcache_clean();

      tmp = rcache_used_hregs_mask();
      emith_save_caller_regs(tmp);
      emit_do_static_regs(1, 0);
      emith_pass_arg_r(0, CONTEXT_REG);
      emith_abicall(do_sh2_cmp);
      emith_restore_caller_regs(tmp);
    }
#endif

    // emit blx area if limits are approached
    if (blx_target_count && (blx_target_count > ARRAY_SIZE(blx_targets)-4 ||
        !emith_jump_patch_inrange(blx_targets[0].ptr, tcache_ptr+0x100))) {
      u8 *jp;
      rcache_invalidate_tmp();
      jp = tcache_ptr;
      emith_jump_patchable(tcache_ptr);
      emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
        branch_target_count, blx_targets, blx_target_count);
      blx_target_count = 0;
      do_host_disasm(tcache_id);
      emith_jump_patch(jp, tcache_ptr, NULL);
    }

    emith_pool_check();

    opd = &ops[i];
    op = FETCH_OP(pc);

#if (DRC_DEBUG & 4)
    DasmSH2(sh2dasm_buff, pc, op);
    if (op_flags[i] & OF_BTARGET) {
      if ((op_flags[i] & OF_LOOP) == OF_DELAY_LOOP) tmp3 = '+';
      else if ((op_flags[i] & OF_LOOP) == OF_POLL_LOOP) tmp3 = '=';
      else if ((op_flags[i] & OF_LOOP) == OF_IDLE_LOOP) tmp3 = '~';
      else tmp3 = '*';
    } else if (drcf.loop_type) tmp3 = '.';
    else tmp3 = ' ';
    printf("%c%08lx %04x %s\n", tmp3, (ulong)pc, op, sh2dasm_buff);
#endif

    pc += 2;
#if (DRC_DEBUG & 2)
    insns_compiled++;
#endif
    if (skip_op > 0) {
      skip_op--;
      continue;
    }

    if (op_flags[i] & OF_DELAY_OP)
    {
      // handle delay slot dependencies
      delay_dep_fw = opd->dest & ops[i-1].source;
      delay_dep_bk = opd->source & ops[i-1].dest;
      if (delay_dep_fw & BITMASK1(SHR_T)) {
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        DELAY_SAVE_T(sr);
      }
      if (delay_dep_bk & BITMASK1(SHR_PC)) {
        if (opd->op != OP_LOAD_POOL && opd->op != OP_MOVA) {
          // can only be those 2 really..
          elprintf_sh2(sh2, EL_ANOMALY,
            "drc: illegal slot insn %04x @ %08x?", op, pc - 2);
        }
        // store PC for MOVA/MOV @PC address calculation
        if (opd->imm != 0)
          ; // case OP_BRANCH - addr already resolved in scan_block
        else {
          switch (ops[i-1].op) {
          case OP_BRANCH:
            emit_move_r_imm32(SHR_PC, ops[i-1].imm);
            break;
          case OP_BRANCH_CT:
          case OP_BRANCH_CF:
            sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
            tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
            emith_move_r_imm(tmp, pc);
            tmp2 = emith_tst_t(sr, (ops[i-1].op == OP_BRANCH_CT));
            tmp3 = emith_invert_cond(tmp2);
            EMITH_SJMP_START(tmp3);
            emith_move_r_imm_c(tmp2, tmp, ops[i-1].imm);
            EMITH_SJMP_END(tmp3);
            break;
          case OP_BRANCH_N: // BT/BF known not to be taken
            // XXX could modify opd->imm instead?
            emit_move_r_imm32(SHR_PC, pc);
            break;
          // case OP_BRANCH_R OP_BRANCH_RF - PC already loaded
          }
        }
      }
      //if (delay_dep_fw & ~BITMASK1(SHR_T))
      //  dbg(1, "unhandled delay_dep_fw: %x", delay_dep_fw & ~BITMASK1(SHR_T));
      if (delay_dep_bk & ~BITMASK2(SHR_PC, SHR_PR))
        dbg(1, "unhandled delay_dep_bk: %x", delay_dep_bk);
    }

    // inform cache about future register usage
    u32 late = 0;  // regs read by future ops
    u32 write = 0; // regs written to (to detect write before read)
    u32 soon = 0;  // regs read soon
    for (v = 1; v <= 9; v++) {
      // no sense in looking any further than the next rcache flush
      tmp = ((op_flags[i+v] & OF_BTARGET) || (op_flags[i+v-1] & OF_DELAY_OP) ||
              (OP_ISBRACND(opd[v-1].op) && !(op_flags[i+v] & OF_DELAY_OP)));
      // XXX looking behind cond branch to avoid evicting regs used later?
      if (pc + 2*v <= end_pc && !tmp) { // (pc already incremented above)
        late |= opd[v].source & ~write;
        // ignore source regs after they have been written to
        write |= opd[v].dest;
        // regs needed in the next few instructions
        if (v <= 4)
          soon = late;
      } else
        break;
    }
    rcache_set_usage_now(opd[0].source); // current insn
    rcache_set_usage_soon(soon);         // insns 1-4
    rcache_set_usage_late(late & ~soon); // insns 5-9
    rcache_set_usage_discard(write & ~(late|soon));
    if (v <= 9)
      // upcoming rcache_flush, start writing back unused dirty stuff
      rcache_clean_masked(rcache_dirty_mask() & ~(write|opd[0].dest));

    switch (opd->op)
    {
    case OP_BRANCH_N:
      // never taken, just use up cycles
      goto end_op;
    case OP_BRANCH:
    case OP_BRANCH_CT:
    case OP_BRANCH_CF:
      if (opd->dest & BITMASK1(SHR_PR))
        emit_move_r_imm32(SHR_PR, pc + 2);
      drcf.pending_branch_direct = 1;
      goto end_op;

    case OP_BRANCH_R:
      if (opd->dest & BITMASK1(SHR_PR))
        emit_move_r_imm32(SHR_PR, pc + 2);
      emit_move_r_r(SHR_PC, opd->rm);
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_BRANCH_RF:
      tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
      tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
      emith_move_r_imm(tmp, pc + 2);
      if (opd->dest & BITMASK1(SHR_PR)) {
        tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE, NULL);
        emith_move_r_r(tmp3, tmp);
      }
      emith_add_r_r(tmp, tmp2);
      if (gconst_get(GET_Rn(), &u))
        gconst_set(SHR_PC, pc + 2 + u);
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_SLEEP: // SLEEP      0000000000011011
      printf("TODO sleep\n");
      goto end_op;

    case OP_RTE: // RTE        0000000000101011
      emith_invalidate_t();
      // pop PC
      tmp = emit_memhandler_read_rr(sh2, SHR_PC, SHR_SP, 0, 2 | MF_POSTINCR);
      rcache_free(tmp);
      // pop SR
      tmp = emit_memhandler_read_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_POSTINCR);
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      emith_write_sr(sr, tmp);
      rcache_free_tmp(tmp);
      drcf.test_irq = 1;
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_UNDEFINED:
      elprintf_sh2(sh2, EL_ANOMALY, "drc: unhandled op %04x @ %08x", op, pc-2);
      opd->imm = (op_flags[i] & OF_B_IN_DS) ? 6 : 4;
      // fallthrough
    case OP_TRAPA: // TRAPA #imm      11000011iiiiiiii
      // push SR
      tmp = rcache_get_reg_arg(1, SHR_SR, &tmp2);
      emith_sync_t(tmp2);
      emith_clear_msb(tmp, tmp2, 22);
      emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
      // push PC
      if (opd->op == OP_TRAPA) {
        tmp = rcache_get_tmp_arg(1);
        emith_move_r_imm(tmp, pc);
      } else if (drcf.pending_branch_indirect) {
        tmp = rcache_get_reg_arg(1, SHR_PC, NULL);
      } else {
        tmp = rcache_get_tmp_arg(1);
        emith_move_r_imm(tmp, pc - 2);
      }
      emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
      // obtain new PC
      emit_memhandler_read_rr(sh2, SHR_PC, SHR_VBR, opd->imm * 4, 2);
      // indirect jump -> back to dispatcher
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_LOAD_POOL:
#if PROPAGATE_CONSTANTS
      if ((opd->imm && opd->imm >= base_pc && opd->imm < end_literals) ||
          dr_is_rom(opd->imm))
      {
        if (opd->size == 2)
          u = FETCH32(opd->imm);
        else
          u = (s16)FETCH_OP(opd->imm);
        // tweak for Blackthorne: avoid stack overwriting
        if (GET_Rn() == SHR_SP && u == 0x0603f800) u = 0x0603f880;
        gconst_new(GET_Rn(), u);
      }
      else
#endif
      {
        if (opd->imm != 0) {
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, opd->imm);
        } else {
          // have to calculate read addr from PC for delay slot
          tmp = rcache_get_reg_arg(0, SHR_PC, &tmp2);
          if (opd->size == 2) {
            emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
            emith_bic_r_imm(tmp, 3);
          }
          else
            emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 2);
        }
        tmp2 = emit_memhandler_read(opd->size);
        tmp3 = rcache_map_reg(GET_Rn(), tmp2);
        if (tmp3 != tmp2) {
          emith_move_r_r(tmp3, tmp2);
          rcache_free_tmp(tmp2);
        }
      }
      goto end_op;

    case OP_MOVA: // MOVA @(disp,PC),R0    11000111dddddddd
      if (opd->imm != 0)
        emit_move_r_imm32(SHR_R0, opd->imm);
      else {
        // have to calculate addr from PC for delay slot
        tmp2 = rcache_get_reg(SHR_PC, RC_GR_READ, NULL);
        tmp = rcache_get_reg(SHR_R0, RC_GR_WRITE, NULL);
        emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
        emith_bic_r_imm(tmp, 3);
      }
      goto end_op;
    }

    switch ((op >> 12) & 0x0f)
    {
    /////////////////////////////////////////////
    case 0x00:
      switch (op & 0x0f)
      {
      case 0x02:
        switch (GET_Fx())
        {
        case 0: // STC SR,Rn  0000nnnn00000010
          tmp2 = SHR_SR;
          break;
        case 1: // STC GBR,Rn 0000nnnn00010010
          tmp2 = SHR_GBR;
          break;
        case 2: // STC VBR,Rn 0000nnnn00100010
          tmp2 = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp2 == SHR_SR) {
          sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
          emith_sync_t(sr);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
          emith_clear_msb(tmp, sr, 22); // reserved bits defined by ISA as 0
        } else
          emit_move_r_r(GET_Rn(), tmp2);
        goto end_op;
      case 0x04: // MOV.B Rm,@(R0,Rn)   0000nnnnmmmm0100
      case 0x05: // MOV.W Rm,@(R0,Rn)   0000nnnnmmmm0101
      case 0x06: // MOV.L Rm,@(R0,Rn)   0000nnnnmmmm0110
        emit_indirect_indexed_write(sh2, GET_Rm(), SHR_R0, GET_Rn(), op & 3);
        goto end_op;
      case 0x07: // MUL.L Rm,Rn         0000nnnnmmmm0111
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        emith_mul(tmp3, tmp2, tmp);
        goto end_op;
      case 0x08:
        switch (GET_Fx())
        {
        case 0: // CLRT       0000000000001000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (~rcache_regs_discard & BITMASK1(SHR_T))
#endif
            emith_set_t(sr, 0);
          break;
        case 1: // SETT       0000000000011000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (~rcache_regs_discard & BITMASK1(SHR_T))
#endif
            emith_set_t(sr, 1);
          break;
        case 2: // CLRMAC     0000000000101000
          emit_move_r_imm32(SHR_MACL, 0);
          emit_move_r_imm32(SHR_MACH, 0);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // NOP        0000000000001001
          break;
        case 1: // DIV0U      0000000000011001
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_invalidate_t();
          emith_bic_r_imm(sr, M|Q|T);
          drcf.Mflag = FLG_0;
#if DIV_OPTIMIZER
          if (div(opd).div1 == 16 && div(opd).ro == div(opd).rn) {
            // divide 32/16
            rcache_get_reg_arg(0, div(opd).rn, NULL);
            rcache_get_reg_arg(1, div(opd).rm, NULL);
            rcache_invalidate_tmp();
            emith_abicall(sh2_drc_divu32);
            tmp = rcache_get_tmp_ret();
            tmp2 = rcache_map_reg(div(opd).rn, tmp);
            if (tmp != tmp2)
              emith_move_r_r(tmp2, tmp);

            tmp3 = rcache_get_tmp();
            emith_and_r_r_imm(tmp3, tmp2, 1);     // Q = !Rn[0]
            emith_eor_r_r_imm(tmp3, tmp3, 1);
            emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);
            rcache_free_tmp(tmp3);
            emith_or_r_r_r_lsr(sr, sr, tmp2, 31); // T = Rn[31]
            skip_op = div(opd).div1 + div(opd).rotcl;
          }
          else if (div(opd).div1 == 32 && div(opd).ro != div(opd).rn) {
            // divide 64/32
            tmp4 = rcache_get_reg(div(opd).ro, RC_GR_READ, NULL);
            emith_ctx_write(tmp4, offsetof(SH2, drc_tmp));
            tmp = rcache_get_tmp_arg(1);
            emith_add_r_r_ptr_imm(tmp, CONTEXT_REG, offsetof(SH2, drc_tmp));
            rcache_get_reg_arg(0, div(opd).rn, NULL);
            rcache_get_reg_arg(2, div(opd).rm, NULL);
            rcache_invalidate_tmp();
            emith_abicall(sh2_drc_divu64);
            tmp = rcache_get_tmp_ret();
            tmp2 = rcache_map_reg(div(opd).rn, tmp);
            tmp4 = rcache_get_reg(div(opd).ro, RC_GR_WRITE, NULL);
            if (tmp != tmp2)
              emith_move_r_r(tmp2, tmp);
            emith_ctx_read(tmp4, offsetof(SH2, drc_tmp));

            tmp3 = rcache_get_tmp();
            emith_and_r_r_imm(tmp3, tmp4, 1);     // Q = !Ro[0]
            emith_eor_r_r_imm(tmp3, tmp3, 1);
            emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);
            rcache_free_tmp(tmp3);
            emith_or_r_r_r_lsr(sr, sr, tmp4, 31); // T = Ro[31]
            skip_op = div(opd).div1 + div(opd).rotcl;
          }
#endif
          break;
        case 2: // MOVT Rn    0000nnnn00101001
          sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
          emith_sync_t(sr);
          tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
          emith_clear_msb(tmp2, sr, 31);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // STS MACH,Rn 0000nnnn00001010
          tmp2 = SHR_MACH;
          break;
        case 1: // STS MACL,Rn 0000nnnn00011010
          tmp2 = SHR_MACL;
          break;
        case 2: // STS PR,Rn   0000nnnn00101010
          tmp2 = SHR_PR;
          break;
        default:
          goto default_;
        }
        emit_move_r_r(GET_Rn(), tmp2);
        goto end_op;
      case 0x0c: // MOV.B @(R0,Rm),Rn   0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn   0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn   0000nnnnmmmm1110
        emit_indirect_indexed_read(sh2, GET_Rn(), SHR_R0, GET_Rm(), (op & 3) | drcf.polling);
        goto end_op;
      case 0x0f: // MAC.L @Rm+,@Rn+     0000nnnnmmmm1111
        emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
        sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
        emith_sh2_macl(tmp3, tmp4, tmp, tmp2, sr);
        rcache_free_tmp(tmp2);
        rcache_free_tmp(tmp);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x01: // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), (op & 0x0f) * 4, 2);
      goto end_op;

    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn        0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn        0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn        0010nnnnmmmm0010
        emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, op & 3);
        goto end_op;
      case 0x04: // MOV.B Rm,@-Rn       0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn       0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn       0010nnnnmmmm0110
        emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, (op & 3) | MF_PREDECR);
        goto end_op;
      case 0x07: // DIV0S Rm,Rn         0010nnnnmmmm0111
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_invalidate_t();
        emith_bic_r_imm(sr, M|Q|T);
        drcf.Mflag = FLG_UNKNOWN;
#if DIV_OPTIMIZER
        if (div(opd).div1 == 16 && div(opd).ro == div(opd).rn) {
          // divide 32/16
          rcache_get_reg_arg(0, div(opd).rn, NULL);
          tmp2 = rcache_get_reg_arg(1, div(opd).rm, NULL);
          tmp3 = rcache_get_tmp();
          emith_lsr(tmp3, tmp2, 31);
          emith_or_r_r_lsl(sr, tmp3, M_SHIFT);  // M = Rm[31]
          rcache_invalidate_tmp();
          emith_abicall(sh2_drc_divs32);
          tmp = rcache_get_tmp_ret();
          tmp2 = rcache_map_reg(div(opd).rn, tmp);
          if (tmp != tmp2)
            emith_move_r_r(tmp2, tmp);
          tmp3 = rcache_get_tmp();

          emith_eor_r_r_r_lsr(tmp3, tmp2, sr, M_SHIFT);
          emith_and_r_r_imm(tmp3, tmp3, 1);
          emith_eor_r_r_imm(tmp3, tmp3, 1);
          emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);  // Q = !Rn[0]^M
          rcache_free_tmp(tmp3);
          emith_or_r_r_r_lsr(sr, sr, tmp2, 31); // T = Rn[31]
          skip_op = div(opd).div1 + div(opd).rotcl;
        }
        else if (div(opd).div1 == 32 && div(opd).ro != div(opd).rn) {
          // divide 64/32
          tmp4 = rcache_get_reg(div(opd).ro, RC_GR_READ, NULL);
          emith_ctx_write(tmp4, offsetof(SH2, drc_tmp));
          rcache_get_reg_arg(0, div(opd).rn, NULL);
          tmp2 = rcache_get_reg_arg(2, div(opd).rm, NULL);
          tmp3 = rcache_get_tmp_arg(1);
          emith_lsr(tmp3, tmp2, 31);
          emith_or_r_r_lsl(sr, tmp3, M_SHIFT);  // M = Rm[31]
          emith_add_r_r_ptr_imm(tmp3, CONTEXT_REG, offsetof(SH2, drc_tmp));
          rcache_invalidate_tmp();
          emith_abicall(sh2_drc_divs64);
          tmp = rcache_get_tmp_ret();
          tmp2 = rcache_map_reg(div(opd).rn, tmp);
          tmp4 = rcache_get_reg(div(opd).ro, RC_GR_WRITE, NULL);
          if (tmp != tmp2)
            emith_move_r_r(tmp2, tmp);
          emith_ctx_read(tmp4, offsetof(SH2, drc_tmp));

          tmp3 = rcache_get_tmp();
          emith_eor_r_r_r_lsr(tmp3, tmp4, sr, M_SHIFT);
          emith_and_r_r_imm(tmp3, tmp3, 1);
          emith_eor_r_r_imm(tmp3, tmp3, 1);
          emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);  // Q = !Ro[0]^M
          rcache_free_tmp(tmp3);
          emith_or_r_r_r_lsr(sr, sr, tmp4, 31); // T = Ro[31]
          skip_op = div(opd).div1 + div(opd).rotcl;
        } else
#endif
        {
          tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp = rcache_get_tmp();
          emith_lsr(tmp, tmp2, 31);       // Q = Nn
          emith_or_r_r_lsl(sr, tmp, Q_SHIFT);
          emith_lsr(tmp, tmp3, 31);       // M = Nm
          emith_or_r_r_lsl(sr, tmp, M_SHIFT);
          emith_eor_r_r_lsr(tmp, tmp2, 31);
          emith_or_r_r(sr, tmp);          // T = Q^M
          rcache_free(tmp);
        }
        goto end_op;
      case 0x08: // TST Rm,Rn           0010nnnnmmmm1000
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_r(tmp2, tmp3);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x09: // AND Rm,Rn           0010nnnnmmmm1001
        if (GET_Rm() != GET_Rn()) {
          tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
          emith_and_r_r_r(tmp, tmp3, tmp2);
        }
        goto end_op;
      case 0x0a: // XOR Rm,Rn           0010nnnnmmmm1010
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        emith_eor_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0b: // OR  Rm,Rn           0010nnnnmmmm1011
        if (GET_Rm() != GET_Rn()) {
          tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
          emith_or_r_r_r(tmp, tmp3, tmp2);
        }
        goto end_op;
      case 0x0c: // CMP/STR Rm,Rn       0010nnnnmmmm1100
        tmp = rcache_get_tmp();
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_eor_r_r_r(tmp, tmp2, tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, 0x000000ff);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0x0000ff00);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0x00ff0000);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0xff000000);
        EMITH_SJMP_END(DCOND_EQ);
        EMITH_SJMP_END(DCOND_EQ);
        EMITH_SJMP_END(DCOND_EQ);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d: // XTRCT Rm,Rn         0010nnnnmmmm1101
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        emith_lsr(tmp, tmp3, 16);
        emith_or_r_r_lsl(tmp, tmp2, 16);
        goto end_op;
      case 0x0e: // MULU.W Rm,Rn        0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn        0010nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = tmp3;
        if (op & 1) {
          if (! rcache_is_s16(tmp2)) {
            emith_sext(tmp, tmp2, 16);
            tmp2 = tmp;
          }
          if (! rcache_is_s16(tmp3)) {
            tmp4 = rcache_get_tmp();
            emith_sext(tmp4, tmp3, 16);
          }
        } else {
          if (! rcache_is_u16(tmp2)) {
            emith_clear_msb(tmp, tmp2, 16);
            tmp2 = tmp;
          }
          if (! rcache_is_u16(tmp3)) {
            tmp4 = rcache_get_tmp();
            emith_clear_msb(tmp4, tmp3, 16);
          }
        }
        emith_mul(tmp, tmp2, tmp4);
        if (tmp4 != tmp3)
          rcache_free_tmp(tmp4);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn        0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn        0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn        0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn        0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn        0011nnnnmmmm0111
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        switch (op & 0x07)
        {
        case 0x00: // CMP/EQ
          tmp = DCOND_EQ;
          break;
        case 0x02: // CMP/HS
          tmp = DCOND_HS;
          break;
        case 0x03: // CMP/GE
          tmp = DCOND_GE;
          break;
        case 0x06: // CMP/HI
          tmp = DCOND_HI;
          break;
        case 0x07: // CMP/GT
          tmp = DCOND_GT;
          break;
        }
        emith_clr_t_cond(sr);
        emith_cmp_r_r(tmp2, tmp3);
        emith_set_t_cond(sr, tmp);
        goto end_op;
      case 0x04: // DIV1   Rm,Rn        0011nnnnmmmm0100
        // Q1 = carry(Rn = (Rn << 1) | T)
        // if Q ^ M
        //   Q2 = carry(Rn += Rm)
        // else
        //   Q2 = carry(Rn -= Rm)
        // Q = M ^ Q1 ^ Q2
        // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        tmp = rcache_get_tmp();
        if (drcf.Mflag != FLG_0) {
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT); // Q ^= M
        }
        rcache_free_tmp(tmp);
        // shift Rn, add T, add or sub Rm, set T = !(Q1 ^ Q2)
        // in: (Q ^ M) passed in Q
        emith_sh2_div1_step(tmp2, tmp3, sr);
        tmp = rcache_get_tmp();
        emith_or_r_imm(sr, Q);     // Q = !T
        emith_and_r_r_imm(tmp, sr, T);
        emith_eor_r_r_lsl(sr, tmp, Q_SHIFT);
        if (drcf.Mflag != FLG_0) { // Q = M ^ !T = M ^ Q1 ^ Q2
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT);
        }
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x05: // DMULU.L Rm,Rn       0011nnnnmmmm0101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_u64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      case 0x08: // SUB    Rm,Rn        0011nnnnmmmm1000
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
      case 0x0c: // ADD    Rm,Rn        0011nnnnmmmm1100
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 4) {
          emith_add_r_r_r(tmp, tmp3, tmp2);
        } else
          emith_sub_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0a: // SUBC   Rm,Rn        0011nnnnmmmm1010
      case 0x0e: // ADDC   Rm,Rn        0011nnnnmmmm1110
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4) {
            emith_t_to_carry(sr, 0);
            emith_adc_r_r_r(tmp, tmp3, tmp2);
          } else {
            emith_t_to_carry(sr, 1);
            emith_sbc_r_r_r(tmp, tmp3, tmp2);
          }
        } else
#endif
        {
          EMITH_HINT_COND(DCOND_CS);
          if (op & 4) { // adc
            emith_tpop_carry(sr, 0);
            emith_adcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 0);
          } else {
            emith_tpop_carry(sr, 1);
            emith_sbcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 1);
          }
        }
        goto end_op;
      case 0x0b: // SUBV   Rm,Rn        0011nnnnmmmm1011
      case 0x0f: // ADDV   Rm,Rn        0011nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4)
            emith_add_r_r_r(tmp, tmp3, tmp2);
          else
            emith_sub_r_r_r(tmp, tmp3, tmp2);
        } else
#endif
        {
          emith_clr_t_cond(sr);
          EMITH_HINT_COND(DCOND_VS);
          if (op & 4)
            emith_addf_r_r_r(tmp, tmp3, tmp2);
          else
            emith_subf_r_r_r(tmp, tmp3, tmp2);
          emith_set_t_cond(sr, DCOND_VS);
        }
        goto end_op;
      case 0x0d: // DMULS.L Rm,Rn       0011nnnnmmmm1101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_s64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      }
      goto default_;
  3897. /////////////////////////////////////////////
  3898. case 0x04:
  3899. switch (op & 0x0f)
  3900. {
  3901. case 0x00:
  3902. switch (GET_Fx())
  3903. {
  3904. case 0: // SHLL Rn 0100nnnn00000000
  3905. case 2: // SHAL Rn 0100nnnn00100000
  3906. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3907. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3908. #if T_OPTIMIZER
  3909. if (rcache_regs_discard & BITMASK1(SHR_T))
  3910. emith_lsl(tmp, tmp2, 1);
  3911. else
  3912. #endif
  3913. {
  3914. emith_invalidate_t();
  3915. emith_lslf(tmp, tmp2, 1);
  3916. emith_carry_to_t(sr, 0);
  3917. }
  3918. goto end_op;
  3919. case 1: // DT Rn 0100nnnn00010000
  3920. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3921. #if LOOP_DETECTION
  3922. if (drcf.loop_type == OF_DELAY_LOOP) {
  3923. if (drcf.delay_reg == -1)
  3924. drcf.delay_reg = GET_Rn();
  3925. else
  3926. drcf.polling = drcf.loop_type = 0;
  3927. }
  3928. #endif
  3929. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3930. emith_clr_t_cond(sr);
  3931. EMITH_HINT_COND(DCOND_EQ);
  3932. emith_subf_r_r_imm(tmp, tmp2, 1);
  3933. emith_set_t_cond(sr, DCOND_EQ);
  3934. goto end_op;
  3935. }
  3936. goto default_;
  3937. case 0x01:
  3938. switch (GET_Fx())
  3939. {
  3940. case 0: // SHLR Rn 0100nnnn00000001
  3941. case 2: // SHAR Rn 0100nnnn00100001
  3942. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3943. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3944. #if T_OPTIMIZER
  3945. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  3946. if (op & 0x20)
  3947. emith_asr(tmp,tmp2,1);
  3948. else
  3949. emith_lsr(tmp,tmp2,1);
  3950. } else
  3951. #endif
  3952. {
  3953. emith_invalidate_t();
  3954. if (op & 0x20) {
  3955. emith_asrf(tmp, tmp2, 1);
  3956. } else
  3957. emith_lsrf(tmp, tmp2, 1);
  3958. emith_carry_to_t(sr, 0);
  3959. }
  3960. goto end_op;
  3961. case 1: // CMP/PZ Rn 0100nnnn00010001
  3962. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3963. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3964. emith_clr_t_cond(sr);
  3965. emith_cmp_r_imm(tmp, 0);
  3966. emith_set_t_cond(sr, DCOND_GE);
  3967. goto end_op;
  3968. }
  3969. goto default_;
  3970. case 0x02:
  3971. case 0x03:
  3972. switch (op & 0x3f)
  3973. {
  3974. case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
  3975. tmp = SHR_MACH;
  3976. break;
  3977. case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
  3978. tmp = SHR_MACL;
  3979. break;
  3980. case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
  3981. tmp = SHR_PR;
  3982. break;
  3983. case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
  3984. tmp = SHR_SR;
  3985. break;
  3986. case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
  3987. tmp = SHR_GBR;
  3988. break;
  3989. case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
  3990. tmp = SHR_VBR;
  3991. break;
  3992. default:
  3993. goto default_;
  3994. }
  3995. if (tmp == SHR_SR) {
  3996. tmp3 = rcache_get_reg_arg(1, tmp, &tmp4);
  3997. emith_sync_t(tmp4);
  3998. emith_clear_msb(tmp3, tmp4, 22); // reserved bits defined by ISA as 0
  3999. } else
  4000. tmp3 = rcache_get_reg_arg(1, tmp, NULL);
  4001. emit_memhandler_write_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_PREDECR);
  4002. goto end_op;
  4003. case 0x04:
  4004. case 0x05:
  4005. switch (op & 0x3f)
  4006. {
  4007. case 0x04: // ROTL Rn 0100nnnn00000100
  4008. case 0x05: // ROTR Rn 0100nnnn00000101
  4009. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  4010. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4011. #if T_OPTIMIZER
  4012. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  4013. if (op & 1)
  4014. emith_ror(tmp, tmp2, 1);
  4015. else
  4016. emith_rol(tmp, tmp2, 1);
  4017. } else
  4018. #endif
  4019. {
  4020. emith_invalidate_t();
  4021. if (op & 1)
  4022. emith_rorf(tmp, tmp2, 1);
  4023. else
  4024. emith_rolf(tmp, tmp2, 1);
  4025. emith_carry_to_t(sr, 0);
  4026. }
  4027. goto end_op;
  4028. case 0x24: // ROTCL Rn 0100nnnn00100100
  4029. case 0x25: // ROTCR Rn 0100nnnn00100101
  4030. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
  4031. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4032. emith_sync_t(sr);
  4033. #if T_OPTIMIZER
  4034. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  4035. emith_t_to_carry(sr, 0);
  4036. if (op & 1)
  4037. emith_rorc(tmp);
  4038. else
  4039. emith_rolc(tmp);
  4040. } else
  4041. #endif
  4042. {
  4043. emith_tpop_carry(sr, 0);
  4044. if (op & 1)
  4045. emith_rorcf(tmp);
  4046. else
  4047. emith_rolcf(tmp);
  4048. emith_tpush_carry(sr, 0);
  4049. }
  4050. goto end_op;
  4051. case 0x15: // CMP/PL Rn 0100nnnn00010101
  4052. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  4053. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4054. emith_clr_t_cond(sr);
  4055. emith_cmp_r_imm(tmp, 0);
  4056. emith_set_t_cond(sr, DCOND_GT);
  4057. goto end_op;
  4058. }
  4059. goto default_;
  4060. case 0x06:
  4061. case 0x07:
  4062. switch (op & 0x3f)
  4063. {
  4064. case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
  4065. tmp = SHR_MACH;
  4066. break;
  4067. case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
  4068. tmp = SHR_MACL;
  4069. break;
  4070. case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
  4071. tmp = SHR_PR;
  4072. break;
  4073. case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
  4074. tmp = SHR_SR;
  4075. break;
  4076. case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
  4077. tmp = SHR_GBR;
  4078. break;
  4079. case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
  4080. tmp = SHR_VBR;
  4081. break;
  4082. default:
  4083. goto default_;
  4084. }
  4085. if (tmp == SHR_SR) {
  4086. emith_invalidate_t();
  4087. tmp2 = emit_memhandler_read_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_POSTINCR);
  4088. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4089. emith_write_sr(sr, tmp2);
  4090. rcache_free_tmp(tmp2);
  4091. drcf.test_irq = 1;
  4092. } else
  4093. emit_memhandler_read_rr(sh2, tmp, GET_Rn(), 0, 2 | MF_POSTINCR);
  4094. goto end_op;
  4095. case 0x08:
  4096. case 0x09:
  4097. switch (GET_Fx())
  4098. {
  4099. case 0: // SHLL2 Rn 0100nnnn00001000
  4100. // SHLR2 Rn 0100nnnn00001001
  4101. tmp = 2;
  4102. break;
  4103. case 1: // SHLL8 Rn 0100nnnn00011000
  4104. // SHLR8 Rn 0100nnnn00011001
  4105. tmp = 8;
  4106. break;
  4107. case 2: // SHLL16 Rn 0100nnnn00101000
  4108. // SHLR16 Rn 0100nnnn00101001
  4109. tmp = 16;
  4110. break;
  4111. default:
  4112. goto default_;
  4113. }
  4114. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  4115. if (op & 1) {
  4116. emith_lsr(tmp2, tmp3, tmp);
  4117. } else
  4118. emith_lsl(tmp2, tmp3, tmp);
  4119. goto end_op;
  4120. case 0x0a:
  4121. switch (GET_Fx())
  4122. {
  4123. case 0: // LDS Rm,MACH 0100mmmm00001010
  4124. tmp2 = SHR_MACH;
  4125. break;
  4126. case 1: // LDS Rm,MACL 0100mmmm00011010
  4127. tmp2 = SHR_MACL;
  4128. break;
  4129. case 2: // LDS Rm,PR 0100mmmm00101010
  4130. tmp2 = SHR_PR;
  4131. break;
  4132. default:
  4133. goto default_;
  4134. }
  4135. emit_move_r_r(tmp2, GET_Rn());
  4136. goto end_op;
  4137. case 0x0b:
  4138. switch (GET_Fx())
  4139. {
  4140. case 1: // TAS.B @Rn 0100nnnn00011011
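          // TAS.B: T = (mem[Rn] == 0), then mem[Rn] |= 0x80; emitted here
          // as a plain read-test-write sequence, without the bus lock the
          // real instruction performs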
          // XXX: is TAS working on 32X?
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          tmp = emit_memhandler_read(0);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_EQ);
          emith_or_r_imm(tmp, 0x80);
          tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
          emith_move_r_r(tmp2, tmp);
          rcache_free_tmp(tmp);
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          emit_memhandler_write(0);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR 0100mmmm00001110
          tmp2 = SHR_SR;
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp2 = SHR_GBR;
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp2 = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp2 == SHR_SR) {
          emith_invalidate_t();
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          emith_write_sr(sr, tmp);
          drcf.test_irq = 1;
        } else
          emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0f: // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
        emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
        sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
        emith_sh2_macw(tmp3, tmp4, tmp, tmp2, sr);
        rcache_free_tmp(tmp2);
        rcache_free_tmp(tmp);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x05: // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2 | drcf.polling);
      goto end_op;

    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
      case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
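        // for the post-increment forms the increment is dropped when
        // Rn == Rm, as the loaded value overwrites Rn anyway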
        tmp = ((op & 7) >= 4 && GET_Rn() != GET_Rm()) ? MF_POSTINCR : drcf.polling;
        emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), 0, (op & 3) | tmp);
        goto end_op;
      case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
        emit_move_r_r(GET_Rn(), GET_Rm());
        goto end_op;
      default: // 0x07 ... 0x0f
        tmp = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
        switch (op & 0x0f)
        {
        case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
          emith_mvn_r_r(tmp2, tmp);
          break;
        case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
          tmp3 = tmp2;
          if (tmp == tmp2)
            tmp3 = rcache_get_tmp();
          tmp4 = rcache_get_tmp();
          emith_lsr(tmp3, tmp, 16);
          emith_or_r_r_lsl(tmp3, tmp, 24);
          emith_and_r_r_imm(tmp4, tmp, 0xff00);
          emith_or_r_r_lsl(tmp3, tmp4, 8);
          emith_rol(tmp2, tmp3, 16);
          rcache_free_tmp(tmp4);
          if (tmp == tmp2)
            rcache_free_tmp(tmp3);
          break;
        case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
          emith_rol(tmp2, tmp, 16);
          break;
        case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            emith_t_to_carry(sr, 1);
            emith_negc_r_r(tmp2, tmp);
          } else
#endif
          {
            EMITH_HINT_COND(DCOND_CS);
            emith_tpop_carry(sr, 1);
            emith_negcf_r_r(tmp2, tmp);
            emith_tpush_carry(sr, 1);
          }
          break;
        case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
          emith_neg_r_r(tmp2, tmp);
          break;
        case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
          emith_clear_msb(tmp2, tmp, 24);
          rcache_set_x16(tmp2, 1, 1);
          break;
        case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
          emith_clear_msb(tmp2, tmp, 16);
          rcache_set_x16(tmp2, 0, 1);
          break;
        case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
          emith_sext(tmp2, tmp, 8);
          rcache_set_x16(tmp2, 1, 0);
          break;
        case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
          emith_sext(tmp2, tmp, 16);
          rcache_set_x16(tmp2, 1, 0);
          break;
        }
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x07: // ADD #imm,Rn 0111nnnniiiiiiii
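      // the 8 bit immediate is sign extended by the ISA; e.g. op=0x73ff
      // is ADD #-1,R3, and (u8)-op == 1 turns it into the subtract below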
      if (op & 0x80) // adding negative
        emit_sub_r_imm(GET_Rn(), (u8)-op);
      else
        emit_add_r_imm(GET_Rn(), (u8)op);
      goto end_op;

    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
      case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
      case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
        tmp2 = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_cmp_r_imm(tmp2, (s8)(op & 0xff));
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // TST #imm,R0 11001000iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x0900: // AND #imm,R0 11001001iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
        emith_and_r_r_imm(tmp, tmp2, (op & 0xff));
        goto end_op;
      case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_eor_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0b00: // OR #imm,R0 11001011iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_or_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0 | drcf.polling);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
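        // the GBR-indirect logic ops share one read-modify-write tail:
        // read the byte, apply the op into the arg1 temp, then fall
        // through to end_rmw_op, which writes the result back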
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_and_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_eor_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_or_r_r_imm(tmp2, tmp, (op & 0xff));
end_rmw_op:
        rcache_free_tmp(tmp);
        emit_indirect_indexed_write(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x0e: // MOV #imm,Rn 1110nnnniiiiiiii
      emit_move_r_imm32(GET_Rn(), (s8)op);
      goto end_op;

    default:
    default_:
      if (!(op_flags[i] & OF_B_IN_DS)) {
        elprintf_sh2(sh2, EL_ANOMALY,
          "drc: illegal op %04x @ %08x", op, pc - 2);
        exit(1);
      }
    }

end_op:
    rcache_unlock_all();
    rcache_set_usage_now(0);
#if DRC_DEBUG & 64
    RCACHE_CHECK("after insn");
#endif
    cycles += opd->cycles;

    if (op_flags[i+1] & OF_DELAY_OP) {
      do_host_disasm(tcache_id);
      continue;
    }

    // test irq?
    if (drcf.test_irq && !drcf.pending_branch_direct) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      if (!drcf.pending_branch_indirect)
        emit_move_r_imm32(SHR_PC, pc);
      rcache_flush();
      emith_call(sh2_drc_test_irq);
      drcf.test_irq = 0;
    }
    // branch handling
    if (drcf.pending_branch_direct)
    {
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      u32 target_pc = opd_b->imm;
      int cond = -1;
      int ctaken = 0;
      void *target = NULL;

      if (OP_ISBRACND(opd_b->op))
        ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
      cycles += ctaken; // assume branch taken
#if LOOP_OPTIMIZER
      if ((drcf.loop_type == OF_IDLE_LOOP ||
          (drcf.loop_type == OF_DELAY_LOOP && drcf.delay_reg >= 0)))
      {
        // idle or delay loop
        emit_sync_t_to_sr();
        emith_sh2_delay_loop(cycles, drcf.delay_reg);
        rcache_unlock_all(); // may lock delay_reg
        drcf.polling = drcf.loop_type = drcf.pinning = 0;
      }
#endif

#if CALL_STACK
      void *rtsadd = NULL, *rtsret = NULL;
      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // BSR - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif

      // XXX move below cond test if not changing host cond (MIPS delay slot)?
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      rcache_clean();

      if (OP_ISBRACND(opd_b->op)) {
        // BT[S], BF[S] - emit condition test
        cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
        if (delay_dep_fw & BITMASK1(SHR_T)) {
          emith_sync_t(sr);
          emith_tst_r_imm(sr, T_save);
        } else {
          cond = emith_tst_t(sr, (opd_b->op == OP_BRANCH_CT));
          if (emith_get_t_cond() >= 0) {
            if (opd_b->op == OP_BRANCH_CT)
              emith_or_r_imm_c(cond, sr, T);
            else
              emith_bic_r_imm_c(cond, sr, T);
          }
        }
      } else
        emith_sync_t(sr);
      // no modification of host status/flags between here and branching!

      v = find_in_sorted_linkage(branch_targets, branch_target_count, target_pc);
      if (v >= 0)
      {
        // local branch
        if (branch_targets[v].ptr) {
          // local backward jump, link here now since host PC is already known
          target = branch_targets[v].ptr;
#if LOOP_OPTIMIZER
          if (pinned_loops[pinned_loop_count].pc == target_pc) {
            // backward jump at end of optimized loop
            rcache_unpin_all();
            target = pinned_loops[pinned_loop_count].ptr;
            pinned_loop_count ++;
          }
#endif
          if (cond != -1) {
            if (emith_jump_patch_inrange(tcache_ptr, target)) {
              emith_jump_cond(cond, target);
            } else {
              // not reachable directly, must use far branch
              EMITH_JMP_START(emith_invert_cond(cond));
              emith_jump(target);
              EMITH_JMP_END(emith_invert_cond(cond));
            }
          } else {
            emith_jump(target);
            rcache_invalidate();
          }
        } else if (blx_target_count < MAX_LOCAL_BRANCHES) {
          // local forward jump
          target = tcache_ptr;
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = target_pc, .ptr = target, .mask = 0x2 };
          if (cond != -1)
            emith_jump_cond_patchable(cond, target);
          else {
            emith_jump_patchable(target);
            rcache_invalidate();
          }
        } else
          // no space for resolving forward branch, handle it as external
          dbg(1, "warning: too many unresolved branches");
      }

      if (target == NULL)
      {
        // can't resolve branch locally, make a block exit
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (cond != -1) {
#if 1
          if (bl && blx_target_count < ARRAY_SIZE(blx_targets)) {
            // conditional jumps get a blx stub for the far jump
            bl->type = BL_JCCBLX;
            target = tcache_ptr;
            blx_targets[blx_target_count++] =
              (struct linkage) { .pc = target_pc, .ptr = target, .bl = bl };
            emith_jump_cond_patchable(cond, target);
          } else {
            // not linkable, or blx table full; inline jump @dispatcher
            EMITH_JMP_START(emith_invert_cond(cond));
            if (bl) {
              bl->jump = tcache_ptr;
              emith_flush(); // flush to inhibit insn swapping
              bl->type = BL_LDJMP;
            }
            tmp = rcache_get_tmp_arg(0);
            emith_move_r_imm(tmp, target_pc);
            rcache_free_tmp(tmp);
            target = sh2_drc_dispatcher;
            emith_jump_patchable(target);
            EMITH_JMP_END(emith_invert_cond(cond));
          }
#else
          // jump @dispatcher - ARM 32bit version with conditional execution
          EMITH_SJMP_START(emith_invert_cond(cond));
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm_c(cond, tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;
          if (bl) {
            bl->jump = tcache_ptr;
            bl->type = BL_JMP;
          }
          emith_jump_cond_patchable(cond, target);
          EMITH_SJMP_END(emith_invert_cond(cond));
#endif
        } else {
          // unconditional, has the far jump inlined
          if (bl) {
            emith_flush(); // flush to inhibit insn swapping
            bl->type = BL_LDJMP;
          }
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;
          emith_jump_patchable(target);
          rcache_invalidate();
        }
      }
#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif

      // branch not taken, correct cycle count
      if (ctaken)
        cycles -= ctaken;
      // set T bit to reflect branch not taken for OP_BRANCH_CT/CF
      if (emith_get_t_cond() >= 0) // T is synced for all other cases
        emith_set_t(sr, opd_b->op == OP_BRANCH_CF);

      drcf.pending_branch_direct = 0;
      if (target_pc >= base_pc && target_pc < pc)
        drcf.polling = drcf.loop_type = 0;
    }
    else if (drcf.pending_branch_indirect) {
      u32 target_pc;

      tmp = rcache_get_reg_arg(0, SHR_PC, NULL);
#if CALL_STACK
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      void *rtsadd = NULL, *rtsret = NULL;

      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // JSR, BSRF - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif

      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      rcache_clean();

#if CALL_STACK
      if (opd_b->rm == SHR_PR) {
        // RTS - restore rts data, else jump to dispatcher
        emith_jump(sh2_drc_dispatcher_return);
      } else
#endif
      if (gconst_get(SHR_PC, &target_pc)) {
        // JMP, JSR, BRAF, BSRF const - treat like unconditional direct branch
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (bl) // pc already loaded somewhere else, can patch jump only
          bl->type = BL_JMP;
        emith_jump_patchable(sh2_drc_dispatcher);
      } else {
        // JMP, JSR, BRAF, BSRF not const
        emith_jump(sh2_drc_dispatcher);
      }
      rcache_invalidate();
#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif

      drcf.pending_branch_indirect = 0;
      drcf.polling = drcf.loop_type = 0;
    }
    rcache_unlock_all();

    do_host_disasm(tcache_id);
  }
  // check the last op
  if (op_flags[i-1] & OF_DELAY_OP)
    opd = &ops[i-2];
  else
    opd = &ops[i-1];

  if (! OP_ISBRAUC(opd->op) || (opd->dest & BITMASK1(SHR_PR)))
  {
    tmp = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    FLUSH_CYCLES(tmp);
    emith_sync_t(tmp);

    rcache_clean();
    bl = dr_prepare_ext_branch(block->entryp, pc, sh2->is_slave, tcache_id);
    if (bl) {
      emith_flush(); // flush to inhibit insn swapping
      bl->type = BL_LDJMP;
    }
    tmp = rcache_get_tmp_arg(0);
    emith_move_r_imm(tmp, pc);
    emith_jump_patchable(sh2_drc_dispatcher);
    rcache_invalidate();
  } else
    rcache_flush();

  // link unresolved branches, emitting blx area entries as needed
  emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
      branch_target_count, blx_targets, blx_target_count);

  emith_flush();
  do_host_disasm(tcache_id);

  emith_pool_commit(0);

  // fill blx backup; do this last to backup final patched code
  for (i = 0; i < block->entry_count; i++)
    for (bl = block->entryp[i].o_links; bl; bl = bl->o_next)
      memcpy(bl->jdisp, bl->blx ? bl->blx : bl->jump, emith_jump_at_size());

  ring_alloc(&tcache_ring[tcache_id], tcache_ptr - block_entry_ptr);
  host_instructions_updated(block_entry_ptr, tcache_ptr, 1);

  dr_activate_block(block, tcache_id, sh2->is_slave);
  emith_update_cache();

  do_host_disasm(tcache_id);

  dbg(2, " block #%d,%d -> %p tcache %d/%d, insns %d -> %d %.3f",
    tcache_id, blkid_main, tcache_ptr,
    tcache_ring[tcache_id].used, tcache_ring[tcache_id].size,
    insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);

  if ((sh2->pc & 0xc6000000) == 0x02000000) { // ROM
    dbg(2, " hash collisions %d/%d", hash_collisions, block_ring[tcache_id].used);
    Pico32x.emu_flags |= P32XF_DRC_ROM_C;
  }
/*
  printf("~~~\n");
  tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
  do_host_disasm(tcache_id);
  printf("~~~\n");
*/

#if (DRC_DEBUG)
  fflush(stdout);
#endif

  return block_entry_ptr;
}
static void sh2_generate_utils(void)
{
  int arg0, arg1, arg2, arg3, sr, tmp, tmp2;
#if DRC_DEBUG
  int hic = host_insn_count; // don't count utils for insn statistics
#endif

  host_arg2reg(arg0, 0);
  host_arg2reg(arg1, 1);
  host_arg2reg(arg2, 2);
  host_arg2reg(arg3, 3);
  emith_move_r_r(arg0, arg0); // nop
  emith_flush();

  // sh2_drc_write8(u32 a, u32 d)
  sh2_drc_write8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write8_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write16(u32 a, u32 d)
  sh2_drc_write16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write16_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write32(u32 a, u32 d)
  sh2_drc_write32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write32_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // d = sh2_drc_read8(u32 a)
  sh2_drc_read8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emit_le_ptr8(DCOND_CC, arg0);
  emith_read8s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read16(u32 a)
  sh2_drc_read16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read16s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read32(u32 a)
  sh2_drc_read32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emit_le_swap(DCOND_CC, RET_REG);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();
  // d = sh2_drc_read8_poll(u32 a)
  sh2_drc_read8_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emit_le_ptr8(-1, arg1);
  emith_read8s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory8);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read16_poll(u32 a)
  sh2_drc_read16_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read16s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory16);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read32_poll(u32 a)
  sh2_drc_read32_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read_r_r_r(arg1, arg2, arg1);
  emit_le_swap(-1, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory32);
  emith_pop_and_ret(arg1);
  emith_flush();

  // sh2_drc_exit(u32 pc)
  sh2_drc_exit = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
  emit_do_static_regs(1, arg2);
  emith_sh2_drc_exit();
  emith_flush();

  // sh2_drc_dispatcher(u32 pc)
  sh2_drc_dispatcher = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
#if BRANCH_CACHE
  // check if PC is in branch target cache
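  // each cache entry is a {pc, host ptr} pair, so (pc & (N-1)*8) serves as
  // both the hash and the byte offset of the entry; on 64 bit hosts the
  // offset is doubled below since the entries are twice as large there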
  emith_and_r_r_imm(arg1, arg0, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_read_r_r_offs(arg2, arg1, offsetof(SH2, branch_cache));
  emith_cmp_r_r(arg2, arg0);
  EMITH_SJMP_START(DCOND_NE);
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bchit);
  emith_read_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_EQ, arg3, 1);
  emith_write_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
#endif
  emith_read_r_r_offs_ptr_c(DCOND_EQ, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  emith_jump_reg_c(DCOND_EQ, RET_REG);
  EMITH_SJMP_END(DCOND_NE);
#endif
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_add_r_r_ptr_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  emith_abicall(dr_lookup_block);
  // store PC and block entry ptr (in arg0) in branch target cache
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
#if BRANCH_CACHE
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_NE, arg3, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
#endif
  emith_ctx_read_c(DCOND_NE, arg2, SHR_PC * 4);
  emith_and_r_r_imm(arg1, arg2, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_write_r_r_offs_c(DCOND_NE, arg2, arg1, offsetof(SH2, branch_cache));
  emith_write_r_r_offs_ptr_c(DCOND_NE, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
#endif
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // lookup failed, call sh2_translate()
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  emith_abicall(sh2_translate);
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // XXX: can't translate, fail
  emith_abicall(dr_failure);
  emith_flush();

#if CALL_STACK
  // pc = sh2_drc_dispatcher_call(u32 pc)
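  // the rts cache is a small ring of (PR, host address) pairs: each call
  // advances the index by one entry (2*sizeof(void *)) with wraparound,
  // and sh2_drc_dispatcher_return pops by matching PR before jumping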
  sh2_drc_dispatcher_call = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg3, CONTEXT_REG, arg2, 0);
  rcache_get_reg_arg(2, SHR_PR, NULL);
  emith_add_r_ret(arg1);
  emith_write_r_r_offs_ptr(arg1, arg3, offsetof(SH2, rts_cache)+sizeof(void *));
  emith_write_r_r_offs(arg2, arg3, offsetof(SH2, rts_cache));
  rcache_flush();
  emith_ret();
  emith_flush();

  // sh2_drc_dispatcher_return(u32 pc)
  sh2_drc_dispatcher_return = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg2, 0);
  emith_read_r_r_offs(arg3, arg1, offsetof(SH2, rts_cache));
  emith_cmp_r_r(arg0, arg3);
#if (DRC_DEBUG & 128)
  EMITH_SJMP_START(DCOND_EQ);
  emith_move_r_ptr_imm(arg3, (uptr)&rcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_add_r_imm_c(DCOND_NE, arg1, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
  EMITH_SJMP_END(DCOND_EQ);
#else
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
#endif
  emith_read_r_r_offs_ptr(arg0, arg1, offsetof(SH2, rts_cache) + sizeof(void *));
  emith_sub_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg3, (uptr)&rchit);
  emith_read_r_r_offs(arg1, arg3, 0);
  emith_add_r_imm(arg1, 1);
  emith_write_r_r_offs(arg1, arg3, 0);
#endif
  emith_jump_reg(arg0);
  emith_flush();
#endif

  // sh2_drc_test_irq(void)
  // assumes it's called from main function (may jump to dispatcher)
  sh2_drc_test_irq = (void *)tcache_ptr;
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_lsr(arg0, sr, I_SHIFT);
  emith_and_r_imm(arg0, 0x0f);
  emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  EMITH_SJMP_START(DCOND_GT);
  emith_ret_c(DCOND_LE); // nope, return
  EMITH_SJMP_END(DCOND_GT);
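  // exception entry as on the real SH2: SP -= 8, then SR is stored at
  // SP+4 and PC at SP, i.e. SR is pushed first and sits above the PC slot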
  // adjust SP
  tmp = rcache_get_reg(SHR_SP, RC_GR_RMW, NULL);
  emith_sub_r_imm(tmp, 4*2);
  rcache_clean();
  // push SR
  tmp = rcache_get_reg_arg(0, SHR_SP, &tmp2);
  emith_add_r_r_imm(tmp, tmp2, 4);
  tmp = rcache_get_reg_arg(1, SHR_SR, NULL);
  emith_clear_msb(tmp, tmp, 22);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_abicall(p32x_sh2_write32); // XXX: use sh2_drc_write32?
  // push PC
  rcache_get_reg_arg(0, SHR_SP, NULL);
  rcache_get_reg_arg(1, SHR_PC, NULL);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_abicall(p32x_sh2_write32);
  // update I, cycles, do callback
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  emith_bic_r_imm(sr, I);
  emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  rcache_flush();
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
  // obtain new PC
  tmp = rcache_get_reg_arg(1, SHR_VBR, &tmp2);
  emith_add_r_r_r_lsl(arg0, tmp2, RET_REG, 2);
  emith_call(sh2_drc_read32);
  if (arg0 != RET_REG)
    emith_move_r_r(arg0, RET_REG);
  emith_call_cleanup();
  rcache_invalidate();
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

  // sh2_drc_entry(SH2 *sh2)
  sh2_drc_entry = (void *)tcache_ptr;
  emith_sh2_drc_entry();
  emith_move_r_r_ptr(CONTEXT_REG, arg0); // move ctx, arg0
  emit_do_static_regs(0, arg2);
  emith_call(sh2_drc_test_irq);
  emith_ctx_read(arg0, SHR_PC * 4);
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

#ifdef DRC_SR_REG
  // sh2_drc_save_sr(SH2 *sh2)
  sh2_drc_save_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_write_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_invalidate();
  emith_ret();
  emith_flush();

  // sh2_drc_restore_sr(SH2 *sh2)
  sh2_drc_restore_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_WRITE, NULL);
  emith_read_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_flush();
  emith_ret();
  emith_flush();
#endif

#ifdef PDB_NET
  // debug
  #define MAKE_READ_WRAPPER(func) { \
    void *tmp = (void *)tcache_ptr; \
    emith_push_ret(); \
    emith_call(func); \
    emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0]));  \
    emith_addf_r_r(arg2, arg0);                           \
    emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
    emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1]));  \
    emith_adc_r_imm(arg2, 0x01000000);                    \
    emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
    emith_pop_and_ret(); \
    emith_flush(); \
    func = tmp; \
  }
  #define MAKE_WRITE_WRAPPER(func) { \
    void *tmp = (void *)tcache_ptr; \
    emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0]));  \
    emith_addf_r_r(arg2, arg1);                           \
    emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
    emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1]));  \
    emith_adc_r_imm(arg2, 0x01000000);                    \
    emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
    emith_move_r_r_ptr(arg2, CONTEXT_REG);                \
    emith_jump(func); \
    emith_flush(); \
    func = tmp; \
  }

  MAKE_READ_WRAPPER(sh2_drc_read8);
  MAKE_READ_WRAPPER(sh2_drc_read16);
  MAKE_READ_WRAPPER(sh2_drc_read32);
  MAKE_WRITE_WRAPPER(sh2_drc_write8);
  MAKE_WRITE_WRAPPER(sh2_drc_write16);
  MAKE_WRITE_WRAPPER(sh2_drc_write32);
  MAKE_READ_WRAPPER(sh2_drc_read8_poll);
  MAKE_READ_WRAPPER(sh2_drc_read16_poll);
  MAKE_READ_WRAPPER(sh2_drc_read32_poll);
#endif

  emith_pool_commit(0);
  rcache_invalidate();
#if (DRC_DEBUG & 4)
  host_dasm_new_symbol(sh2_drc_entry);
  host_dasm_new_symbol(sh2_drc_dispatcher);
#if CALL_STACK
  host_dasm_new_symbol(sh2_drc_dispatcher_call);
  host_dasm_new_symbol(sh2_drc_dispatcher_return);
#endif
  host_dasm_new_symbol(sh2_drc_exit);
  host_dasm_new_symbol(sh2_drc_test_irq);
  host_dasm_new_symbol(sh2_drc_write8);
  host_dasm_new_symbol(sh2_drc_write16);
  host_dasm_new_symbol(sh2_drc_write32);
  host_dasm_new_symbol(sh2_drc_read8);
  host_dasm_new_symbol(sh2_drc_read16);
  host_dasm_new_symbol(sh2_drc_read32);
  host_dasm_new_symbol(sh2_drc_read8_poll);
  host_dasm_new_symbol(sh2_drc_read16_poll);
  host_dasm_new_symbol(sh2_drc_read32_poll);
#ifdef DRC_SR_REG
  host_dasm_new_symbol(sh2_drc_save_sr);
  host_dasm_new_symbol(sh2_drc_restore_sr);
#endif
#endif

#if DRC_DEBUG
  host_insn_count = hic;
#endif
}
static void sh2_smc_rm_blocks(u32 a, int len, int tcache_id, u32 shift)
{
  struct block_list **blist, *entry, *next;
  u32 mask = RAM_SIZE(tcache_id) - 1;
  u32 wtmask = ~0x20000000; // writethrough area mask
  u32 start_addr, end_addr;
  u32 start_lit, end_lit;
  struct block_desc *block;
#if (DRC_DEBUG & 2)
  int removed = 0;
#endif

  // ignore cache-through
  a &= wtmask;

  blist = &inval_lookup[tcache_id][(a & mask) / INVAL_PAGE_SIZE];
  entry = *blist;
  // go through the block list for this range
  while (entry != NULL) {
    next = entry->next;
    block = entry->block;
    start_addr = block->addr & wtmask;
    end_addr = start_addr + block->size;
    start_lit = block->addr_lit & wtmask;
    end_lit = start_lit + block->size_lit;
    // disable/delete block if it covers the modified address
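    // (standard interval overlap test: [start,end) intersects [a,a+len)
    // iff start < a+len && a < end, checked for both code and literals)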
    if ((start_addr < a+len && a < end_addr) ||
        (start_lit < a+len && a < end_lit))
    {
      dbg(2, "smc remove @%08x", a);
      end_addr = (start_lit < a+len && block->size_lit ? a : 0);
      dr_rm_block_entry(block, tcache_id, end_addr, 0);
#if (DRC_DEBUG & 2)
      removed = 1;
#endif
    }
    entry = next;
  }

#if (DRC_DEBUG & 2)
  if (!removed)
    dbg(2, "rm_blocks called @%08x, no work?", a);
#endif

#if BRANCH_CACHE
  if (tcache_id)
    memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  else {
    memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
  }
#endif
#if CALL_STACK
  if (tcache_id) {
    memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    sh2s[tcache_id-1].rts_cache_idx = 0;
  } else {
    memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  }
#endif
}

void sh2_drc_wcheck_ram(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 0, SH2_DRCBLK_RAM_SHIFT);
}

void sh2_drc_wcheck_da(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 1 + sh2->is_slave, SH2_DRCBLK_DA_SHIFT);
}

int sh2_execute_drc(SH2 *sh2c, int cycles)
{
  int ret_cycles;

  // cycles are kept in SHR_SR unused bits (upper 20)
  // bit11 contains T saved for delay slot
  // others are usual SH2 flags
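  // e.g. a budget of 1000 cycles enters as sr = flags | (1000 << 12);
  // compiled blocks decrement the packed counter and exit back here once
  // it underflows, so a still-positive remainder indicates a problem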
  sh2c->sr &= 0x3f3;
  sh2c->sr |= cycles << 12;

  sh2c->state |= SH2_IN_DRC;
  sh2_drc_entry(sh2c);
  sh2c->state &= ~SH2_IN_DRC;

  // TODO: irq cycles
  ret_cycles = (int32_t)sh2c->sr >> 12;
  if (ret_cycles > 0)
    dbg(1, "warning: drc returned with cycles: %d, pc %08x", ret_cycles, sh2c->pc);

  sh2c->sr &= 0x3f3;
  return ret_cycles;
}

static void block_stats(void)
{
#if (DRC_DEBUG & 2)
  int c, b, i;
  long total = 0;

  printf("block stats:\n");
  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
      if (block_tables[b][i].addr != 0)
        total += block_tables[b][i].refcount;
  }
  printf("total: %ld\n", total);

  for (c = 0; c < 20; c++) {
    struct block_desc *blk, *maxb = NULL;
    int max = 0;
    for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
      for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
        if ((blk = &block_tables[b][i])->addr != 0 && blk->refcount > max) {
          max = blk->refcount;
          maxb = blk;
        }
    }
    if (maxb == NULL)
      break;
    printf("%08lx %p %9d %2.3f%%\n", (ulong)maxb->addr, maxb->tcache_ptr, maxb->refcount,
      (double)maxb->refcount / total * 100.0);
    maxb->refcount = 0;
  }

  for (b = 0; b < ARRAY_SIZE(block_tables); b++)
    for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
      block_tables[b][i].refcount = 0;
#endif
}

void entry_stats(void)
{
#if (DRC_DEBUG & 32)
  int c, b, i, j;
  long total = 0;

  printf("block entry stats:\n");
  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
      for (j = 0; j < block_tables[b][i].entry_count; j++)
        total += block_tables[b][i].entryp[j].entry_count;
  }
  printf("total: %ld\n", total);

  for (c = 0; c < 20; c++) {
    struct block_desc *blk;
    struct block_entry *maxb = NULL;
    int max = 0;
    for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
      for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size) {
        blk = &block_tables[b][i];
        for (j = 0; j < blk->entry_count; j++)
          if (blk->entryp[j].entry_count > max) {
            max = blk->entryp[j].entry_count;
            maxb = &blk->entryp[j];
          }
      }
    }
    if (maxb == NULL)
      break;
    printf("%08lx %p %9d %2.3f%%\n", (ulong)maxb->pc, maxb->tcache_ptr, maxb->entry_count,
      (double)100 * maxb->entry_count / total);
    maxb->entry_count = 0;
  }

  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
      for (j = 0; j < block_tables[b][i].entry_count; j++)
        block_tables[b][i].entryp[j].entry_count = 0;
  }
#endif
}
static void backtrace(void)
{
#if (DRC_DEBUG & 1024)
  int i;
  printf("backtrace master:\n");
  for (i = 0; i < ARRAY_SIZE(csh2[0]); i++)
    SH2_DUMP(&csh2[0][i], "bt msh2");
  printf("backtrace slave:\n");
  for (i = 0; i < ARRAY_SIZE(csh2[1]); i++)
    SH2_DUMP(&csh2[1][i], "bt ssh2");
#endif
}

static void state_dump(void)
{
#if (DRC_DEBUG & 2048)
  int i;

  SH2_DUMP(&sh2s[0], "master");
  printf("VBR msh2: %lx\n", (ulong)sh2s[0].vbr);
  for (i = 0; i < 0x60; i++) {
    printf("%08lx ", (ulong)p32x_sh2_read32(sh2s[0].vbr + i*4, &sh2s[0]));
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("stack msh2: %lx\n", (ulong)sh2s[0].r[15]);
  for (i = -0x30; i < 0x30; i++) {
    printf("%08lx ", (ulong)p32x_sh2_read32(sh2s[0].r[15] + i*4, &sh2s[0]));
    if ((i+1) % 8 == 0) printf("\n");
  }
  SH2_DUMP(&sh2s[1], "slave");
  printf("VBR ssh2: %lx\n", (ulong)sh2s[1].vbr);
  for (i = 0; i < 0x60; i++) {
    printf("%08lx ", (ulong)p32x_sh2_read32(sh2s[1].vbr + i*4, &sh2s[1]));
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("stack ssh2: %lx\n", (ulong)sh2s[1].r[15]);
  for (i = -0x30; i < 0x30; i++) {
    printf("%08lx ", (ulong)p32x_sh2_read32(sh2s[1].r[15] + i*4, &sh2s[1]));
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
}

static void bcache_stats(void)
{
#if (DRC_DEBUG & 128)
  int i;

#if CALL_STACK
  for (i = 1; i < ARRAY_SIZE(sh2s->rts_cache); i++)
    if (sh2s[0].rts_cache[i].pc == -1 && sh2s[1].rts_cache[i].pc == -1) break;
  printf("return cache hits:%d misses:%d depth: %d index: %d/%d\n",
    rchit, rcmiss, i, sh2s[0].rts_cache_idx, sh2s[1].rts_cache_idx);
  for (i = 0; i < ARRAY_SIZE(sh2s[0].rts_cache); i++) {
    printf("%08lx ", (ulong)sh2s[0].rts_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
  for (i = 0; i < ARRAY_SIZE(sh2s[1].rts_cache); i++) {
    printf("%08lx ", (ulong)sh2s[1].rts_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
#if BRANCH_CACHE
  printf("branch cache hits:%d misses:%d\n", bchit, bcmiss);
  printf("branch cache master:\n");
  for (i = 0; i < ARRAY_SIZE(sh2s[0].branch_cache); i++) {
    printf("%08lx ", (ulong)sh2s[0].branch_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("branch cache slave:\n");
  for (i = 0; i < ARRAY_SIZE(sh2s[1].branch_cache); i++) {
    printf("%08lx ", (ulong)sh2s[1].branch_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
#endif
}
  5274. void sh2_drc_flush_all(void)
  5275. {
  5276. backtrace();
  5277. state_dump();
  5278. block_stats();
  5279. entry_stats();
  5280. bcache_stats();
  5281. dr_flush_tcache(0);
  5282. dr_flush_tcache(1);
  5283. dr_flush_tcache(2);
  5284. Pico32x.emu_flags &= ~P32XF_DRC_ROM_C;
  5285. }
  5286. void sh2_drc_mem_setup(SH2 *sh2)
  5287. {
  5288. // fill the DRC-only convenience pointers
  5289. sh2->p_drcblk_da = Pico32xMem->drcblk_da[!!sh2->is_slave];
  5290. sh2->p_drcblk_ram = Pico32xMem->drcblk_ram;
  5291. }
int sh2_drc_init(SH2 *sh2)
{
  int i;

  if (block_tables[0] == NULL)
  {
    for (i = 0; i < TCACHE_BUFFERS; i++) {
      block_tables[i] = calloc(BLOCK_MAX_COUNT(i), sizeof(*block_tables[0]));
      if (block_tables[i] == NULL)
        goto fail;
      entry_tables[i] = calloc(ENTRY_MAX_COUNT(i), sizeof(*entry_tables[0]));
      if (entry_tables[i] == NULL)
        goto fail;
      block_link_pool[i] = calloc(BLOCK_LINK_MAX_COUNT(i),
                          sizeof(*block_link_pool[0]));
      if (block_link_pool[i] == NULL)
        goto fail;

      inval_lookup[i] = calloc(RAM_SIZE(i) / INVAL_PAGE_SIZE,
                               sizeof(inval_lookup[0]));
      if (inval_lookup[i] == NULL)
        goto fail;

      hash_tables[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*hash_tables[0]));
      if (hash_tables[i] == NULL)
        goto fail;

      unresolved_links[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*unresolved_links[0]));
      if (unresolved_links[i] == NULL)
        goto fail;
      //atexit(sh2_drc_finish);

      RING_INIT(&block_ring[i], block_tables[i], BLOCK_MAX_COUNT(i));
      RING_INIT(&entry_ring[i], entry_tables[i], ENTRY_MAX_COUNT(i));
    }

    block_list_pool = calloc(BLOCK_LIST_MAX_COUNT, sizeof(*block_list_pool));
    if (block_list_pool == NULL)
      goto fail;
    block_list_pool_count = 0;
    blist_free = NULL;

    memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
    memset(blink_free, 0, sizeof(blink_free));

    drc_cmn_init();
    rcache_init();

    tcache_ptr = tcache;
    sh2_generate_utils();
    host_instructions_updated(tcache, tcache_ptr, 1);
    emith_update_cache();

    i = tcache_ptr - tcache;
    RING_INIT(&tcache_ring[0], tcache_ptr, tcache_sizes[0] - i);
    for (i = 1; i < ARRAY_SIZE(tcache_ring); i++) {
      RING_INIT(&tcache_ring[i], tcache_ring[i-1].base + tcache_ring[i-1].size,
          tcache_sizes[i]);
    }

#if (DRC_DEBUG & 4)
    for (i = 0; i < ARRAY_SIZE(block_tables); i++)
      tcache_dsm_ptrs[i] = tcache_ring[i].base;
    // disasm the utils
    tcache_dsm_ptrs[0] = tcache;
    do_host_disasm(0);
    fflush(stdout);
#endif

#if (DRC_DEBUG & 1)
    hash_collisions = 0;
#endif
  }

  memset(sh2->branch_cache, -1, sizeof(sh2->branch_cache));
  memset(sh2->rts_cache, -1, sizeof(sh2->rts_cache));
  sh2->rts_cache_idx = 0;

  return 0;

fail:
  sh2_drc_finish(sh2);
  return -1;
}
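
// tear down everything sh2_drc_init() set up; also serves as the bail-out
// path for a partially completed init, hence the NULL checks everywhere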
void sh2_drc_finish(SH2 *sh2)
{
  int i;

  if (block_tables[0] == NULL)
    return;

#if (DRC_DEBUG & (256|512))
  if (trace[0]) fclose(trace[0]);
  if (trace[1]) fclose(trace[1]);
  trace[0] = trace[1] = NULL;
#endif

#if (DRC_DEBUG & 4)
  for (i = 0; i < TCACHE_BUFFERS; i++) {
    printf("~~~ tcache %d\n", i);
#if 0
    if (tcache_ring[i].first < tcache_ring[i].next) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    } else if (tcache_ring[i].used) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].base + tcache_ring[i].size;
      do_host_disasm(i);
      tcache_dsm_ptrs[i] = tcache_ring[i].base;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    }
#endif
    printf("max links: %d\n", block_link_pool_counts[i]);
  }
  printf("max block list: %d\n", block_list_pool_count);
#endif

  sh2_drc_flush_all();

  for (i = 0; i < TCACHE_BUFFERS; i++) {
    if (block_tables[i] != NULL)
      free(block_tables[i]);
    block_tables[i] = NULL;
    if (entry_tables[i] != NULL)
      free(entry_tables[i]);
    entry_tables[i] = NULL;
    if (block_link_pool[i] != NULL)
      free(block_link_pool[i]);
    block_link_pool[i] = NULL;
    blink_free[i] = NULL;

    if (inval_lookup[i] != NULL)
      free(inval_lookup[i]);
    inval_lookup[i] = NULL;

    if (hash_tables[i] != NULL) {
      free(hash_tables[i]);
      hash_tables[i] = NULL;
    }
    if (unresolved_links[i] != NULL) {
      free(unresolved_links[i]);
      unresolved_links[i] = NULL;
    }
  }

  if (block_list_pool != NULL)
    free(block_list_pool);
  block_list_pool = NULL;
  blist_free = NULL;

  drc_cmn_cleanup();
}

#endif /* DRC_SH2 */
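
// get a host pointer that can be indexed with the full guest pc to fetch
// code at pc: p32x_sh2_get_mem_ptr() returns the mapped area and its
// offset mask, and backing the pointer off by the address bits outside
// the mask makes base + pc land inside the area. a sketch, assuming pc
// 0x22000100 maps to ROM with offset mask 0x3fffff (the concrete values
// depend on the memory map):
//   base = rom - (0x22000100 & ~0x3fffff) = rom - 0x22000000
//   base + 0x22000100 = rom + 0x100
// returns (void *)-1 if pc doesn't map to any directly accessible area.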
static void *dr_get_pc_base(u32 pc, SH2 *sh2)
{
  void *ret;
  u32 mask = 0;

  ret = p32x_sh2_get_mem_ptr(pc, &mask, sh2);
  if (ret == (void *)-1)
    return ret;

  return (char *)ret - (pc & ~mask);
}
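
// scan a block of guest code starting at base_pc, disassembling it into
// ops[] and filling in op_flags[]; returns a 16-bit additive checksum
// over the code (and its literal pool) so callers can cheaply detect that
// the code was modified. *end_pc_out receives the first pc past the
// block; the detected literal pool range is returned through
// *base_literals_out/*end_literals_out (both fall back to end_pc when no
// literals were found).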
u16 scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc_out,
  u32 *base_literals_out, u32 *end_literals_out)
{
  u16 *dr_pc_base;
  u32 pc, op, tmp;
  u32 end_pc, end_literals = 0;
  u32 lowest_literal = 0;
  u32 lowest_mova = 0;
  struct op_data *opd;
  int next_is_delay = 0;
  int end_block = 0;
  int is_divop;
  int i, i_end, i_div = -1;
  u32 crc = 0;
  // 2nd pass stuff
  int last_btarget; // loop detector
  enum { T_UNKNOWN, T_CLEAR, T_SET } t; // T propagation state

  memset(op_flags, 0, sizeof(*op_flags) * BLOCK_INSN_LIMIT);
  op_flags[0] |= OF_BTARGET; // block start is always a target

  dr_pc_base = dr_get_pc_base(base_pc, &sh2s[!!is_slave]);

  // 1st pass: disassemble
  for (i = 0, pc = base_pc; ; i++, pc += 2) {
    // we need an ops[] entry after the last one initialized,
    // so do it before end_block checks
    opd = &ops[i];
    opd->op = OP_UNHANDLED;
    opd->rm = -1;
    opd->source = opd->dest = 0;
    opd->cycles = 1;
    opd->imm = 0;

    if (next_is_delay) {
      op_flags[i] |= OF_DELAY_OP;
      next_is_delay = 0;
    }
    else if (end_block || i >= BLOCK_INSN_LIMIT - 2)
      break;
    else if ((lowest_mova && lowest_mova <= pc) ||
             (lowest_literal && lowest_literal <= pc))
      break; // text area collides with data area

    is_divop = 0;
    op = FETCH_OP(pc);
    switch ((op & 0xf000) >> 12)
    {
    /////////////////////////////////////////////
    case 0x00:
      switch (op & 0x0f)
      {
      case 0x02:
        switch (GET_Fx())
        {
        case 0: // STC SR,Rn    0000nnnn00000010
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // STC GBR,Rn   0000nnnn00010010
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // STC VBR,Rn   0000nnnn00100010
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = tmp;
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x03:
        CHECK_UNHANDLED_BITS(0xd0, undefined);
        // BRAF Rm    0000mmmm00100011
        // BSRF Rm    0000mmmm00000011
        opd->op = OP_BRANCH_RF;
        opd->rm = GET_Rn();
        opd->source = BITMASK2(SHR_PC, opd->rm);
        opd->dest = BITMASK1(SHR_PC);
        if (!(op & 0x20))
          opd->dest |= BITMASK1(SHR_PR);
        opd->cycles = 2;
        next_is_delay = 1;
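        // BSRF writes PR (it's a call), so the insn after the delay slot
        // is reachable and is marked as a branch target; plain BRAF ends
        // the block unless that insn is already a known target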
        if (!(opd->dest & BITMASK1(SHR_PR)))
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        else
          op_flags[i+1+next_is_delay] |= OF_BTARGET;
        break;
      case 0x04: // MOV.B Rm,@(R0,Rn)   0000nnnnmmmm0100
      case 0x05: // MOV.W Rm,@(R0,Rn)   0000nnnnmmmm0101
      case 0x06: // MOV.L Rm,@(R0,Rn)   0000nnnnmmmm0110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x07:
        // MUL.L Rm,Rn   0000nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        opd->cycles = 2;
        break;
      case 0x08:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // CLRT   0000000000001000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 0;
          break;
        case 1: // SETT   0000000000011000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 1;
          break;
        case 2: // CLRMAC 0000000000101000
          opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // NOP    0000000000001001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          break;
        case 1: // DIV0U  0000000000011001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          opd->op = OP_DIV0;
          opd->source = BITMASK1(SHR_SR);
          opd->dest = BITMASK2(SHR_SR, SHR_T);
          div(opd) = (struct div){ .rn=SHR_MEM, .rm=SHR_MEM, .ro=SHR_MEM };
          i_div = i;
          is_divop = 1;
          break;
        case 2: // MOVT Rn 0000nnnn00101001
          opd->source = BITMASK1(SHR_T);
          opd->dest = BITMASK1(GET_Rn());
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // STS MACH,Rn 0000nnnn00001010
          tmp = SHR_MACH;
          break;
        case 1: // STS MACL,Rn 0000nnnn00011010
          tmp = SHR_MACL;
          break;
        case 2: // STS PR,Rn   0000nnnn00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(tmp);
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0b:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // RTS   0000000000001011
          opd->op = OP_BRANCH_R;
          opd->rm = SHR_PR;
          opd->source = BITMASK1(opd->rm);
          opd->dest = BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        case 1: // SLEEP 0000000000011011
          opd->op = OP_SLEEP;
          end_block = 1;
          break;
        case 2: // RTE   0000000000101011
          opd->op = OP_RTE;
          opd->source = BITMASK1(SHR_SP);
          opd->dest = BITMASK4(SHR_SP, SHR_SR, SHR_T, SHR_PC);
          opd->cycles = 4;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0c: // MOV.B @(R0,Rm),Rn   0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn   0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn   0000nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(GET_Rn());
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0f: // MAC.L @Rm+,@Rn+     0000nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x01:
      // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), GET_Rn());
      opd->dest = BITMASK1(SHR_MEM);
      opd->imm = (op & 0x0f) * 4;
      break;

    /////////////////////////////////////////////
    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn   0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn   0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn   0010nnnnmmmm0010
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x04: // MOV.B Rm,@-Rn  0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn  0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn  0010nnnnmmmm0110
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x07: // DIV0S Rm,Rn    0010nnnnmmmm0111
        opd->op = OP_DIV0;
        opd->source = BITMASK3(SHR_SR, GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_SR, SHR_T);
        div(opd) = (struct div){ .rn=GET_Rn(), .rm=GET_Rm(), .ro=SHR_MEM };
        i_div = i;
        is_divop = 1;
        break;
      case 0x08: // TST Rm,Rn      0010nnnnmmmm1000
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x09: // AND Rm,Rn      0010nnnnmmmm1001
      case 0x0a: // XOR Rm,Rn      0010nnnnmmmm1010
      case 0x0b: // OR  Rm,Rn      0010nnnnmmmm1011
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0c: // CMP/STR Rm,Rn  0010nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x0d: // XTRCT Rm,Rn    0010nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0e: // MULU.W Rm,Rn   0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn   0010nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn   0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn   0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn   0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn   0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn   0011nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x04: // DIV1 Rm,Rn     0011nnnnmmmm0100
        opd->source = BITMASK4(GET_Rm(), GET_Rn(), SHR_SR, SHR_T);
        opd->dest = BITMASK3(GET_Rn(), SHR_SR, SHR_T);
        if (i_div >= 0) {
          // divide operation: all DIV1 operations must use the same reg pair
          if (div(&ops[i_div]).rn == SHR_MEM)
            div(&ops[i_div]).rn=GET_Rn(), div(&ops[i_div]).rm=GET_Rm();
          if (div(&ops[i_div]).rn == GET_Rn() && div(&ops[i_div]).rm == GET_Rm()) {
            div(&ops[i_div]).div1 += 1;
            div(&ops[i_div]).state = 0;
            is_divop = 1;
          } else {
            ops[i_div].imm = 0;
            i_div = -1;
          }
        }
        break;
      case 0x05: // DMULU.L Rm,Rn  0011nnnnmmmm0101
      case 0x0d: // DMULS.L Rm,Rn  0011nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
        opd->cycles = 2;
        break;
      case 0x08: // SUB Rm,Rn      0011nnnnmmmm1000
      case 0x0c: // ADD Rm,Rn      0011nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a: // SUBC Rm,Rn     0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn     0011nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x0b: // SUBV Rm,Rn     0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn     0011nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn    0100nnnn00000000
        case 2: // SHAL Rn    0100nnnn00100000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // DT Rn      0100nnnn00010000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          op_flags[i] |= OF_DELAY_INSN;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn    0100nnnn00000001
        case 2: // SHAR Rn    0100nnnn00100001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // CMP/PZ Rn  0100nnnn00010001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x22: // STS.L PR,@-Rn   0100nnnn00100010
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x03: // STC.L SR,@-Rn   0100nnnn00000011
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->cycles = 2;
          break;
        case 0x13: // STC.L GBR,@-Rn  0100nnnn00010011
          tmp = BITMASK1(SHR_GBR);
          opd->cycles = 2;
          break;
        case 0x23: // STC.L VBR,@-Rn  0100nnnn00100011
          tmp = BITMASK1(SHR_VBR);
          opd->cycles = 2;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn()) | tmp;
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn    0100nnnn00000100
        case 0x05: // ROTR Rn    0100nnnn00000101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x24: // ROTCL Rn   0100nnnn00100100
          if (i_div >= 0) {
            // divide operation: all ROTCL operations must use the same register
            if (div(&ops[i_div]).ro == SHR_MEM)
              div(&ops[i_div]).ro = GET_Rn();
            if (div(&ops[i_div]).ro == GET_Rn() && !div(&ops[i_div]).state) {
              div(&ops[i_div]).rotcl += 1;
              div(&ops[i_div]).state = 1;
              is_divop = 1;
            } else {
              ops[i_div].imm = 0;
              i_div = -1;
            }
          }
          // fallthrough
        case 0x25: // ROTCR Rn   0100nnnn00100101
          opd->source = BITMASK2(GET_Rn(), SHR_T);
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x15: // CMP/PL Rn  0100nnnn00010101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x26: // LDS.L @Rm+,PR   0100mmmm00100110
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x07: // LDC.L @Rm+,SR   0100mmmm00000111
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x17: // LDC.L @Rm+,GBR  0100mmmm00010111
          tmp = BITMASK1(SHR_GBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x27: // LDC.L @Rm+,VBR  0100mmmm00100111
          tmp = BITMASK1(SHR_VBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), SHR_MEM);
        opd->dest = BITMASK1(GET_Rn()) | tmp;
        break;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0:
          // SHLL2 Rn   0100nnnn00001000
          // SHLR2 Rn   0100nnnn00001001
          break;
        case 1:
          // SHLL8 Rn   0100nnnn00011000
          // SHLR8 Rn   0100nnnn00011001
          break;
        case 2:
          // SHLL16 Rn  0100nnnn00101000
          // SHLR16 Rn  0100nnnn00101001
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp = SHR_MACL;
          break;
        case 2: // LDS Rm,PR   0100mmmm00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0b:
        switch (GET_Fx())
        {
        case 0: // JSR @Rm   0100mmmm00001011
          opd->dest = BITMASK1(SHR_PR);
          // fallthrough
        case 2: // JMP @Rm   0100mmmm00101011
          opd->op = OP_BRANCH_R;
          opd->rm = GET_Rn();
          opd->source = BITMASK1(opd->rm);
          opd->dest |= BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          if (!(opd->dest & BITMASK1(SHR_PR)))
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          else
            op_flags[i+1+next_is_delay] |= OF_BTARGET;
          break;
        case 1: // TAS.B @Rn 0100nnnn00011011
          opd->source = BITMASK2(GET_Rn(), SHR_MEM);
          opd->dest = BITMASK2(SHR_T, SHR_MEM);
          opd->cycles = 4;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR  0100mmmm00001110
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_LDC;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = tmp;
        break;
      case 0x0f:
        // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x05:
      // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (op & 0x0f) * 4;
      op_flags[i] |= OF_POLL_INSN;
      break;

    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x04: // MOV.B @Rm+,Rn  0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn  0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn  0110nnnnmmmm0110
        opd->dest = BITMASK2(GET_Rm(), GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        break;
      case 0x00: // MOV.B @Rm,Rn   0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn   0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn   0110nnnnmmmm0010
        opd->dest = BITMASK1(GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0a: // NEGC Rm,Rn     0110nnnnmmmm1010
        opd->source = BITMASK2(GET_Rm(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x03: // MOV Rm,Rn      0110nnnnmmmm0011
        opd->op = OP_MOVE;
        goto arith_rmrn;
      case 0x07: // NOT Rm,Rn      0110nnnnmmmm0111
      case 0x08: // SWAP.B Rm,Rn   0110nnnnmmmm1000
      case 0x09: // SWAP.W Rm,Rn   0110nnnnmmmm1001
      case 0x0b: // NEG Rm,Rn      0110nnnnmmmm1011
      case 0x0c: // EXTU.B Rm,Rn   0110nnnnmmmm1100
      case 0x0d: // EXTU.W Rm,Rn   0110nnnnmmmm1101
      case 0x0e: // EXTS.B Rm,Rn   0110nnnnmmmm1110
      case 0x0f: // EXTS.W Rm,Rn   0110nnnnmmmm1111
      arith_rmrn:
        opd->source = BITMASK1(GET_Rm());
        opd->dest = BITMASK1(GET_Rn());
        break;
      }
      break;

    /////////////////////////////////////////////
    case 0x07:
      // ADD #imm,Rn  0111nnnniiiiiiii
      opd->source = opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn)  10000000nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f);
        break;
      case 0x0100: // MOV.W R0,@(disp,Rn)  10000001nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f) * 2;
        break;
      case 0x0400: // MOV.B @(disp,Rm),R0  10000100mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0500: // MOV.W @(disp,Rm),R0  10000101mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f) * 2;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0800: // CMP/EQ #imm,R0       10001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = (s8)op;
        break;
      case 0x0d00: // BT/S label 10001101dddddddd
      case 0x0f00: // BF/S label 10001111dddddddd
        next_is_delay = 1;
        // fallthrough
      case 0x0900: // BT label   10001001dddddddd
      case 0x0b00: // BF label   10001011dddddddd
        opd->op = (op & 0x0200) ? OP_BRANCH_CF : OP_BRANCH_CT;
        opd->source = BITMASK2(SHR_PC, SHR_T);
        opd->dest = BITMASK1(SHR_PC);
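        // displacement: low 8 bits, sign-extended and scaled by the insn
        // size; e.g. op 0x8905 (BT with disp 5) gives imm = pc + 4 + 5*2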
        opd->imm = ((signed int)(op << 24) >> 23);
        opd->imm += pc + 4;
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x09:
      // MOV.W @(disp,PC),Rn  1001nnnndddddddd
      opd->op = OP_LOAD_POOL;
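      // base for the PC-relative fetch; for an insn in a delay slot the
      // branch target is used instead, and if that target isn't known at
      // scan time (conditional or indirect branch) tmp is zeroed so the
      // literal won't be tracked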
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = tmp + 2 + (op & 0xff) * 2;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 1;
      break;

    /////////////////////////////////////////////
    case 0x0b:
      // BSR label 1011dddddddddddd
      opd->dest = BITMASK1(SHR_PR);
      // fallthrough
    case 0x0a:
      // BRA label 1010dddddddddddd
      opd->op = OP_BRANCH;
      opd->source = BITMASK1(SHR_PC);
      opd->dest |= BITMASK1(SHR_PC);
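      // displacement: low 12 bits, sign-extended and scaled by the insn
      // size; e.g. op 0xaffe (BRA with disp -2) gives imm = pc + 4 - 4,
      // i.e. the classic branch-to-self idle loop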
      opd->imm = ((signed int)(op << 20) >> 19);
      opd->imm += pc + 4;
      opd->cycles = 2;
      next_is_delay = 1;
      if (!(opd->dest & BITMASK1(SHR_PR))) {
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2) {
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
          if (opd->imm <= pc)
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        } else
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
      } else
        op_flags[i+1+next_is_delay] |= OF_BTARGET;
      break;

    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        break;
      case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0300: // TRAPA #imm           11000011iiiiiiii
        opd->op = OP_TRAPA;
        opd->source = BITMASK4(SHR_SP, SHR_PC, SHR_SR, SHR_T);
        opd->dest = BITMASK2(SHR_SP, SHR_PC);
        opd->imm = (op & 0xff);
        opd->cycles = 8;
        op_flags[i+1] |= OF_BTARGET;
        break;
      case 0x0700: // MOVA @(disp,PC),R0   11000111dddddddd
        opd->op = OP_MOVA;
        tmp = pc + 2;
        if (op_flags[i] & OF_DELAY_OP) {
          if (ops[i-1].op == OP_BRANCH)
            tmp = ops[i-1].imm;
          else if (ops[i-1].op != OP_BRANCH_N)
            tmp = 0;
        }
        opd->dest = BITMASK1(SHR_R0);
        if (tmp) {
          opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
          if (opd->imm >= base_pc) {
            if (lowest_mova == 0 || opd->imm < lowest_mova)
              lowest_mova = opd->imm;
          }
        }
        break;
      case 0x0800: // TST #imm,R0          11001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        break;
      case 0x0900: // AND #imm,R0          11001001iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0a00: // XOR #imm,R0          11001010iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0b00: // OR #imm,R0           11001011iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        op_flags[i] |= OF_POLL_INSN;
        opd->cycles = 3;
        break;
      case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
      case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
      case 0x0f00: // OR.B #imm,@(R0,GBR)  11001111iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = op & 0xff;
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x0d:
      // MOV.L @(disp,PC),Rn  1101nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 2;
      break;

    /////////////////////////////////////////////
    case 0x0e:
      // MOV #imm,Rn  1110nnnniiiiiiii
      opd->op = OP_LOAD_CONST;
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    default:
    undefined:
      opd->op = OP_UNDEFINED;
      // an unhandled instruction is probably not code if it's not the 1st insn
      if (!(op_flags[i] & OF_DELAY_OP) && pc != base_pc)
        goto end;
      break;
    }

    if (op_flags[i] & OF_DELAY_OP) {
      switch (opd->op) {
      case OP_BRANCH:
      case OP_BRANCH_N:
      case OP_BRANCH_CT:
      case OP_BRANCH_CF:
      case OP_BRANCH_R:
      case OP_BRANCH_RF:
        elprintf(EL_ANOMALY, "%csh2 drc: branch in DS @ %08x",
          is_slave ? 's' : 'm', pc);
        opd->op = OP_UNDEFINED;
        op_flags[i] |= OF_B_IN_DS;
        next_is_delay = 0;
        break;
      }
    } else if (!is_divop && i_div >= 0)
      i_div = -1; // divide parser stop
  }
end:
  i_end = i;
  end_pc = pc;

  // 2nd pass: some analysis
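  // - T flag propagation, resolving conditional branches where T is known
  // - trimming of unreachable code after unconditional branches
  // - verification of the DIV0(S/U)..DIV1..ROTCL divide sequences
  // - literal pool range detection (and the code checksum)
  // - inner loop detection for the loop optimizer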
  lowest_literal = end_literals = lowest_mova = 0;
  t = T_UNKNOWN; // T flag state
  last_btarget = 0;
  op = 0; // delay/poll insns counter
  is_divop = 0; // divide op insns counter
  i_div = -1; // index of current divide op
  for (i = 0, pc = base_pc; i < i_end; i++, pc += 2) {
    opd = &ops[i];
    crc += FETCH_OP(pc);

    // propagate T (TODO: DIV0U)
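    // e.g. a BT right after SETT is always taken and is rewritten to
    // OP_BRANCH, while a BT right after CLRT can never be taken and is
    // rewritten to OP_BRANCH_N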
    if (op_flags[i] & OF_BTARGET)
      t = T_UNKNOWN;

    if ((opd->op == OP_BRANCH_CT && t == T_SET) ||
        (opd->op == OP_BRANCH_CF && t == T_CLEAR)) {
      opd->op = OP_BRANCH;
      opd->cycles = (op_flags[i + 1] & OF_DELAY_OP) ? 2 : 3;
    } else if ((opd->op == OP_BRANCH_CT && t == T_CLEAR) ||
               (opd->op == OP_BRANCH_CF && t == T_SET))
      opd->op = OP_BRANCH_N;
    else if (OP_ISBRACND(opd->op))
      t = (opd->op == OP_BRANCH_CF ? T_SET : T_CLEAR);
    else if (opd->op == OP_SETCLRT)
      t = (opd->imm ? T_SET : T_CLEAR);
    else if (opd->dest & BITMASK1(SHR_T))
      t = T_UNKNOWN;

    // "overscan" detection: unreachable code after unconditional branch
    // this can happen if the insn after a forward branch isn't a local target
    if (OP_ISBRAUC(opd->op)) {
      if (op_flags[i + 1] & OF_DELAY_OP) {
        if (i_end > i + 2 && !(op_flags[i + 2] & OF_BTARGET))
          i_end = i + 2;
      } else {
        if (i_end > i + 1 && !(op_flags[i + 1] & OF_BTARGET))
          i_end = i + 1;
      }
    }
    // divide operation verification:
    // 1. there must not be a branch target inside
    // 2. nothing is in a delay slot (could only be DIV0)
    // 3. DIV0/n*(ROTCL+DIV1)/ROTCL:
    //    div.div1 > 0 && div.rotcl == div.div1+1 && div.rn != div.ro
    // 4. DIV0/n*DIV1/ROTCL:
    //    div.div1 > 0 && div.rotcl == 1 && div.ro == div.rn
    if (i_div >= 0) {
      if (op_flags[i] & OF_BTARGET) { // condition 1
        ops[i_div].imm = 0;
        i_div = -1;
      } else if (--is_divop == 0)
        i_div = -1;
    } else if (opd->op == OP_DIV0) {
      struct div *div = &div(opd);
      is_divop = div->div1 + div->rotcl;
      if (op_flags[i] & OF_DELAY_OP) // condition 2
        opd->imm = 0;
      else if (! div->div1 || ! ((div->ro == div->rn && div->rotcl == 1) ||
               (div->ro != div->rn && div->rotcl == div->div1+1)))
        opd->imm = 0; // condition 3+4
      else if (is_divop)
        i_div = i;
    }

    // literal pool size detection
    if (opd->op == OP_MOVA && opd->imm >= base_pc)
      if (lowest_mova == 0 || opd->imm < lowest_mova)
        lowest_mova = opd->imm;
    if (opd->op == OP_LOAD_POOL) {
      if (opd->imm >= base_pc && opd->imm < end_pc + MAX_LITERAL_OFFSET) {
        if (end_literals < opd->imm + opd->size * 2)
          end_literals = opd->imm + opd->size * 2;
        if (lowest_literal == 0 || lowest_literal > opd->imm)
          lowest_literal = opd->imm;

        if (opd->size == 2) {
          // tweak for NFL: treat a 32bit literal as an address and check if it
          // points to the literal space. In that case handle it like MOVA.
          tmp = FETCH32(opd->imm) & ~0x20000000; // MUST ignore the write-through bit here
          if (tmp >= end_pc && tmp < end_pc + MAX_LITERAL_OFFSET)
            if (lowest_mova == 0 || tmp < lowest_mova)
              lowest_mova = tmp;
        }
      }
    }

#if LOOP_DETECTION
    // inner loop detection
    // 1. a loop always starts with a branch target (for the backwards jump)
    // 2. it doesn't contain more than one polling and/or delaying insn
    // 3. it doesn't contain unconditional jumps
    // 4. no overlapping of loops
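    // a (hypothetical) polling loop matching these conditions:
    //   target: MOV.L @Rm,Rn    ; the single OF_POLL_INSN
    //           TST   Rn,Rn
    //           BT    target    ; immediate target == last_btarget, op <= 1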
    if (op_flags[i] & OF_BTARGET) {
      last_btarget = i; // possible loop starting point
      op = 0;
    }
    // XXX let's hope nobody is putting a delay or poll insn in a delay slot :-/
    if (OP_ISBRAIMM(opd->op)) {
      // BSR, BRA, BT, BF with immediate target
      int i_tmp = (opd->imm - base_pc) / 2; // branch target, index in ops
      if (i_tmp == last_btarget) // candidate for basic loop optimizer
        op_flags[i_tmp] |= OF_BASIC_LOOP;
      if (i_tmp == last_btarget && op <= 1) {
        op_flags[i_tmp] |= OF_LOOP; // conditions met -> mark loop
        last_btarget = i+1; // condition 4
      } else if (opd->op == OP_BRANCH)
        last_btarget = i+1; // condition 3
    }
    else if (OP_ISBRAIND(opd->op))
      // BRAF, BSRF, JMP, JSR: register-indirect, treat as an off-limits jump
      last_btarget = i+1; // condition 3
    else if (op_flags[i] & (OF_POLL_INSN|OF_DELAY_INSN))
      op++; // condition 2
#endif
  }
  end_pc = pc;

  // end_literals is used to decide whether to inline a literal or not
  // XXX: need better detection of whether this is actually used by writes
  if (lowest_literal >= base_pc) {
    if (lowest_literal < end_pc) {
      dbg(1, "warning: lowest_literal=%08x < end_pc=%08x", lowest_literal, end_pc);
      // TODO: does this always mean end_pc covers data?
    }
  }
  if (lowest_mova >= base_pc) {
    if (lowest_mova < end_literals) {
      dbg(1, "warning: mova=%08x < end_literals=%08x", lowest_mova, end_literals);
      end_literals = lowest_mova;
    }
    if (lowest_mova < end_pc) {
      dbg(1, "warning: mova=%08x < end_pc=%08x", lowest_mova, end_pc);
      end_literals = end_pc;
    }
  }
  if (lowest_literal >= end_literals)
    lowest_literal = end_literals;

  if (lowest_literal && end_literals)
    for (pc = lowest_literal; pc < end_literals; pc += 2)
      crc += FETCH_OP(pc);

  *end_pc_out = end_pc;
  if (base_literals_out != NULL)
    *base_literals_out = (lowest_literal ? lowest_literal : end_pc);
  if (end_literals_out != NULL)
    *end_literals_out = (end_literals ? end_literals : end_pc);

  // crc overflow handling, twice to collect all overflows
  crc = (crc & 0xffff) + (crc >> 16);
  crc = (crc & 0xffff) + (crc >> 16);
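  // e.g. crc = 0x0001ffff: 1st fold -> 0x00010000, 2nd fold -> 0x00000001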
  return crc;
}

// vim:shiftwidth=2:ts=2:expandtab