/*
 * SH2 recompiler
 * (C) notaz, 2009,2010,2013
 * (C) kub, 2018,2019,2020
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 *
 * notes:
 * - tcache, block descriptor, block entry buffer overflows result in oldest
 *   blocks being deleted until enough space is available
 * - link and list element buffer overflows result in failure and exit
 * - jumps between blocks are tracked for SMC handling (in block_entry->links),
 *   except jumps from global to CPU-local tcaches
 *
 * implemented:
 * - static register allocation
 * - remaining register caching and tracking in temporaries
 * - block-local branch linking
 * - block linking
 * - some constant propagation
 * - call stack caching for host block entry address
 * - delay, poll, and idle loop detection and handling
 * - some T/M flag optimizations where the value is known or isn't used
 *
 * TODO:
 * - better constant propagation
 * - bug fixing
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include <pico/pico_int.h>
#include <pico/arm_features.h>
#include "sh2.h"
#include "compiler.h"
#include "../drc/cmn.h"
#include "../debug.h"
// features
#define PROPAGATE_CONSTANTS     1
#define LINK_BRANCHES           1
#define BRANCH_CACHE            1
#define CALL_STACK              1
#define ALIAS_REGISTERS         1
#define REMAP_REGISTER          1
#define LOOP_DETECTION          1
#define LOOP_OPTIMIZER          1
#define T_OPTIMIZER             1
#define DIV_OPTIMIZER           0

#define MAX_LITERAL_OFFSET      0x200   // max. MOVA, MOV @(PC) offset
#define MAX_LOCAL_TARGETS       (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES      (BLOCK_INSN_LIMIT / 2)
// debug stuff
// 01 - warnings/errors
// 02 - block info/smc
// 04 - asm
// 08 - runtime block entry log
// 10 - smc self-check
// 20 - runtime block entry counter
// 40 - rcache checking
// 80 - branch cache statistics
// 100 - write trace
// 200 - compare trace
// 400 - block entry backtrace on exit
// 800 - state dump on exit
#ifndef DRC_DEBUG
#define DRC_DEBUG 0//x847
#endif

#if DRC_DEBUG
#define dbg(l,...) { \
  if ((l) & DRC_DEBUG) \
    elprintf(EL_STATUS, ##__VA_ARGS__); \
}
#include "mame/sh2dasm.h"
#include <platform/libpicofe/linux/host_dasm.h>
static int insns_compiled, hash_collisions, host_insn_count;
#define COUNT_OP \
  host_insn_count++
#else // !DRC_DEBUG
#define COUNT_OP
#define dbg(...)
#endif
///
#define FETCH_OP(pc) \
  dr_pc_base[(pc) / 2]
#define FETCH32(a) \
  ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
#define CHECK_UNHANDLED_BITS(mask, label) { \
  if ((op & (mask)) != 0) \
    goto label; \
}
#define GET_Fx() \
  ((op >> 4) & 0x0f)
#define GET_Rm GET_Fx
#define GET_Rn() \
  ((op >> 8) & 0x0f)
#define T      0x00000001
#define S      0x00000002
#define I      0x000000f0
#define Q      0x00000100
#define M      0x00000200
#define T_save 0x00000800

#define I_SHIFT 4
#define Q_SHIFT 8
#define M_SHIFT 9
#define T_SHIFT 11
static struct op_data {
  u8 op;
  u8 cycles;
  u8 size;     // 0, 1, 2 - byte, word, long
  s8 rm;       // branch or load/store data reg
  u32 source;  // bitmask of src regs
  u32 dest;    // bitmask of dest regs
  u32 imm;     // immediate/io address/branch target
               // (for literal - address, not value)
} ops[BLOCK_INSN_LIMIT];
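/* Illustrative sketch (not part of the original source): a decoded
 * "MOV.L @(disp,PC),Rn" literal pool load would end up described roughly as
 *   ops[i].op   = OP_LOAD_POOL;
 *   ops[i].size = 2;                  // longword access
 *   ops[i].rm   = n;                  // destination data reg
 *   ops[i].dest = BITMASK1(SHR_R0 + n);
 *   ops[i].imm  = <address of the literal, not its value>;
 * The exact field values here are assumptions for illustration only. */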
enum op_types {
  OP_UNHANDLED = 0,
  OP_BRANCH,
  OP_BRANCH_N,  // conditional known not to be taken
  OP_BRANCH_CT, // conditional, branch if T set
  OP_BRANCH_CF, // conditional, branch if T clear
  OP_BRANCH_R,  // indirect
  OP_BRANCH_RF, // indirect far (PC + Rm)
  OP_SETCLRT,   // T flag set/clear
  OP_MOVE,      // register move
  OP_LOAD_CONST,// load const to register
  OP_LOAD_POOL, // literal pool load, imm is address
  OP_MOVA,      // MOVA instruction
  OP_SLEEP,     // SLEEP instruction
  OP_RTE,       // RTE instruction
  OP_TRAPA,     // TRAPA instruction
  OP_LDC,       // LDC instruction
  OP_DIV0,      // DIV0[US] instruction
  OP_UNDEFINED,
};
struct div {
  u32 state:1;          // 0: expect DIV1/ROTCL, 1: expect DIV1
  u32 rn:5, rm:5, ro:5; // rn and rm for DIV1, ro for ROTCL
  u32 div1:8, rotcl:8;  // DIV1 count, ROTCL count
};
union _div { u32 imm; struct div div; }; // XXX tut-tut type punning...
#define div(opd) ((union _div *)&((opd)->imm))->div
// XXX consider trap insns: OP_TRAPA, OP_UNDEFINED?
#define OP_ISBRANCH(op) ((BITRANGE(OP_BRANCH, OP_BRANCH_RF) | BITMASK1(OP_RTE)) \
                                & BITMASK1(op))
#define OP_ISBRAUC(op) (BITMASK4(OP_BRANCH, OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
#define OP_ISBRACND(op) (BITMASK2(OP_BRANCH_CT, OP_BRANCH_CF) \
                                & BITMASK1(op))
#define OP_ISBRAIMM(op) (BITMASK3(OP_BRANCH, OP_BRANCH_CT, OP_BRANCH_CF) \
                                & BITMASK1(op))
#define OP_ISBRAIND(op) (BITMASK3(OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
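/* Example (illustration only): OP_ISBRACND(OP_BRANCH_CT) is nonzero because
 * BITMASK2(OP_BRANCH_CT, OP_BRANCH_CF) has the OP_BRANCH_CT bit set; each
 * macro classifies an op type by intersecting its one-bit mask with a set. */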
#ifdef DRC_SH2

#if (DRC_DEBUG & 4)
static u8 *tcache_dsm_ptrs[3];
static char sh2dasm_buff[64];
#define do_host_disasm(tcid) \
  host_dasm(tcache_dsm_ptrs[tcid], emith_insn_ptr() - tcache_dsm_ptrs[tcid]); \
  tcache_dsm_ptrs[tcid] = emith_insn_ptr()
#else
#define do_host_disasm(x)
#endif
#define SH2_DUMP(sh2, reason) { \
  char ms = (sh2)->is_slave ? 's' : 'm'; \
  printf("%csh2 %s %08lx\n", ms, reason, (ulong)(sh2)->pc); \
  printf("%csh2 r0-7 %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", ms, \
    (ulong)(sh2)->r[0], (ulong)(sh2)->r[1], (ulong)(sh2)->r[2], (ulong)(sh2)->r[3], \
    (ulong)(sh2)->r[4], (ulong)(sh2)->r[5], (ulong)(sh2)->r[6], (ulong)(sh2)->r[7]); \
  printf("%csh2 r8-15 %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", ms, \
    (ulong)(sh2)->r[8], (ulong)(sh2)->r[9], (ulong)(sh2)->r[10], (ulong)(sh2)->r[11], \
    (ulong)(sh2)->r[12], (ulong)(sh2)->r[13], (ulong)(sh2)->r[14], (ulong)(sh2)->r[15]); \
  printf("%csh2 pc-ml %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", ms, \
    (ulong)(sh2)->pc, (ulong)(sh2)->ppc, (ulong)(sh2)->pr, (ulong)(sh2)->sr&0xfff, \
    (ulong)(sh2)->gbr, (ulong)(sh2)->vbr, (ulong)(sh2)->mach, (ulong)(sh2)->macl); \
  printf("%csh2 tmp-p %08x %08x %08x %08x %08x %08lx %08x %08x\n", ms, \
    (sh2)->drc_tmp, (sh2)->irq_cycles, \
    (sh2)->pdb_io_csum[0], (sh2)->pdb_io_csum[1], (sh2)->state, \
    (ulong)(sh2)->poll_addr, (sh2)->poll_cycles, (sh2)->poll_cnt); \
}
#if (DRC_DEBUG & (256|512|1024))
static SH2 csh2[2][8];
static FILE *trace[2];
static int topen[2];
#endif
#if (DRC_DEBUG & 8)
static u32 lastpc, lastcnt;
static void *lastblock;
#endif
#if (DRC_DEBUG & (8|256|512|1024)) || defined(PDB)
static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
{
  if (block != NULL) {
#if defined PDB
    dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave?'s':'m',
      sh2->pc, block, ((signed int)sr >> 12)+1);
    pdb_step(sh2, sh2->pc);
#elif (DRC_DEBUG & 8)
    if (lastpc != sh2->pc) {
      if (lastcnt)
        dbg(8, "= %csh2 enter %08x %p (%d times), c=%d", sh2->is_slave?'s':'m',
          lastpc, lastblock, lastcnt, (signed int)sr >> 12);
      dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave?'s':'m',
        sh2->pc, block, (signed int)sr >> 12);
      lastpc = sh2->pc;
      lastblock = block;
      lastcnt = 0;
    } else
      lastcnt++;
#elif (DRC_DEBUG & 256)
    {
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0] && !topen[0]++) {
        trace[0] = fopen("pico.trace0", "wb");
        trace[1] = fopen("pico.trace1", "wb");
      }
      if (trace[idx] && csh2[idx][0].pc != sh2->pc) {
        fwrite(sh2, offsetof(SH2, read8_map), 1, trace[idx]);
        fwrite(&sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx]);
        memcpy(&csh2[idx][0], sh2, offsetof(SH2, poll_cnt)+4);
        csh2[idx][0].is_slave = idx;
      }
    }
#elif (DRC_DEBUG & 512)
    {
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0] && !topen[0]++) {
        trace[0] = fopen("pico.trace0", "rb");
        trace[1] = fopen("pico.trace1", "rb");
      }
      if (trace[idx] && csh2[idx][0].pc != sh2->pc) {
        if (!fread(&fsh2, offsetof(SH2, read8_map), 1, trace[idx]) ||
            !fread(&fsh2.pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx])) {
          printf("trace eof at %08lx\n", ftell(trace[idx]));
          exit(1);
        }
        fsh2.sr = (fsh2.sr & 0xfff) | (sh2->sr & ~0xfff);
        fsh2.is_slave = idx;
        if (memcmp(&fsh2, sh2, offsetof(SH2, read8_map)) ||
            0)//memcmp(&fsh2.pdb_io_csum, &sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum)))
        {
          printf("difference at %08lx!\n", ftell(trace[idx]));
          SH2_DUMP(&fsh2, "file");
          SH2_DUMP(sh2, "current");
          SH2_DUMP(&csh2[idx][0], "previous");
          char *ps = (char *)sh2, *pf = (char *)&fsh2;
          for (idx = 0; idx < offsetof(SH2, read8_map); idx += sizeof(u32))
            if (*(u32 *)(ps+idx) != *(u32 *)(pf+idx))
              printf("diff reg %ld\n", (long)idx/sizeof(u32));
          exit(1);
        }
        csh2[idx][0] = fsh2;
      }
    }
#elif (DRC_DEBUG & 1024)
    {
      int x = sh2->is_slave, i;
      for (i = 0; i < ARRAY_SIZE(csh2[x])-1; i++)
        memcpy(&csh2[x][i], &csh2[x][i+1], offsetof(SH2, poll_cnt)+4);
      memcpy(&csh2[x][ARRAY_SIZE(csh2[x])-1], sh2, offsetof(SH2, poll_cnt)+4);
      csh2[x][0].is_slave = x;
    }
#endif
  }
  return block;
}
#endif
// we have 3 translation cache buffers, split from one drc/cmn buffer.
// BIOS shares tcache with data array because it's only used for init
// and can be discarded early
#define TCACHE_BUFFERS 3

struct ring_buffer {
  u8 *base;         // ring buffer memory
  unsigned item_sz; // size of one buffer item
  unsigned size;    // number of items in ring
  int first, next;  // read and write pointers
  int used;         // number of used items in ring
};
enum { BL_JMP=1, BL_LDJMP, BL_JCCBLX };
struct block_link {
  short tcache_id;
  short type;                // BL_JMP et al
  u32 target_pc;
  void *jump;                // insn address
  void *blx;                 // block link/exit area if any
  u8 jdisp[12];              // jump backup buffer
  struct block_link *next;   // either in block_entry->links or unresolved
  struct block_link *o_next; // ...in block_entry->o_links
  struct block_link *prev;
  struct block_link *o_prev;
  struct block_entry *target;// target block this is linked in (be->links)
};

struct block_entry {
  u32 pc;
  u8 *tcache_ptr;            // translated block for above PC
  struct block_entry *next;  // chain in hash_table with same pc hash
  struct block_entry *prev;
  struct block_link *links;  // incoming links to this entry
  struct block_link *o_links;// outgoing links from this entry
#if (DRC_DEBUG & 2)
  struct block_desc *block;
#endif
#if (DRC_DEBUG & 32)
  int entry_count;
#endif
};

struct block_desc {
  u32 addr;                  // block start SH2 PC address
  u32 addr_lit;              // block start SH2 literal pool addr
  int size;                  // ..of recompiled insns
  int size_lit;              // ..of (insns+)literal pool
  u8 *tcache_ptr;            // start address of block in cache
  u16 crc;                   // crc of insns and literals
  u16 active;                // actively used or deactivated?
  struct block_list *list;
#if (DRC_DEBUG & 2)
  int refcount;
#endif
  int entry_count;
  struct block_entry *entryp;
};

struct block_list {
  struct block_desc *block;  // block reference
  struct block_list *next;   // pointers for doubly linked list
  struct block_list *prev;
  struct block_list **head;  // list head (for removing from list)
  struct block_list *l_next;
};
static u8 *tcache_ptr;       // ptr for code emitters

// XXX: need to tune sizes
static struct ring_buffer tcache_ring[TCACHE_BUFFERS];
static const int tcache_sizes[TCACHE_BUFFERS] = {
  DRC_TCACHE_SIZE * 30 / 32, // ROM (rarely used), DRAM
  DRC_TCACHE_SIZE / 32,      // BIOS, data array in master sh2
  DRC_TCACHE_SIZE / 32,      // ... slave
};

#define BLOCK_MAX_COUNT(tcid)      ((tcid) ? 256 : 32*256)
static struct ring_buffer block_ring[TCACHE_BUFFERS];
static struct block_desc *block_tables[TCACHE_BUFFERS];

#define ENTRY_MAX_COUNT(tcid)      ((tcid) ? 8*512 : 256*512)
static struct ring_buffer entry_ring[TCACHE_BUFFERS];
static struct block_entry *entry_tables[TCACHE_BUFFERS];

// we have block_link_pool to avoid using mallocs
#define BLOCK_LINK_MAX_COUNT(tcid) ((tcid) ? 512 : 32*512)
static struct block_link *block_link_pool[TCACHE_BUFFERS];
static int block_link_pool_counts[TCACHE_BUFFERS];
static struct block_link **unresolved_links[TCACHE_BUFFERS];
static struct block_link *blink_free[TCACHE_BUFFERS];

// used for invalidation
#define RAM_SIZE(tcid) ((tcid) ? 0x1000 : 0x40000)
#define INVAL_PAGE_SIZE 0x100
static struct block_list *inactive_blocks[TCACHE_BUFFERS];

// array of pointers to block_lists for RAM and 2 data arrays
// each array has len: sizeof(mem) / INVAL_PAGE_SIZE
static struct block_list **inval_lookup[TCACHE_BUFFERS];

#define HASH_TABLE_SIZE(tcid) ((tcid) ? 512 : 32*512)
static struct block_entry **hash_tables[TCACHE_BUFFERS];

#define HASH_FUNC(hash_tab, addr, mask) \
  (hash_tab)[((addr) >> 1) & (mask)]

#define BLOCK_LIST_MAX_COUNT (64*1024)
static struct block_list *block_list_pool;
static int block_list_pool_count;
static struct block_list *blist_free;

#if (DRC_DEBUG & 128)
#if BRANCH_CACHE
int bchit, bcmiss;
#endif
#if CALL_STACK
int rchit, rcmiss;
#endif
#endif
// host register tracking
enum cache_reg_htype {
  HRT_TEMP   = 1, // is for temps and args
  HRT_REG    = 2, // is for sh2 regs
};

enum cache_reg_flags {
  HRF_DIRTY  = 1 << 0, // has "dirty" value to be written to ctx
  HRF_PINNED = 1 << 1, // has a pinned mapping
  HRF_S16    = 1 << 2, // has a sign extended 16 bit value
  HRF_U16    = 1 << 3, // has a zero extended 16 bit value
};

enum cache_reg_type {
  HR_FREE,
  HR_CACHED, // vreg has sh2_reg_e
  HR_TEMP,   // reg used for temp storage
};

typedef struct {
  u8 hreg:6;   // "host" reg
  u8 htype:2;  // TEMP or REG?
  u8 flags:4;  // DIRTY, PINNED?
  u8 type:2;   // CACHED or TEMP?
  u8 locked:2; // LOCKED reference counter
  u16 stamp;   // kind of a timestamp
  u32 gregs;   // "guest" reg mask
} cache_reg_t;

// guest register tracking
enum guest_reg_flags {
  GRF_DIRTY  = 1 << 0, // reg has "dirty" value to be written to ctx
  GRF_CONST  = 1 << 1, // reg has a constant
  GRF_CDIRTY = 1 << 2, // constant not yet written to ctx
  GRF_STATIC = 1 << 3, // reg has static mapping to vreg
  GRF_PINNED = 1 << 4, // reg has pinned mapping to vreg
};

typedef struct {
  u8 flags; // guest flags: is constant, is dirty?
  s8 sreg;  // cache reg for static mapping
  s8 vreg;  // cache_reg this is currently mapped to, -1 if not mapped
  s8 cnst;  // const index if this is constant
} guest_reg_t;
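/* Illustrative note (my reading of the structures above, not from the
 * original source): guest_regs[] and cache_regs[] cross-reference each other.
 * If, say, sh2 reg R3 is currently cached via cache reg slot i, then roughly
 *   guest_regs[SHR_R0+3].vreg == i
 *   cache_regs[i].gregs & (1 << (SHR_R0+3))   // bit set in "guest" mask
 * with HRF_DIRTY/GRF_DIRTY tracking whether a ctx writeback is still pending. */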
// possibly needed in code emitter
static int rcache_get_tmp(void);
static void rcache_free_tmp(int hr);

// Note: Register assignment goes by ABI convention. Caller-save registers are
// TEMPORARY, callee-save registers are PRESERVED. Unusable regs are omitted.
// There must be at least as many free (i.e. not context or statically mapped)
// PRESERVED/TEMPORARY registers as the handlers use in the worst case
// (currently 4). There must be at least 3 PARAM, and PARAM+TEMPORARY must be
// at least 4. SR must, and R0 should, by all means be statically mapped.
// XXX the static definition of SR MUST match that in compiler.h
#if defined(__arm__) || defined(_M_ARM)
#include "../drc/emit_arm.c"
#elif defined(__aarch64__) || defined(_M_ARM64)
#include "../drc/emit_arm64.c"
#elif defined(__mips__)
#include "../drc/emit_mips.c"
#elif defined(__riscv__) || defined(__riscv)
#include "../drc/emit_riscv.c"
#elif defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC)
#include "../drc/emit_ppc.c"
#elif defined(__i386__) || defined(_M_IX86)
#include "../drc/emit_x86.c"
#elif defined(__x86_64__) || defined(_M_X64)
#include "../drc/emit_x86.c"
#else
#error unsupported arch
#endif
static const signed char hregs_param[] = PARAM_REGS;
static const signed char hregs_temp [] = TEMPORARY_REGS;
static const signed char hregs_saved[] = PRESERVED_REGS;
static const signed char regs_static[] = STATIC_SH2_REGS;

#define CACHE_REGS \
    (ARRAY_SIZE(hregs_param)+ARRAY_SIZE(hregs_temp)+ARRAY_SIZE(hregs_saved)-1)
static cache_reg_t cache_regs[CACHE_REGS];

static signed char reg_map_host[HOST_REGS];

static guest_reg_t guest_regs[SH2_REGS];

static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void REGPARM(1) (*sh2_drc_dispatcher)(u32 pc);
#if CALL_STACK
static u32  REGPARM(2) (*sh2_drc_dispatcher_call)(u32 pc);
static void REGPARM(1) (*sh2_drc_dispatcher_return)(u32 pc);
#endif
static void REGPARM(1) (*sh2_drc_exit)(u32 pc);
static void (*sh2_drc_test_irq)(void);

static u32  REGPARM(1) (*sh2_drc_read8)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read8_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32_poll)(u32 a);
static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write32)(u32 a, u32 d);

#ifdef DRC_SR_REG
void REGPARM(1) (*sh2_drc_save_sr)(SH2 *sh2);
void REGPARM(1) (*sh2_drc_restore_sr)(SH2 *sh2);
#endif
// flags for memory access
#define MF_SIZEMASK 0x03        // size of access
#define MF_POSTINCR 0x10        // post increment (for read_rr)
#define MF_PREDECR  MF_POSTINCR // pre decrement (for write_rr)
#define MF_POLLING  0x20        // include polling check in read
// address space stuff
static int dr_is_rom(u32 a)
{
  // tweak for WWF Raw which writes data to some high ROM addresses
  return (a & 0xc6000000) == 0x02000000 && (a & 0x3f0000) < 0x3e0000;
}
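/* A rough reading of the check above (my interpretation, not an
 * authoritative address map): (a & 0xc6000000) == 0x02000000 selects the ROM
 * area at 0x02000000 and its mirrors (address bits 27-29 fall outside the
 * mask), while (a & 0x3f0000) < 0x3e0000 excludes the top 0x20000 of the 4MB
 * bank, which WWF Raw writes to and which must therefore not count as ROM. */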
static int dr_ctx_get_mem_ptr(SH2 *sh2, u32 a, u32 *mask)
{
  void *memptr;
  int poffs = -1;

  // check if region is mapped memory
  memptr = p32x_sh2_get_mem_ptr(a, mask, sh2);
  if (memptr == NULL)
    return poffs;

  if (memptr == sh2->p_bios)       // BIOS
    poffs = offsetof(SH2, p_bios);
  else if (memptr == sh2->p_da)    // data array
    poffs = offsetof(SH2, p_da);
  else if (memptr == sh2->p_sdram) // SDRAM
    poffs = offsetof(SH2, p_sdram);
  else if (memptr == sh2->p_rom)   // ROM
    poffs = offsetof(SH2, p_rom);

  return poffs;
}
static int dr_get_tcache_id(u32 pc, int is_slave)
{
  u32 tcid = 0;

  if ((pc & 0xe0000000) == 0xc0000000)
    tcid = 1 + is_slave; // data array
  if ((pc & ~0xfff) == 0)
    tcid = 1 + is_slave; // BIOS
  return tcid;
}
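/* Resulting id mapping (as set up here and in tcache_sizes above):
 * 0 = shared ROM/SDRAM tcache, 1 = master-local tcache (data array/BIOS),
 * 2 = the slave equivalent. Example:
 *   dr_get_tcache_id(0xc0000100, 1) == 2   // slave data array */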
static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
{
  struct block_entry *be;

  *tcache_id = dr_get_tcache_id(pc, is_slave);

  be = HASH_FUNC(hash_tables[*tcache_id], pc, HASH_TABLE_SIZE(*tcache_id) - 1);
  if (be != NULL) // don't ask... gcc code generation hint
    for (; be != NULL; be = be->next)
      if (be->pc == pc)
        return be;

  return NULL;
}
// ---------------------------------------------------------------

// ring buffer management
#define RING_INIT(r,m,n) *(r) = (struct ring_buffer) { .base = (u8 *)m, \
                                     .item_sz = sizeof(*(m)), .size = n };

static void *ring_alloc(struct ring_buffer *rb, int count)
{
  // allocate space in ring buffer
  void *p;

  p = rb->base + rb->next * rb->item_sz;
  if (rb->next+count > rb->size) {
    rb->used += rb->size - rb->next;
    p = rb->base; // wrap if overflow at end
    rb->next = count;
  } else {
    rb->next += count;
    if (rb->next == rb->size) rb->next = 0;
  }
  rb->used += count;

  return p;
}

static void ring_wrap(struct ring_buffer *rb)
{
  // insufficient space at end of buffer memory, wrap around
  rb->used += rb->size - rb->next;
  rb->next = 0;
}

static void ring_free(struct ring_buffer *rb, int count)
{
  // free oldest space in ring buffer
  rb->first += count;
  if (rb->first >= rb->size) rb->first -= rb->size;
  rb->used -= count;
}
static void ring_free_p(struct ring_buffer *rb, void *p)
{
  // free ring buffer space up to the given pointer
  rb->first = ((u8 *)p - rb->base) / rb->item_sz;
  rb->used = rb->next - rb->first;
  if (rb->used < 0) rb->used += rb->size;
}
static void *ring_reset(struct ring_buffer *rb)
{
  // reset to initial state
  rb->first = rb->next = rb->used = 0;
  return rb->base + rb->next * rb->item_sz;
}

static void *ring_first(struct ring_buffer *rb)
{
  return rb->base + rb->first * rb->item_sz;
}

static void *ring_next(struct ring_buffer *rb)
{
  return rb->base + rb->next * rb->item_sz;
}
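/* Sketch of the typical allocation pattern for these rings (assumes a ring
 * initialized via RING_INIT, e.g. block_ring[tcid]; not a verbatim quote from
 * later in this file):
 *
 *   struct block_desc *bd;
 *   while (block_ring[tcid].used + 1 > block_ring[tcid].size)
 *     ...retire the oldest block(s), then ring_free(&block_ring[tcid], n)...
 *   bd = ring_alloc(&block_ring[tcid], 1);  // may wrap to buffer start
 *
 * ring_alloc() only hands out contiguous space: a request that doesn't fit
 * before the end of the buffer wraps and returns the base instead, which is
 * why callers must retire old items (oldest-first) before allocating. */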
// block management
static void add_to_block_list(struct block_list **blist, struct block_desc *block)
{
  struct block_list *added;

  if (blist_free) {
    added = blist_free;
    blist_free = added->next;
  } else if (block_list_pool_count >= BLOCK_LIST_MAX_COUNT) {
    printf("block list overflow\n");
    exit(1);
  } else {
    added = block_list_pool + block_list_pool_count;
    block_list_pool_count++;
  }

  added->block = block;
  added->l_next = block->list;
  block->list = added;
  added->head = blist;

  added->prev = NULL;
  if (*blist)
    (*blist)->prev = added;
  added->next = *blist;
  *blist = added;
}
static void rm_from_block_lists(struct block_desc *block)
{
  struct block_list *entry;

  entry = block->list;
  while (entry != NULL) {
    if (entry->prev != NULL)
      entry->prev->next = entry->next;
    else
      *(entry->head) = entry->next;
    if (entry->next != NULL)
      entry->next->prev = entry->prev;

    entry->next = blist_free;
    blist_free = entry;

    entry = entry->l_next;
  }
  block->list = NULL;
}

static void discard_block_list(struct block_list **blist)
{
  struct block_list *next, *current = *blist;
  while (current != NULL) {
    next = current->next;
    current->next = blist_free;
    blist_free = current;
    current = next;
  }
  *blist = NULL;
}
static void add_to_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

  be->prev = NULL;
  if (*head)
    (*head)->prev = be;
  be->next = *head;
  *head = be;

#if (DRC_DEBUG & 2)
  if (be->next != NULL) {
    printf(" %08lx@%p: entry hash collision with %08lx@%p\n",
      (ulong)be->pc, be->tcache_ptr, (ulong)be->next->pc, be->next->tcache_ptr);
    hash_collisions++;
  }
#endif
}
static void rm_from_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

#if DRC_DEBUG & 1
  struct block_entry *current = be;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist @%p: be %p %08x missing?", head, be, be->pc);
#endif

  if (be->prev != NULL)
    be->prev->next = be->next;
  else
    *head = be->next;
  if (be->next != NULL)
    be->next->prev = be->prev;
}
#if LINK_BRANCHES
static void add_to_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = *head;
  while (current != NULL && current != bl)
    current = current->next;
  if (current == bl)
    dbg(1, "add_to_hashlist_unresolved @%p: bl %p %p %08x already in?", head, bl, bl->target, bl->target_pc);
#endif

  bl->target = NULL; // marker for not resolved
  bl->prev = NULL;
  if (*head)
    (*head)->prev = bl;
  bl->next = *head;
  *head = bl;
}

static void rm_from_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = bl;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist_unresolved @%p: bl %p %p %08x missing?", head, bl, bl->target, bl->target_pc);
#endif

  if (bl->prev != NULL)
    bl->prev->next = bl->next;
  else
    *head = bl->next;
  if (bl->next != NULL)
    bl->next->prev = bl->prev;
}
static void dr_block_link(struct block_entry *be, struct block_link *bl, int emit_jump)
{
  dbg(2, "- %slink from %p to pc %08x entry %p", emit_jump ? "" : "early ",
    bl->jump, bl->target_pc, be->tcache_ptr);

  if (emit_jump) {
    u8 *jump = bl->jump;
    int jsz = emith_jump_patch_size();
    if (bl->type == BL_JMP) { // patch: jump @entry
      // inlined: @jump far jump to target
      emith_jump_patch(jump, be->tcache_ptr, &jump);
    } else if (bl->type == BL_LDJMP) { // write: jump @entry
      // inlined: @jump far jump to target
      emith_jump_at(jump, be->tcache_ptr);
      jsz = emith_jump_at_size();
    } else if (bl->type == BL_JCCBLX) { // patch: jump cond -> jump @entry
      if (emith_jump_patch_inrange(bl->jump, be->tcache_ptr)) {
        // inlined: @jump near jumpcc to target
        emith_jump_patch(jump, be->tcache_ptr, &jump);
      } else { // dispatcher cond immediate
        // via blx: @jump near jumpcc to blx; @blx far jump
        emith_jump_patch(jump, bl->blx, &jump);
        emith_jump_at(bl->blx, be->tcache_ptr);
        host_instructions_updated(bl->blx, (char *)bl->blx + emith_jump_at_size(),
            ((uintptr_t)bl->blx & 0x1f) + emith_jump_at_size()-1 > 0x1f);
      }
    } else {
      printf("unknown BL type %d\n", bl->type);
      exit(1);
    }
    host_instructions_updated(jump, jump + jsz, ((uintptr_t)jump & 0x1f) + jsz-1 > 0x1f);
  }

  // move bl to block_entry
  bl->target = be;
  bl->prev = NULL;
  if (be->links)
    be->links->prev = bl;
  bl->next = be->links;
  be->links = bl;
}
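/* Summarizing the three link flavors handled above (restating the cases):
 * BL_JMP patches an existing direct jump in place, BL_LDJMP overwrites a
 * "load pc + jump to dispatcher" sequence with a direct jump, and BL_JCCBLX
 * patches a conditional jump either directly (if the target is in range) or
 * via its blx trampoline. In all cases the host icache is flushed for the
 * patched range via host_instructions_updated(). */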
static void dr_block_unlink(struct block_link *bl, int emit_jump)
{
  dbg(2, "- unlink from %p to pc %08x", bl->jump, bl->target_pc);

  if (bl->target) {
    if (emit_jump) {
      u8 *jump = bl->jump;
      int jsz = emith_jump_patch_size();
      if (bl->type == BL_JMP) { // jump_patch @dispatcher
        // inlined: @jump far jump to dispatcher
        emith_jump_patch(jump, sh2_drc_dispatcher, &jump);
      } else if (bl->type == BL_LDJMP) { // restore: load pc, jump @dispatcher
        // inlined: @jump load target_pc, far jump to dispatcher
        memcpy(jump, bl->jdisp, emith_jump_at_size());
        jsz = emith_jump_at_size();
      } else if (bl->type == BL_JCCBLX) { // jump cond @blx; @blx: load pc, jump
        // via blx: @jump near jumpcc to blx; @blx load target_pc, far jump
        emith_jump_patch(bl->jump, bl->blx, &jump);
        memcpy(bl->blx, bl->jdisp, emith_jump_at_size());
        host_instructions_updated(bl->blx, (char *)bl->blx + emith_jump_at_size(), 1);
      } else {
        printf("unknown BL type %d\n", bl->type);
        exit(1);
      }
      // update cpu caches since the previous jump target doesn't exist anymore
      host_instructions_updated(jump, jump + jsz, 1);
    }

    if (bl->prev)
      bl->prev->next = bl->next;
    else
      bl->target->links = bl->next;
    if (bl->next)
      bl->next->prev = bl->prev;
    bl->target = NULL;
  }
}
#endif
static struct block_link *dr_prepare_ext_branch(struct block_entry *owner, u32 pc, int is_slave, int tcache_id)
{
#if LINK_BRANCHES
  struct block_link *bl = block_link_pool[tcache_id];
  int cnt = block_link_pool_counts[tcache_id];
  int target_tcache_id;

  // get the target block entry
  target_tcache_id = dr_get_tcache_id(pc, is_slave);
  if (target_tcache_id && target_tcache_id != tcache_id)
    return NULL;

  // get a block link
  if (blink_free[tcache_id] != NULL) {
    bl = blink_free[tcache_id];
    blink_free[tcache_id] = bl->next;
  } else if (cnt >= BLOCK_LINK_MAX_COUNT(tcache_id)) {
    dbg(1, "bl overflow for tcache %d", tcache_id);
    return NULL;
  } else {
    bl += cnt;
    block_link_pool_counts[tcache_id] = cnt+1;
  }

  // prepare link and add to outgoing list of owner
  bl->tcache_id = tcache_id;
  bl->target_pc = pc;
  bl->jump = tcache_ptr;
  bl->blx = NULL;
  bl->o_next = owner->o_links;
  owner->o_links = bl;

  add_to_hashlist_unresolved(bl, tcache_id);
  return bl;
#else
  return NULL;
#endif
}

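/*
 * Links are recycled from blink_free[] first and only then carved from the
 * per-tcache pool; dr_rm_block_entry() pushes revoked links back onto the
 * freelist, so block_link_pool_counts[] only tracks the pool's high-water
 * mark, bounded by BLOCK_LINK_MAX_COUNT().
 */
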
static void dr_mark_memory(int mark, struct block_desc *block, int tcache_id, u32 nolit)
{
  u8 *drc_ram_blk = NULL, *lit_ram_blk = NULL;
  u32 addr, end, mask = 0, shift = 0, idx;

  // mark memory blocks as containing compiled code
  if ((block->addr & 0xc7fc0000) == 0x06000000
      || (block->addr & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      drc_ram_blk = Pico32xMem->drcblk_da[tcache_id-1];
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      drc_ram_blk = Pico32xMem->drcblk_ram;
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    // mark recompiled insns
    addr = block->addr & ~((1 << shift) - 1);
    end = block->addr + block->size;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark literal pool
    if (addr < (block->addr_lit & ~((1 << shift) - 1)))
      addr = block->addr_lit & ~((1 << shift) - 1);
    end = block->addr_lit + block->size_lit;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark for literals disabled
    if (nolit) {
      addr = nolit & ~((1 << shift) - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
        lit_ram_blk[idx++] = 1;
    }

    if (mark < 0)
      rm_from_block_lists(block);
    else {
      // add to invalidation lookup lists
      addr = block->addr & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr + block->size;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
      if (addr < (block->addr_lit & ~(INVAL_PAGE_SIZE - 1)))
        addr = block->addr_lit & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
    }
  }
}

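/*
 * drc_ram_blk[] is a per-granule reference count ((1 << shift) bytes per
 * entry): activating a block passes mark = 1 and bumps the count for every
 * granule its code and literals touch, removal passes mark = -1. A nonzero
 * count means "compiled code was taken from here", so a guest write to such
 * a granule must invalidate blocks via inval_lookup[]. Only SDRAM
 * (0x06000000) and the data arrays (0xc0000000) are tracked; other regions
 * (e.g. ROM) aren't checked for overwrites.
 */
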
static u32 dr_check_nolit(u32 start, u32 end, int tcache_id)
{
  u8 *lit_ram_blk = NULL;
  u32 mask = 0, shift = 0, addr, idx;

  if ((start & 0xc7fc0000) == 0x06000000
      || (start & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    addr = start & ~((1 << shift) - 1);
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      if (lit_ram_blk[idx++])
        break;

    return (addr < start ? start : addr > end ? end : addr);
  }

  return end;
}

static void dr_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit, int free)
{
  struct block_link *bl;
  u32 i;

  free = free || nolit; // block is invalid if literals are overwritten
  dbg(2," %sing block %08x-%08x,%08x-%08x, blkid %d,%d", free?"delet":"disabl",
    bd->addr, bd->addr + bd->size, bd->addr_lit, bd->addr_lit + bd->size_lit,
    tcache_id, bd - block_tables[tcache_id]);
  if (bd->addr == 0 || bd->entry_count == 0) {
    dbg(1, " killing dead block!? %08x", bd->addr);
    return;
  }

  // remove from hash table, make incoming links unresolved
  if (bd->active) {
    for (i = 0; i < bd->entry_count; i++) {
      rm_from_hashlist(&bd->entryp[i], tcache_id);

#if LINK_BRANCHES
      while ((bl = bd->entryp[i].links) != NULL) {
        dr_block_unlink(bl, 1);
        add_to_hashlist_unresolved(bl, tcache_id);
      }
#endif
    }

    dr_mark_memory(-1, bd, tcache_id, nolit);
    add_to_block_list(&inactive_blocks[tcache_id], bd);
  }
  bd->active = 0;

  if (free) {
#if LINK_BRANCHES
    // revoke outgoing links
    for (bl = bd->entryp[0].o_links; bl != NULL; bl = bl->o_next) {
      if (bl->target)
        dr_block_unlink(bl, 0);
      else
        rm_from_hashlist_unresolved(bl, tcache_id);
      bl->jump = NULL;
      bl->next = blink_free[bl->tcache_id];
      blink_free[bl->tcache_id] = bl;
    }
    bd->entryp[0].o_links = NULL;
#endif
    // invalidate block
    rm_from_block_lists(bd);
    bd->addr = bd->size = bd->addr_lit = bd->size_lit = 0;
    bd->entry_count = 0;
  }
  emith_update_cache();
}

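/*
 * Two-stage removal: a block hit by a write is first only deactivated
 * (unhashed, incoming links reverted to unresolved, parked on
 * inactive_blocks[]); if the same code reappears unchanged, it can be
 * revived cheaply via the CRC match in dr_find_inactive_block() below.
 * Overwritten literals (nolit) or tcache reclaim force the full delete
 * path, which also returns all outgoing links to the freelist.
 */
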
static struct block_desc *dr_find_inactive_block(int tcache_id, u16 crc,
  u32 addr, int size, u32 addr_lit, int size_lit)
{
  struct block_list **head = &inactive_blocks[tcache_id];
  struct block_list *current;

  for (current = *head; current != NULL; current = current->next) {
    struct block_desc *block = current->block;
    if (block->crc == crc && block->addr == addr && block->size == size &&
        block->addr_lit == addr_lit && block->size_lit == size_lit)
    {
      rm_from_block_lists(block);
      return block;
    }
  }
  return NULL;
}

static struct block_desc *dr_add_block(int entries, u32 addr, int size,
  u32 addr_lit, int size_lit, u16 crc, int is_slave, int *blk_id)
{
  struct block_entry *be;
  struct block_desc *bd;
  int tcache_id;

  // do a lookup to get tcache_id and override check
  be = dr_get_entry(addr, is_slave, &tcache_id);
  if (be != NULL)
    dbg(1, "block override for %08x", addr);

  if (block_ring[tcache_id].used + 1 > block_ring[tcache_id].size ||
      entry_ring[tcache_id].used + entries > entry_ring[tcache_id].size) {
    dbg(1, "bd overflow for tcache %d", tcache_id);
    return NULL;
  }

  *blk_id = block_ring[tcache_id].next;
  bd = ring_alloc(&block_ring[tcache_id], 1);
  bd->entryp = ring_alloc(&entry_ring[tcache_id], entries);

  bd->addr = addr;
  bd->size = size;
  bd->addr_lit = addr_lit;
  bd->size_lit = size_lit;
  bd->tcache_ptr = tcache_ptr;
  bd->crc = crc;
  bd->active = 0;
  bd->list = NULL;
  bd->entry_count = 0;
#if (DRC_DEBUG & 2)
  bd->refcount = 0;
#endif

  return bd;
}

static void dr_link_blocks(struct block_entry *be, int tcache_id)
{
#if LINK_BRANCHES
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  u32 pc = be->pc;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], pc, tcmask);
  struct block_link *bl = *head, *next;

  while (bl != NULL) {
    next = bl->next;
    if (bl->target_pc == pc && (!bl->tcache_id || bl->tcache_id == tcache_id)) {
      rm_from_hashlist_unresolved(bl, bl->tcache_id);
      dr_block_link(be, bl, 1);
    }
    bl = next;
  }
#endif
}

static void dr_link_outgoing(struct block_entry *be, int tcache_id, int is_slave)
{
#if LINK_BRANCHES
  struct block_link *bl;
  int target_tcache_id;

  for (bl = be->o_links; bl; bl = bl->o_next) {
    if (bl->target == NULL) {
      be = dr_get_entry(bl->target_pc, is_slave, &target_tcache_id);
      if (be != NULL && (!target_tcache_id || target_tcache_id == tcache_id)) {
        // remove bl from unresolved_links (it must be there since target is NULL)
        rm_from_hashlist_unresolved(bl, bl->tcache_id);
        dr_block_link(be, bl, 1);
      }
    }
  }
#endif
}

static void dr_activate_block(struct block_desc *bd, int tcache_id, int is_slave)
{
  int i;

  // connect branches
  for (i = 0; i < bd->entry_count; i++) {
    struct block_entry *entry = &bd->entryp[i];
    add_to_hashlist(entry, tcache_id);
    // incoming branches
    dr_link_blocks(entry, tcache_id);
    if (!tcache_id)
      dr_link_blocks(entry, is_slave?2:1);
    // outgoing branches
    dr_link_outgoing(entry, tcache_id, is_slave);
  }

  // mark memory for overwrite detection
  dr_mark_memory(1, bd, tcache_id, 0);
  bd->active = 1;
}

static void REGPARM(3) *dr_lookup_block(u32 pc, SH2 *sh2, int *tcache_id)
{
  struct block_entry *be = NULL;
  void *block = NULL;

  be = dr_get_entry(pc, sh2->is_slave, tcache_id);
  if (be != NULL)
    block = be->tcache_ptr;

#if (DRC_DEBUG & 2)
  if (be != NULL)
    be->block->refcount++;
#endif
  return block;
}

static void dr_free_oldest_block(int tcache_id)
{
  struct block_desc *bf;

  bf = ring_first(&block_ring[tcache_id]);
  if (bf->addr && bf->entry_count)
    dr_rm_block_entry(bf, tcache_id, 0, 1);
  ring_free(&block_ring[tcache_id], 1);

  if (block_ring[tcache_id].used) {
    bf = ring_first(&block_ring[tcache_id]);
    ring_free_p(&entry_ring[tcache_id], bf->entryp);
    ring_free_p(&tcache_ring[tcache_id], bf->tcache_ptr);
  } else {
    // reset since size of code block isn't known if no successor block exists
    ring_reset(&block_ring[tcache_id]);
    ring_reset(&entry_ring[tcache_id]);
    ring_reset(&tcache_ring[tcache_id]);
  }
}

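/*
 * Eviction is strictly FIFO: descriptors, entry arrays and generated code
 * live in parallel ring buffers filled in the same order, so freeing the
 * oldest descriptor and advancing entry_ring/tcache_ring up to the next
 * surviving block's pointers releases all three allocations in lockstep.
 */
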
static inline void dr_reserve_cache(int tcache_id, struct ring_buffer *rb, int count)
{
  // while not enough space available
  if (rb->next + count >= rb->size) {
    // not enough space in rest of buffer -> wrap around
    while (rb->first >= rb->next && rb->used)
      dr_free_oldest_block(tcache_id);
    if (rb->first == 0 && rb->used)
      dr_free_oldest_block(tcache_id);
    ring_wrap(rb);
  }
  while (rb->first >= rb->next && rb->next + count > rb->first && rb->used)
    dr_free_oldest_block(tcache_id);
}

static u8 *dr_prepare_cache(int tcache_id, int insn_count, int entry_count)
{
  int bf = block_ring[tcache_id].first;

  // reserve one block desc
  if (block_ring[tcache_id].used >= block_ring[tcache_id].size)
    dr_free_oldest_block(tcache_id);
  // reserve block entries
  dr_reserve_cache(tcache_id, &entry_ring[tcache_id], entry_count);
  // reserve cache space
  dr_reserve_cache(tcache_id, &tcache_ring[tcache_id], insn_count*128);

  if (bf != block_ring[tcache_id].first) {
    // deleted some block(s), clear branch cache and return stack
#if BRANCH_CACHE
    if (tcache_id)
      memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    else {
      memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
      memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
    }
#endif
#if CALL_STACK
    if (tcache_id) {
      memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      sh2s[tcache_id-1].rts_cache_idx = 0;
    } else {
      memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
      sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
    }
#endif
  }
  return ring_next(&tcache_ring[tcache_id]);
}

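/*
 * insn_count*128 looks like a worst-case estimate of generated bytes per
 * SH2 insn; the compiler emits into this reservation and surplus space
 * remains available for the next block. Whenever reserving had to evict
 * blocks, the branch caches and rts_cache return stacks may still hold
 * pointers into the freed code, so they are wiped wholesale rather than
 * scrubbed selectively.
 */
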
static void dr_flush_tcache(int tcid)
{
  int i;
#if (DRC_DEBUG & 1)
  elprintf(EL_STATUS, "tcache #%d flush! (%d/%d, bds %d/%d bes %d/%d)", tcid,
    tcache_ring[tcid].used, tcache_ring[tcid].size, block_ring[tcid].used,
    block_ring[tcid].size, entry_ring[tcid].used, entry_ring[tcid].size);
#endif

  ring_reset(&tcache_ring[tcid]);
  ring_reset(&block_ring[tcid]);
  ring_reset(&entry_ring[tcid]);

  block_link_pool_counts[tcid] = 0;
  blink_free[tcid] = NULL;
  memset(unresolved_links[tcid], 0, sizeof(*unresolved_links[0]) * HASH_TABLE_SIZE(tcid));
  memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * HASH_TABLE_SIZE(tcid));

  if (tcid == 0) { // ROM, RAM
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache));
    memset(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    memset(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache));
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  } else {
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[tcid - 1]));
    memset(Pico32xMem->drclit_da[tcid - 1], 0, sizeof(Pico32xMem->drclit_da[tcid - 1]));
    memset(sh2s[tcid - 1].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[tcid - 1].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    sh2s[tcid - 1].rts_cache_idx = 0;
  }
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcid] = tcache_ring[tcid].base;
#endif

  for (i = 0; i < RAM_SIZE(tcid) / INVAL_PAGE_SIZE; i++)
    discard_block_list(&inval_lookup[tcid][i]);
  discard_block_list(&inactive_blocks[tcid]);
}

static void *dr_failure(void)
{
  printf("recompilation failed\n");
  exit(1);
}

// ---------------------------------------------------------------

// NB rcache allocation dependencies:
// - get_reg_arg/get_tmp_arg first (might evict other regs just allocated)
// - get_reg(..., NULL) before get_reg(..., &hr) if it might get the same reg
// - get_reg(..., RC_GR_READ/RMW, ...) before WRITE (might evict needed reg)

// register cache / constant propagation stuff
typedef enum {
  RC_GR_READ,
  RC_GR_WRITE,
  RC_GR_RMW,
} rc_gr_mode;

typedef struct {
  u32 gregs;
  u32 val;
} gconst_t;

gconst_t gconsts[ARRAY_SIZE(guest_regs)];

static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr);
static inline int rcache_is_cached(sh2_reg_e r);
static void rcache_add_vreg_alias(int x, sh2_reg_e r);
static void rcache_remove_vreg_alias(int x, sh2_reg_e r);
static void rcache_evict_vreg(int x);
static void rcache_remap_vreg(int x);
static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr);

static void rcache_set_x16(int hr, int s16_, int u16_)
{
  int x = reg_map_host[hr];
  if (x >= 0) {
    cache_regs[x].flags &= ~(HRF_S16|HRF_U16);
    if (s16_) cache_regs[x].flags |= HRF_S16;
    if (u16_) cache_regs[x].flags |= HRF_U16;
  }
}

static void rcache_copy_x16(int hr, int hr2)
{
  int x = reg_map_host[hr], y = reg_map_host[hr2];
  if (x >= 0 && y >= 0) {
    cache_regs[x].flags = (cache_regs[x].flags & ~(HRF_S16|HRF_U16)) |
        (cache_regs[y].flags & (HRF_S16|HRF_U16));
  }
}

static int rcache_is_s16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_S16 : 0);
}

static int rcache_is_u16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_U16 : 0);
}

#define RCACHE_DUMP(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i; \
  printf("cache dump %s:\n",msg); \
  printf(" cache_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->type != HR_FREE || cp->gregs || cp->locked || cp->flags) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%lx\n", i, cp->hreg, cp->type, cp->flags, cp->locked, (ulong)cp->gregs); \
  } \
  printf(" guest_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 || gp->sreg >= 0 || gp->flags) \
      printf(" %d: v=%d f=%x s=%d c=%d\n", i, gp->vreg, gp->flags, gp->sreg, gp->cnst); \
  } \
  printf(" gconsts:\n"); \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    if (gconsts[i].gregs) \
      printf(" %d: m=%lx v=%lx\n", i, (ulong)gconsts[i].gregs, (ulong)gconsts[i].val); \
  } \
}

#define RCACHE_CHECK(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i, x, m = 0, d = 0; \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->flags & HRF_PINNED) m |= (1 << i); \
    if (cp->type == HR_FREE || cp->type == HR_TEMP) continue; \
    /* check connectivity greg->vreg */ \
    FOR_ALL_BITS_SET_DO(cp->gregs, x, \
      if (guest_regs[x].vreg != i) \
        { d = 1; printf("cache check v=%d r=%d not connected?\n",i,x); } \
    ) \
  } \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 && !(cache_regs[gp->vreg].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d v=%d not connected?\n", i, gp->vreg); }\
    if (gp->vreg != -1 && cache_regs[gp->vreg].type != HR_CACHED) \
      { d = 1; printf("cache check r=%d v=%d wrong type?\n", i, gp->vreg); }\
    if ((gp->flags & GRF_CONST) && !(gconsts[gp->cnst].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d c=%d not connected?\n", i, gp->cnst); }\
    if ((gp->flags & GRF_CDIRTY) && (gp->vreg != -1 || !(gp->flags & GRF_CONST)))\
      { d = 1; printf("cache check r=%d CDIRTY?\n", i); } \
    if (gp->flags & (GRF_STATIC|GRF_PINNED)) { \
      if (gp->sreg == -1 || !(cache_regs[gp->sreg].flags & HRF_PINNED))\
        { d = 1; printf("cache check r=%d v=%d not pinned?\n", i, gp->vreg); } \
      else m &= ~(1 << gp->sreg); \
    } \
  } \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    FOR_ALL_BITS_SET_DO(gconsts[i].gregs, x, \
      if (guest_regs[x].cnst != i || !(guest_regs[x].flags & GRF_CONST)) \
        { d = 1; printf("cache check c=%d v=%d not connected?\n",i,x); } \
    ) \
  } \
  if (m) \
    { d = 1; printf("cache check m=%x pinning wrong?\n",m); } \
  if (d) RCACHE_DUMP(msg) \
/* else { \
  printf("locked regs %s:\n",msg); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->locked) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
  } \
} */ \
}

static inline int gconst_alloc(sh2_reg_e r)
{
  int i, n = -1;

  for (i = 0; i < ARRAY_SIZE(gconsts); i++) {
    gconsts[i].gregs &= ~(1 << r);
    if (gconsts[i].gregs == 0 && n < 0)
      n = i;
  }
  if (n >= 0)
    gconsts[n].gregs = (1 << r);
  else {
    printf("all gconst buffers in use, aborting\n");
    exit(1); // cannot happen - more constants than guest regs?
  }
  return n;
}

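/*
 * gconsts[] has one slot per guest register, and gconst_alloc() first
 * drops r from every slot's owner mask, so a free slot must always exist.
 * Several guest regs holding the same value share a single slot through
 * the gregs bitmask (see gconst_copy below), which is what makes constant
 * aliasing cheap.
 */
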
static void gconst_set(sh2_reg_e r, u32 val)
{
  int i = gconst_alloc(r);

  guest_regs[r].flags |= GRF_CONST;
  guest_regs[r].cnst = i;
  gconsts[i].val = val;
}

static void gconst_new(sh2_reg_e r, u32 val)
{
  gconst_set(r, val);
  guest_regs[r].flags |= GRF_CDIRTY;

  // throw away old r that we might have cached
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
}

static int gconst_get(sh2_reg_e r, u32 *val)
{
  if (guest_regs[r].flags & GRF_CONST) {
    *val = gconsts[guest_regs[r].cnst].val;
    return 1;
  }
  *val = 0;
  return 0;
}

static int gconst_check(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    return 1;
  return 0;
}

// update hr if dirty, else do nothing
static int gconst_try_read(int vreg, sh2_reg_e r)
{
  int i, x;
  u32 v;

  if (guest_regs[r].flags & GRF_CDIRTY) {
    x = guest_regs[r].cnst;
    v = gconsts[x].val;
    emith_move_r_imm(cache_regs[vreg].hreg, v);
    rcache_set_x16(cache_regs[vreg].hreg, v == (s16)v, v == (u16)v);
    FOR_ALL_BITS_SET_DO(gconsts[x].gregs, i,
      {
        if (guest_regs[i].vreg >= 0 && guest_regs[i].vreg != vreg)
          rcache_remove_vreg_alias(guest_regs[i].vreg, i);
        if (guest_regs[i].vreg < 0)
          rcache_add_vreg_alias(vreg, i);
        guest_regs[i].flags &= ~GRF_CDIRTY;
        guest_regs[i].flags |= GRF_DIRTY;
      });
    cache_regs[vreg].type = HR_CACHED;
    cache_regs[vreg].flags |= HRF_DIRTY;
    return 1;
  }
  return 0;
}

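/*
 * Constant life cycle: gconst_new() only records the value and sets
 * GRF_CONST|GRF_CDIRTY, emitting no code. The immediate is materialized
 * here on the first cached read, which clears CDIRTY, attaches every reg
 * sharing the constant to the same vreg, and leaves them ordinarily dirty.
 * For constants that are never read back, gconst_clean() forces this
 * materialization (and the eventual writeback) before the block ends.
 */
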
static u32 gconst_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY)
      mask |= (1 << i);
  return mask;
}

static void gconst_kill(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    gconsts[guest_regs[r].cnst].gregs &= ~(1 << r);
  guest_regs[r].flags &= ~(GRF_CONST|GRF_CDIRTY);
}

static void gconst_copy(sh2_reg_e rd, sh2_reg_e rs)
{
  gconst_kill(rd);
  if (guest_regs[rs].flags & GRF_CONST) {
    guest_regs[rd].flags |= GRF_CONST;
    if (guest_regs[rd].vreg < 0)
      guest_regs[rd].flags |= GRF_CDIRTY;
    guest_regs[rd].cnst = guest_regs[rs].cnst;
    gconsts[guest_regs[rd].cnst].gregs |= (1 << rd);
  }
}

static void gconst_clean(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY) {
      // using RC_GR_READ here: it will call gconst_try_read,
      // cache the reg and mark it dirty.
      rcache_get_reg_(i, RC_GR_READ, 0, NULL);
    }
}

static void gconst_invalidate(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_CONST|GRF_CDIRTY))
      gconsts[guest_regs[i].cnst].gregs &= ~(1 << i);
    guest_regs[i].flags &= ~(GRF_CONST|GRF_CDIRTY);
  }
}

static u16 rcache_counter;
// SH2 register usage bitmasks
static u32 rcache_vregs_reg;     // regs of type HRT_REG (for pinning)
static u32 rcache_regs_static;   // statically allocated regs
static u32 rcache_regs_pinned;   // pinned regs
static u32 rcache_regs_now;      // regs used in current insn
static u32 rcache_regs_soon;     // regs used in the next few insns
static u32 rcache_regs_late;     // regs used in later insns
static u32 rcache_regs_discard;  // regs overwritten without being used
static u32 rcache_regs_clean;    // regs needing cleaning

static void rcache_lock_vreg(int x)
{
  if (x >= 0) {
    cache_regs[x].locked ++;
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("locking free vreg %x, aborting\n", x);
      exit(1);
    }
    if (!cache_regs[x].locked) {
      printf("locking overflow vreg %x, aborting\n", x);
      exit(1);
    }
#endif
  }
}

static void rcache_unlock_vreg(int x)
{
  if (x >= 0) {
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("unlocking free vreg %x, aborting\n", x);
      exit(1);
    }
#endif
    if (cache_regs[x].locked)
      cache_regs[x].locked --;
  }
}

static void rcache_free_vreg(int x)
{
  cache_regs[x].type = cache_regs[x].locked ? HR_TEMP : HR_FREE;
  cache_regs[x].flags &= HRF_PINNED;
  cache_regs[x].gregs = 0;
}

static void rcache_unmap_vreg(int x)
{
  int i;

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, i,
      if (guest_regs[i].flags & GRF_DIRTY) {
        // if a dirty reg is unmapped save its value to context
        if ((~rcache_regs_discard | rcache_regs_now) & (1 << i))
          emith_ctx_write(cache_regs[x].hreg, i * 4);
        guest_regs[i].flags &= ~GRF_DIRTY;
      }
      guest_regs[i].vreg = -1);
  rcache_free_vreg(x);
}

static void rcache_move_vreg(int d, int x)
{
  int i;

  cache_regs[d].type = HR_CACHED;
  cache_regs[d].gregs = cache_regs[x].gregs;
  cache_regs[d].flags &= HRF_PINNED;
  cache_regs[d].flags |= cache_regs[x].flags & ~HRF_PINNED;
  cache_regs[d].locked = 0;
  cache_regs[d].stamp = cache_regs[x].stamp;
  emith_move_r_r(cache_regs[d].hreg, cache_regs[x].hreg);
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].vreg == x)
      guest_regs[i].vreg = d;
  rcache_free_vreg(x);
}

static void rcache_clean_vreg(int x)
{
  u32 rns = rcache_regs_now | rcache_regs_soon;
  int r;

  if (cache_regs[x].flags & HRF_DIRTY) { // writeback
    cache_regs[x].flags &= ~HRF_DIRTY;
    rcache_lock_vreg(x);
    FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, r,
        if (guest_regs[r].flags & GRF_DIRTY) {
          if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) {
            if (guest_regs[r].vreg != guest_regs[r].sreg &&
                !cache_regs[guest_regs[r].sreg].locked &&
                ((~rcache_regs_discard | rcache_regs_now) & (1 << r)) &&
                !(rns & cache_regs[guest_regs[r].sreg].gregs)) {
              // statically mapped reg not in its sreg. move back to sreg
              rcache_evict_vreg(guest_regs[r].sreg);
              emith_move_r_r(cache_regs[guest_regs[r].sreg].hreg,
                              cache_regs[guest_regs[r].vreg].hreg);
              rcache_copy_x16(cache_regs[guest_regs[r].sreg].hreg,
                              cache_regs[guest_regs[r].vreg].hreg);
              rcache_remove_vreg_alias(x, r);
              rcache_add_vreg_alias(guest_regs[r].sreg, r);
              cache_regs[guest_regs[r].sreg].flags |= HRF_DIRTY;
            } else
              // cannot remap. keep dirty for writeback in unmap
              cache_regs[x].flags |= HRF_DIRTY;
          } else {
            if ((~rcache_regs_discard | rcache_regs_now) & (1 << r))
              emith_ctx_write(cache_regs[x].hreg, r * 4);
            guest_regs[r].flags &= ~GRF_DIRTY;
          }
          rcache_regs_clean &= ~(1 << r);
        })
    rcache_unlock_vreg(x);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after clean");
#endif
}

static void rcache_add_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs |= (1 << r);
  guest_regs[r].vreg = x;
  cache_regs[x].type = HR_CACHED;
}

static void rcache_remove_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs &= ~(1 << r);
  if (!cache_regs[x].gregs) {
    // no reg mapped -> free vreg
    if (cache_regs[x].locked)
      cache_regs[x].type = HR_TEMP;
    else
      rcache_free_vreg(x);
  }
  guest_regs[r].vreg = -1;
}

static void rcache_evict_vreg(int x)
{
  rcache_remap_vreg(x);
  rcache_unmap_vreg(x);
}

static void rcache_evict_vreg_aliases(int x, sh2_reg_e r)
{
  rcache_remove_vreg_alias(x, r);
  rcache_evict_vreg(x);
  rcache_add_vreg_alias(x, r);
}

static int rcache_allocate(int what, int minprio)
{
  // evict reg with oldest stamp (only for HRT_REG, no temps)
  int i, i_prio, oldest = -1, prio = 0;
  u16 min_stamp = (u16)-1;

  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--) {
    // consider only non-static, unpinned, unlocked REG or TEMP
    if ((cache_regs[i].flags & HRF_PINNED) || cache_regs[i].locked)
      continue;
    if ((what > 0 && !(cache_regs[i].htype & HRT_REG)) ||   // get a REG
        (what == 0 && (cache_regs[i].htype & HRT_TEMP)) ||  // get a non-TEMP
        (what < 0 && !(cache_regs[i].htype & HRT_TEMP)))    // get a TEMP
      continue;
    if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP) {
      // REG is free
      prio = 10;
      oldest = i;
      break;
    }
    if (cache_regs[i].type == HR_CACHED) {
      if (rcache_regs_now & cache_regs[i].gregs)
        // REGs needed for the current insn
        i_prio = 0;
      else if (rcache_regs_soon & cache_regs[i].gregs)
        // REGs needed in the next insns
        i_prio = 2;
      else if (rcache_regs_late & cache_regs[i].gregs)
        // REGs needed in some future insn
        i_prio = 4;
      else if (~rcache_regs_discard & cache_regs[i].gregs)
        // REGs not needed in the foreseeable future
        i_prio = 6;
      else
        // REGs soon overwritten anyway
        i_prio = 8;
      if (!(cache_regs[i].flags & HRF_DIRTY)) i_prio ++;

      if (prio < i_prio || (prio == i_prio && cache_regs[i].stamp < min_stamp)) {
        min_stamp = cache_regs[i].stamp;
        oldest = i;
        prio = i_prio;
      }
    }
  }

  if (prio < minprio || oldest == -1)
    return -1;

  if (cache_regs[oldest].type == HR_CACHED)
    rcache_evict_vreg(oldest);
  else
    rcache_free_vreg(oldest);
  return oldest;
}

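/*
 * Eviction priority as computed above, in ascending order of preference
 * (higher means evicted first); clean vregs get +1 over dirty ones at the
 * same level, and the LRU stamp breaks remaining ties:
 *   0/1  needed by the current insn
 *   2/3  needed within the next few insns
 *   4/5  needed by some later insn
 *   6/7  not needed in the foreseeable future
 *   8/9  about to be overwritten anyway
 *   10   free
 * minprio lets callers reject poor candidates; e.g. rcache_allocate_nontemp()
 * passes 4 so it never steals a reg the next insns still need.
 */
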
static int rcache_allocate_vreg(int needed)
{
  int x;

  x = rcache_allocate(1, needed ? 0 : 4);
  if (x < 0)
    x = rcache_allocate(-1, 0);
  return x;
}

static int rcache_allocate_nontemp(void)
{
  int x = rcache_allocate(0, 4);
  return x;
}

static int rcache_allocate_temp(void)
{
  int x = rcache_allocate(-1, 0);
  if (x < 0)
    x = rcache_allocate(0, 0);
  return x;
}

// maps a host register to a REG
static int rcache_map_reg(sh2_reg_e r, int hr)
{
#if REMAP_REGISTER
  int i;

  gconst_kill(r);

  // lookup the TEMP hr maps to
  i = reg_map_host[hr];
  if (i < 0) {
    // must not happen
    printf("invalid host register %d\n", hr);
    exit(1);
  }

  // remove old mappings of r and i if one exists
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);

  // set new mapping
  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 1 << r;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  cache_regs[i].flags |= HRF_DIRTY;
  rcache_lock_vreg(i);
  guest_regs[r].flags |= GRF_DIRTY;
  guest_regs[r].vreg = i;
#if DRC_DEBUG & 64
  RCACHE_CHECK("after map");
#endif
  return cache_regs[i].hreg;
#else
  return rcache_get_reg(r, RC_GR_WRITE, NULL);
#endif
}

// remap vreg from a TEMP to a REG if it will be used (upcoming TEMP invalidation)
static void rcache_remap_vreg(int x)
{
#if REMAP_REGISTER
  u32 rsl_d = rcache_regs_soon | rcache_regs_late;
  int d;

  // x must be a cached vreg
  if (cache_regs[x].type != HR_CACHED || cache_regs[x].locked)
    return;
  // don't do it if x isn't used
  if (!(rsl_d & cache_regs[x].gregs)) {
    // clean here to avoid data loss on invalidation
    rcache_clean_vreg(x);
    return;
  }

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, d,
    if ((guest_regs[d].flags & (GRF_STATIC|GRF_PINNED)) &&
        !cache_regs[guest_regs[d].sreg].locked &&
        !((rsl_d|rcache_regs_now) & cache_regs[guest_regs[d].sreg].gregs)) {
      // STATIC not in its sreg and sreg is available
      rcache_evict_vreg(guest_regs[d].sreg);
      rcache_move_vreg(guest_regs[d].sreg, x);
      return;
    }
  )

  // allocate a non-TEMP vreg
  rcache_lock_vreg(x); // lock to avoid evicting x
  d = rcache_allocate_nontemp();
  rcache_unlock_vreg(x);
  if (d < 0) {
    rcache_clean_vreg(x);
    return;
  }

  // move vreg to new location
  rcache_move_vreg(d, x);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after remap");
#endif
#else
  rcache_clean_vreg(x);
#endif
}

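/*
 * TEMPs are call-clobbered host regs, so a guest value cached in one must
 * be rescued before emitted code calls out of the translation: preferably
 * into the sreg of a STATIC/PINNED alias, else into whatever REG the
 * allocator will give up, and as a last resort by writing it back to the
 * context (rcache_clean_vreg) and dropping the mapping.
 */
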
static void rcache_alias_vreg(sh2_reg_e rd, sh2_reg_e rs)
{
#if ALIAS_REGISTERS
  int x;

  // if s isn't constant, it must be in cache for aliasing
  if (!gconst_check(rs))
    rcache_get_reg_(rs, RC_GR_READ, 0, NULL);

  // if d and s are not already aliased
  x = guest_regs[rs].vreg;
  if (guest_regs[rd].vreg != x) {
    // remove possible old mapping of dst
    if (guest_regs[rd].vreg >= 0)
      rcache_remove_vreg_alias(guest_regs[rd].vreg, rd);
    // make dst an alias of src
    if (x >= 0)
      rcache_add_vreg_alias(x, rd);
    // if d is now in cache, it must be dirty
    if (guest_regs[rd].vreg >= 0) {
      x = guest_regs[rd].vreg;
      cache_regs[x].flags |= HRF_DIRTY;
      guest_regs[rd].flags |= GRF_DIRTY;
    }
  }

  gconst_copy(rd, rs);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after alias");
#endif
#else
  int hr_s = rcache_get_reg(rs, RC_GR_READ, NULL);
  int hr_d = rcache_get_reg(rd, RC_GR_WRITE, NULL);

  emith_move_r_r(hr_d, hr_s);
  gconst_copy(rd, rs);
#endif
}

// note: must not be called when doing conditional code
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr)
{
  int src, dst, ali;
  cache_reg_t *tr;
  u32 rsp_d = (rcache_regs_soon | rcache_regs_static | rcache_regs_pinned) &
              ~rcache_regs_discard;

  dst = src = guest_regs[r].vreg;

  rcache_lock_vreg(src); // lock to avoid evicting src
  // good opportunity to relocate a remapped STATIC?
  if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
      src != guest_regs[r].sreg && (src < 0 || mode != RC_GR_READ) &&
      !cache_regs[guest_regs[r].sreg].locked &&
      !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[r].sreg].gregs)) {
    dst = guest_regs[r].sreg;
    rcache_evict_vreg(dst);
  } else if (dst < 0) {
    // allocate a cache register
    if ((dst = rcache_allocate_vreg(rsp_d & (1 << r))) < 0) {
      printf("no registers to evict, aborting\n");
      exit(1);
    }
  }
  tr = &cache_regs[dst];
  tr->stamp = rcache_counter;

  // remove r from src
  if (src >= 0 && src != dst)
    rcache_remove_vreg_alias(src, r);
  rcache_unlock_vreg(src);

  // if r has a constant it may have aliases
  if (mode != RC_GR_WRITE && gconst_try_read(dst, r))
    src = dst;

  // if r will be modified, check for aliases being needed soon
  ali = tr->gregs & ~(1 << r);
  if (mode != RC_GR_READ && src == dst && ali) {
    int x = -1;
    if ((rsp_d|rcache_regs_now) & ali) {
      if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
          guest_regs[r].sreg == dst && !tr->locked) {
        // split aliases if r is STATIC in sreg and dst isn't already locked
        int t;
        FOR_ALL_BITS_SET_DO(ali, t,
          if ((guest_regs[t].flags & (GRF_STATIC|GRF_PINNED)) &&
              !(ali & ~(1 << t)) &&
              !cache_regs[guest_regs[t].sreg].locked &&
              !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[t].sreg].gregs)) {
            // alias is a single STATIC and its sreg is available
            x = guest_regs[t].sreg;
            rcache_evict_vreg(x);
          } else {
            rcache_lock_vreg(dst); // lock to avoid evicting dst
            x = rcache_allocate_vreg(rsp_d & ali);
            rcache_unlock_vreg(dst);
          }
          break;
        )
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          src = dst;
          rcache_move_vreg(x, dst);
        }
      } else {
        // split r
        rcache_lock_vreg(src); // lock to avoid evicting src
        x = rcache_allocate_vreg(rsp_d & (1 << r));
        rcache_unlock_vreg(src);
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          dst = x;
          tr = &cache_regs[dst];
          tr->stamp = rcache_counter;
        }
      }
    }
    if (x < 0)
      // aliases not needed or no vreg available, remove them
      rcache_evict_vreg_aliases(dst, r);
  }

  // assign r to dst
  rcache_add_vreg_alias(dst, r);

  // handle dst register transfer
  if (src < 0 && mode != RC_GR_WRITE)
    emith_ctx_read(tr->hreg, r * 4);
  if (hr) {
    *hr = (src >= 0 ? cache_regs[src].hreg : tr->hreg);
    rcache_lock_vreg(src >= 0 ? src : dst);
  } else if (src >= 0 && mode != RC_GR_WRITE && cache_regs[src].hreg != tr->hreg)
    emith_move_r_r(tr->hreg, cache_regs[src].hreg);

  // housekeeping
  if (do_locking)
    rcache_lock_vreg(dst);
  if (mode != RC_GR_READ) {
    tr->flags |= HRF_DIRTY;
    guest_regs[r].flags |= GRF_DIRTY;
    gconst_kill(r);
    rcache_set_x16(tr->hreg, 0, 0);
  } else if (src >= 0 && cache_regs[src].hreg != tr->hreg)
    rcache_copy_x16(tr->hreg, cache_regs[src].hreg);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getreg");
#endif
  return tr->hreg;
}

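/*
 * A typical use in an opcode handler looks roughly like this (sketch only;
 * hr_s/hr_d and the Rm/Rn operands are illustrative, the real handlers
 * appear later in this file):
 *
 *   int hr_s = rcache_get_reg(Rm, RC_GR_READ, NULL);  // source, locked
 *   int hr_d = rcache_get_reg(Rn, RC_GR_WRITE, NULL); // dest, marked dirty
 *   emith_move_r_r(hr_d, hr_s);
 *
 * Per the NB above, READ/RMW operands must be fetched before the WRITE
 * one, since a WRITE may split or evict aliases and could otherwise
 * displace a reg obtained just before.
 */
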
static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr)
{
  return rcache_get_reg_(r, mode, 1, hr);
}

static void rcache_pin_reg(sh2_reg_e r)
{
  int hr, x;

  // don't pin if static or already pinned
  if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED))
    return;

  rcache_regs_soon |= (1 << r); // kludge to prevent allocation of a temp
  hr = rcache_get_reg_(r, RC_GR_RMW, 0, NULL);
  x = reg_map_host[hr];

  // can only pin non-TEMPs
  if (!(cache_regs[x].htype & HRT_TEMP)) {
    guest_regs[r].flags |= GRF_PINNED;
    cache_regs[x].flags |= HRF_PINNED;
    guest_regs[r].sreg = x;
    rcache_regs_pinned |= (1 << r);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after pin");
#endif
}

static int rcache_get_tmp(void)
{
  int i;

  i = rcache_allocate_temp();
  if (i < 0) {
    printf("cannot allocate temp\n");
    exit(1);
  }

  cache_regs[i].type = HR_TEMP;
  rcache_lock_vreg(i);
  return cache_regs[i].hreg;
}

static int rcache_get_vreg_hr(int hr)
{
  int i;

  i = reg_map_host[hr];
  if (i < 0 || cache_regs[i].locked) {
    printf("host register %d is locked\n", hr);
    exit(1);
  }

  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);
  else if (cache_regs[i].type == HR_TEMP && cache_regs[i].locked) {
    printf("host reg %d already used, aborting\n", hr);
    exit(1);
  }
  return i;
}

static int rcache_get_vreg_arg(int arg)
{
  int hr = 0;

  host_arg2reg(hr, arg);
  return rcache_get_vreg_hr(hr);
}

// get a reg to be used as function arg
static int rcache_get_tmp_arg(int arg)
{
  int x = rcache_get_vreg_arg(arg);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);
  return cache_regs[x].hreg;
}

// ... as return value after a call
static int rcache_get_tmp_ret(void)
{
  int x = rcache_get_vreg_hr(RET_REG);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);
  return cache_regs[x].hreg;
}

// same but caches a reg if access is readonly (announced by hr being NULL)
static int rcache_get_reg_arg(int arg, sh2_reg_e r, int *hr)
{
  int i, srcr, dstr, dstid, keep;
  u32 val;
  host_arg2reg(dstr, arg);

  i = guest_regs[r].vreg;
  if (i >= 0 && cache_regs[i].type == HR_CACHED && cache_regs[i].hreg == dstr)
    // r is already in arg, avoid evicting
    dstid = i;
  else
    dstid = rcache_get_vreg_arg(arg);
  dstr = cache_regs[dstid].hreg;

  if (rcache_is_cached(r)) {
    // r is needed later on anyway
    srcr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
    keep = 1;
  } else if ((guest_regs[r].flags & GRF_CDIRTY) && gconst_get(r, &val)) {
    // r has an uncommitted const - load into arg, but keep constant uncommitted
    srcr = dstr;
    emith_move_r_imm(srcr, val);
    keep = 0;
  } else {
    // must read from ctx
    srcr = dstr;
    emith_ctx_read(srcr, r * 4);
    keep = 1;
  }

  if (cache_regs[dstid].type == HR_CACHED)
    rcache_evict_vreg(dstid);
  cache_regs[dstid].type = HR_TEMP;

  if (hr == NULL) {
    if (dstr != srcr)
      // arg is a copy of cached r
      emith_move_r_r(dstr, srcr);
    else if (keep && guest_regs[r].vreg < 0)
      // keep arg as vreg for r
      rcache_add_vreg_alias(dstid, r);
  } else {
    *hr = srcr;
    if (dstr != srcr) // must lock srcr if not copied here
      rcache_lock_vreg(reg_map_host[srcr]);
  }

  cache_regs[dstid].stamp = ++rcache_counter;
  rcache_lock_vreg(dstid);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getarg");
#endif
  return dstr;
}

static void rcache_free_tmp(int hr)
{
  int i = reg_map_host[hr];

  if (i < 0 || cache_regs[i].type != HR_TEMP) {
    printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, cache_regs[i].type);
    exit(1);
  }

  rcache_unlock_vreg(i);
}

// saves temporary result either in REG or in drctmp
static int rcache_save_tmp(int hr)
{
  int i;

  // find REG, either free or unlocked temp or oldest non-hinted cached
  i = rcache_allocate_nontemp();
  if (i < 0) {
    // if none is available, store in drctmp
    emith_ctx_write(hr, offsetof(SH2, drc_tmp));
    rcache_free_tmp(hr);
    return -1;
  }

  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 0; // not storing any guest register
  cache_regs[i].flags &= HRF_PINNED;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  rcache_lock_vreg(i);
  emith_move_r_r(cache_regs[i].hreg, hr);
  rcache_free_tmp(hr);
  return i;
}

static int rcache_restore_tmp(int x)
{
  int hr;

  // find REG with tmp store: cached but with no gregs
  if (x >= 0) {
    if (cache_regs[x].type != HR_CACHED || cache_regs[x].gregs) {
      printf("invalid tmp storage %d\n", x);
      exit(1);
    }
    // found, transform to a TEMP
    cache_regs[x].type = HR_TEMP;
    return cache_regs[x].hreg;
  }

  // if not available, create a TEMP store and fetch from drctmp
  hr = rcache_get_tmp();
  emith_ctx_read(hr, offsetof(SH2, drc_tmp));
  return hr;
}

static void rcache_free(int hr)
{
  int x = reg_map_host[hr];
  rcache_unlock_vreg(x);
}

static void rcache_unlock(int x)
{
  if (x >= 0)
    cache_regs[x].locked = 0;
}

static void rcache_unlock_all(void)
{
  int i;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    cache_regs[i].locked = 0;
}

static void rcache_unpin_all(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & GRF_PINNED) {
      guest_regs[i].flags &= ~GRF_PINNED;
      cache_regs[guest_regs[i].sreg].flags &= ~HRF_PINNED;
      guest_regs[i].sreg = -1;
      rcache_regs_pinned &= ~(1 << i);
    }
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after unpin");
#endif
}

static void rcache_save_pinned(void)
{
  int i;

  // save pinned regs to context
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if ((guest_regs[i].flags & GRF_PINNED) && guest_regs[i].vreg >= 0)
      emith_ctx_write(cache_regs[guest_regs[i].vreg].hreg, i * 4);
}

static inline void rcache_set_usage_now(u32 mask)
{
  rcache_regs_now = mask;
}

static inline void rcache_set_usage_soon(u32 mask)
{
  rcache_regs_soon = mask;
}

static inline void rcache_set_usage_late(u32 mask)
{
  rcache_regs_late = mask;
}

static inline void rcache_set_usage_discard(u32 mask)
{
  rcache_regs_discard = mask;
}

static inline int rcache_is_cached(sh2_reg_e r)
{
  // is r in cache or needed soon?
  u32 rsc = rcache_regs_soon | rcache_regs_clean;
  return (guest_regs[r].vreg >= 0 || (rsc & (1 << r)));
}

static inline int rcache_is_hreg_used(int hr)
{
  int x = reg_map_host[hr];
  // is hr in use?
  return cache_regs[x].type != HR_FREE &&
        (cache_regs[x].type != HR_TEMP || cache_regs[x].locked);
}

static inline u32 rcache_used_hregs_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if ((cache_regs[i].htype & HRT_TEMP) && cache_regs[i].type != HR_FREE &&
        (cache_regs[i].type != HR_TEMP || cache_regs[i].locked))
      mask |= 1 << cache_regs[i].hreg;
  return mask;
}

static inline u32 rcache_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_DIRTY)
      mask |= 1 << i;
  mask |= gconst_dirty_mask();
  return mask;
}

static inline u32 rcache_cached_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED)
      mask |= cache_regs[i].gregs;
  return mask;
}

static void rcache_clean_tmp(void)
{
  int i;

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED && (cache_regs[i].htype & HRT_TEMP)) {
      rcache_unlock(i);
      rcache_remap_vreg(i);
    }
  rcache_regs_clean = 0;
}

static void rcache_clean_masked(u32 mask)
{
  int i, r, hr;
  u32 m;

  rcache_regs_clean |= mask;
  mask = rcache_regs_clean;

  // clean constants where all aliases are covered by the mask, exempt statics
  // to avoid flushing them to context if sreg isn't available
  m = mask & ~(rcache_regs_static | rcache_regs_pinned);
  for (i = 0; i < ARRAY_SIZE(gconsts); i++)
    if ((gconsts[i].gregs & m) && !(gconsts[i].gregs & ~mask)) {
      FOR_ALL_BITS_SET_DO(gconsts[i].gregs, r,
          if (guest_regs[r].flags & GRF_CDIRTY) {
            hr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
            rcache_clean_vreg(reg_map_host[hr]);
            break;
          });
    }

  // clean vregs where all aliases are covered by the mask
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED &&
        (cache_regs[i].gregs & mask) && !(cache_regs[i].gregs & ~mask))
      rcache_clean_vreg(i);
}

static void rcache_clean(void)
{
  int i;
  gconst_clean();

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--)
    if (cache_regs[i].type == HR_CACHED)
      rcache_clean_vreg(i);

  // relocate statics to their sregs (necessary before conditional jumps)
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if ((guest_regs[i].flags & (GRF_STATIC|GRF_PINNED)) &&
        guest_regs[i].vreg != guest_regs[i].sreg) {
      rcache_lock_vreg(guest_regs[i].vreg);
      rcache_evict_vreg(guest_regs[i].sreg);
      rcache_unlock_vreg(guest_regs[i].vreg);

      if (guest_regs[i].vreg < 0)
        emith_ctx_read(cache_regs[guest_regs[i].sreg].hreg, i*4);
      else {
        emith_move_r_r(cache_regs[guest_regs[i].sreg].hreg,
                        cache_regs[guest_regs[i].vreg].hreg);
        rcache_copy_x16(cache_regs[guest_regs[i].sreg].hreg,
                        cache_regs[guest_regs[i].vreg].hreg);
        rcache_remove_vreg_alias(guest_regs[i].vreg, i);
      }
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_regs_clean = 0;
}

static void rcache_invalidate_tmp(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype & HRT_TEMP) {
      rcache_unlock(i);
      if (cache_regs[i].type == HR_CACHED)
        rcache_evict_vreg(i);
      else
        rcache_free_vreg(i);
    }
  }
}

static void rcache_invalidate(void)
{
  int i;
  gconst_invalidate();
  rcache_unlock_all();

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    rcache_free_vreg(i);

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i].flags &= GRF_STATIC;
    if (!(guest_regs[i].flags & GRF_STATIC))
      guest_regs[i].vreg = -1;
    else {
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_counter = 0;
  rcache_regs_now = rcache_regs_soon = rcache_regs_late = 0;
  rcache_regs_discard = rcache_regs_clean = 0;
}

static void rcache_flush(void)
{
  rcache_clean();
  rcache_invalidate();
}

static void rcache_create(void)
{
  int x = 0, i;

  // create cache_regs as host register representation
  // RET_REG/params should be first TEMPs to avoid allocation conflicts in calls
  cache_regs[x++] = (cache_reg_t) {.hreg = RET_REG, .htype = HRT_TEMP};
  for (i = 0; i < ARRAY_SIZE(hregs_param); i++)
    if (hregs_param[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_param[i],.htype = HRT_TEMP};

  for (i = 0; i < ARRAY_SIZE(hregs_temp); i++)
    if (hregs_temp[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_temp[i], .htype = HRT_TEMP};

  for (i = ARRAY_SIZE(hregs_saved)-1; i >= 0; i--)
    if (hregs_saved[i] != CONTEXT_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_saved[i], .htype = HRT_REG};

  if (x != ARRAY_SIZE(cache_regs)) {
    printf("rcache_create failed (conflicting register count)\n");
    exit(1);
  }

  // mapping from host register to cache regs index
  memset(reg_map_host, -1, sizeof(reg_map_host));
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype)
      reg_map_host[cache_regs[i].hreg] = i;
    if (cache_regs[i].htype == HRT_REG)
      rcache_vregs_reg |= (1 << i);
  }

  // create static host register mapping for SH2 regs
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i] = (guest_reg_t){.sreg = -1};
  }
  for (i = 0; i < ARRAY_SIZE(regs_static); i += 2) {
    for (x = ARRAY_SIZE(cache_regs)-1; x >= 0; x--)
      if (cache_regs[x].hreg == regs_static[i+1]) break;
    if (x >= 0) {
      guest_regs[regs_static[i]] = (guest_reg_t){.flags = GRF_STATIC,.sreg = x};
      rcache_regs_static |= (1 << regs_static[i]);
      rcache_vregs_reg &= ~(1 << x);
    }
  }

  printf("DRC registers created, %ld host regs (%d REG, %d STATIC, 1 CTX)\n",
    CACHE_REGS+1L, count_bits(rcache_vregs_reg),count_bits(rcache_regs_static));
}

static void rcache_init(void)
{
  // create DRC data structures
  rcache_create();
  rcache_invalidate();
#if DRC_DEBUG & 64
  RCACHE_CHECK("after init");
#endif
}

// ---------------------------------------------------------------

// swap 32 bit value read from mem in generated code (same as CPU_BE2)
static void emit_le_swap(int cond, int r)
{
#if CPU_IS_LE
  if (cond == -1)
    emith_ror(r, r, 16);
  else
    emith_ror_c(cond, r, r, 16);
#endif
}

// fix memory byte ptr in generated code (same as MEM_BE2)
static void emit_le_ptr8(int cond, int r)
{
#if CPU_IS_LE
  if (cond == -1)
    emith_eor_r_imm_ptr(r, 1);
  else
    emith_eor_r_imm_ptr_c(cond, r, 1);
#endif
}

// split address by mask, in base part (upper) and offset (lower, signed!)
static uptr split_address(uptr la, uptr mask, s32 *offs)
{
  uptr sign = (mask>>1) + 1; // sign bit in offset
  *offs = (la & mask) | (la & sign ? ~mask : 0); // offset part, sign extended
  la = (la & ~mask) + ((la & sign) << 1); // base part, corrected for offs sign
#ifdef __arm__
  // arm32 offset has an add/sub flag and an unsigned 8 bit value, which only
  // allows values of [-255...255]. the value -256 thus can't be used.
  if (*offs + sign == 0) {
    la -= sign;
    *offs += sign;
  }
#endif
  return la;
}

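/*
 * Worked example, assuming mask = 0xff (so sign = 0x80): la = 0x12f0 yields
 * offs = 0xf0 sign extended to -0x10, and base = 0x1200 + 0x100 = 0x1300;
 * indeed 0x1300 + (-0x10) = 0x12f0. Keeping the offset signed lets hosts
 * with short signed load/store displacements reach both sides of the base
 * register.
 */
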
  2290. // NB may return either REG or TEMP
  2291. static int emit_get_rbase_and_offs(SH2 *sh2, sh2_reg_e r, int rmode, s32 *offs)
  2292. {
  2293. uptr omask = emith_rw_offs_max(); // offset mask
  2294. u32 mask = 0;
  2295. u32 a;
  2296. int poffs;
  2297. int hr, hr2;
  2298. uptr la;
  2299. // is r constant and points to a memory region?
  2300. if (! gconst_get(r, &a))
  2301. return -1;
  2302. poffs = dr_ctx_get_mem_ptr(sh2, a, &mask);
  2303. if (poffs == -1)
  2304. return -1;
  2305. if (mask < 0x20000) {
2306. // data array, BIOS, DRAM: can't be accessed directly since the host address
2307. // may change (BIOS/data array code may run on either core, DRAM may be switched)
  2308. hr = rcache_get_tmp();
  2309. a = (a + *offs) & mask;
  2310. if (poffs == offsetof(SH2, p_da)) {
  2311. // access sh2->data_array directly
  2312. a = split_address(a + offsetof(SH2, data_array), omask, offs);
  2313. emith_add_r_r_ptr_imm(hr, CONTEXT_REG, a);
  2314. } else {
  2315. a = split_address(a, omask, offs);
  2316. emith_ctx_read_ptr(hr, poffs);
  2317. if (a)
  2318. emith_add_r_r_ptr_imm(hr, hr, a);
  2319. }
  2320. return hr;
  2321. }
  2322. // ROM, SDRAM. Host address should be mmapped to be equal to SH2 address.
  2323. la = (uptr)*(void **)((char *)sh2 + poffs);
  2324. // if r is in rcache or needed soon anyway, and offs is relative to region,
  2325. // and address translation fits in add_ptr_imm (s32), then use rcached const
  2326. if (la == (s32)la && !(((a & mask) + *offs) & ~mask) && rcache_is_cached(r)) {
  2327. #if CPU_IS_LE // need to fix odd address for correct byte addressing
  2328. if (a & 1) *offs += (*offs&1) ? 2 : -2;
  2329. #endif
  2330. la -= (s32)((a & ~mask) - *offs); // diff between reg and memory
  2331. hr = hr2 = rcache_get_reg(r, rmode, NULL);
  2332. if ((s32)a < 0) emith_uext_ptr(hr2);
  2333. la = split_address(la, omask, offs);
  2334. if (la) {
  2335. hr = rcache_get_tmp();
  2336. emith_add_r_r_ptr_imm(hr, hr2, la);
  2337. rcache_free(hr2);
  2338. }
  2339. } else {
  2340. // known fixed host address
  2341. la = split_address(la + ((a + *offs) & mask), omask, offs);
  2342. if (la == 0) {
  2343. // offset only. optimize for hosts having short indexed addressing
2344. la = *offs & ~0x7f; // keep the lower bits for endianness handling
  2345. *offs &= 0x7f;
  2346. }
  2347. hr = rcache_get_tmp();
  2348. emith_move_r_ptr_imm(hr, la);
  2349. }
  2350. return hr;
  2351. }
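// summary of the cases above: small regions (data array, BIOS, DRAM) are
// addressed relative to the context pointer since their host address may
// change; mmapped ROM/SDRAM with r already cached reuses the guest reg plus
// a constant delta; otherwise the fixed host address is loaded into a temp.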
  2352. // read const data from const ROM address
  2353. static int emit_get_rom_data(SH2 *sh2, sh2_reg_e r, s32 offs, int size, u32 *val)
  2354. {
  2355. u32 a, mask;
  2356. *val = 0;
  2357. if (gconst_get(r, &a)) {
  2358. a += offs;
  2359. // check if rom is memory mapped (not bank switched), and address is in rom
  2360. if (dr_is_rom(a) && p32x_sh2_get_mem_ptr(a, &mask, sh2) == sh2->p_rom) {
  2361. switch (size & MF_SIZEMASK) {
  2362. case 0: *val = (s8)p32x_sh2_read8(a, sh2s); break; // 8
  2363. case 1: *val = (s16)p32x_sh2_read16(a, sh2s); break; // 16
  2364. case 2: *val = p32x_sh2_read32(a, sh2s); break; // 32
  2365. }
  2366. return 1;
  2367. }
  2368. }
  2369. return 0;
  2370. }
  2371. static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
  2372. {
  2373. #if PROPAGATE_CONSTANTS
  2374. gconst_new(dst, imm);
  2375. #else
  2376. int hr = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  2377. emith_move_r_imm(hr, imm);
  2378. #endif
  2379. }
  2380. static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
  2381. {
  2382. if (gconst_check(src) || rcache_is_cached(src))
  2383. rcache_alias_vreg(dst, src);
  2384. else {
  2385. int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  2386. emith_ctx_read(hr_d, src * 4);
  2387. }
  2388. }
  2389. static void emit_add_r_imm(sh2_reg_e r, u32 imm)
  2390. {
  2391. u32 val;
  2392. int isgc = gconst_get(r, &val);
  2393. int hr, hr2;
  2394. if (!isgc || rcache_is_cached(r)) {
  2395. // not constant, or r is already in cache
  2396. hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
  2397. emith_add_r_r_imm(hr, hr2, imm);
  2398. rcache_free(hr2);
  2399. if (isgc)
  2400. gconst_set(r, val + imm);
  2401. } else
  2402. gconst_new(r, val + imm);
  2403. }
  2404. static void emit_sub_r_imm(sh2_reg_e r, u32 imm)
  2405. {
  2406. u32 val;
  2407. int isgc = gconst_get(r, &val);
  2408. int hr, hr2;
  2409. if (!isgc || rcache_is_cached(r)) {
  2410. // not constant, or r is already in cache
  2411. hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
  2412. emith_sub_r_r_imm(hr, hr2, imm);
  2413. rcache_free(hr2);
  2414. if (isgc)
  2415. gconst_set(r, val - imm);
  2416. } else
  2417. gconst_new(r, val - imm);
  2418. }
  2419. static void emit_sync_t_to_sr(void)
  2420. {
  2421. // avoid reloading SR from context if there's nothing to do
  2422. if (emith_get_t_cond() >= 0) {
  2423. int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2424. emith_sync_t(sr);
  2425. }
  2426. }
  2427. // rd = @(arg0)
  2428. static int emit_memhandler_read(int size)
  2429. {
  2430. int hr;
  2431. emit_sync_t_to_sr();
  2432. rcache_clean_tmp();
  2433. #ifndef DRC_SR_REG
2434. // must write back cycles for the poll detection code
  2435. if (guest_regs[SHR_SR].vreg != -1)
  2436. rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
  2437. #endif
  2438. rcache_invalidate_tmp();
  2439. if (size & MF_POLLING)
  2440. switch (size & MF_SIZEMASK) {
  2441. case 0: emith_call(sh2_drc_read8_poll); break; // 8
  2442. case 1: emith_call(sh2_drc_read16_poll); break; // 16
  2443. case 2: emith_call(sh2_drc_read32_poll); break; // 32
  2444. }
  2445. else
  2446. switch (size & MF_SIZEMASK) {
  2447. case 0: emith_call(sh2_drc_read8); break; // 8
  2448. case 1: emith_call(sh2_drc_read16); break; // 16
  2449. case 2: emith_call(sh2_drc_read32); break; // 32
  2450. }
  2451. hr = rcache_get_tmp_ret();
  2452. rcache_set_x16(hr, (size & MF_SIZEMASK) < 2, 0);
  2453. return hr;
  2454. }
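// note: 8 and 16 bit reads return their value sign extended (cf. the read8s/
// read16s emitters and the (s8)/(s16) casts in emit_get_rom_data), which is
// what the rcache_set_x16 hint above records, so that e.g. MULS.W can skip a
// redundant sign extension later on.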
  2455. // @(arg0) = arg1
  2456. static void emit_memhandler_write(int size)
  2457. {
  2458. emit_sync_t_to_sr();
  2459. rcache_clean_tmp();
  2460. #ifndef DRC_SR_REG
  2461. if (guest_regs[SHR_SR].vreg != -1)
  2462. rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
  2463. #endif
  2464. rcache_invalidate_tmp();
  2465. switch (size & MF_SIZEMASK) {
  2466. case 0: emith_call(sh2_drc_write8); break; // 8
  2467. case 1: emith_call(sh2_drc_write16); break; // 16
  2468. case 2: emith_call(sh2_drc_write32); break; // 32
  2469. }
  2470. }
  2471. // rd = @(Rs,#offs); rd < 0 -> return a temp
  2472. static int emit_memhandler_read_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, s32 offs, int size)
  2473. {
  2474. int hr, hr2;
  2475. u32 val;
  2476. #if PROPAGATE_CONSTANTS
  2477. if (emit_get_rom_data(sh2, rs, offs, size, &val)) {
  2478. if (rd == SHR_TMP) {
  2479. hr2 = rcache_get_tmp();
  2480. emith_move_r_imm(hr2, val);
  2481. } else {
  2482. emit_move_r_imm32(rd, val);
  2483. hr2 = rcache_get_reg(rd, RC_GR_RMW, NULL);
  2484. }
  2485. rcache_set_x16(hr2, val == (s16)val, val == (u16)val);
  2486. if (size & MF_POSTINCR)
  2487. emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2488. return hr2;
  2489. }
  2490. val = size & MF_POSTINCR;
  2491. hr = emit_get_rbase_and_offs(sh2, rs, val ? RC_GR_RMW : RC_GR_READ, &offs);
  2492. if (hr != -1) {
  2493. if (rd == SHR_TMP)
  2494. hr2 = rcache_get_tmp();
  2495. else
  2496. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  2497. switch (size & MF_SIZEMASK) {
  2498. case 0: emith_read8s_r_r_offs(hr2, hr, MEM_BE2(offs)); break; // 8
  2499. case 1: emith_read16s_r_r_offs(hr2, hr, offs); break; // 16
  2500. case 2: emith_read_r_r_offs(hr2, hr, offs); emit_le_swap(-1, hr2); break;
  2501. }
  2502. rcache_free(hr);
  2503. if (size & MF_POSTINCR)
  2504. emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2505. return hr2;
  2506. }
  2507. #endif
  2508. if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
  2509. hr = rcache_get_tmp_arg(0);
  2510. emith_move_r_imm(hr, val + offs);
  2511. if (size & MF_POSTINCR)
  2512. gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  2513. } else if (size & MF_POSTINCR) {
  2514. hr = rcache_get_tmp_arg(0);
  2515. hr2 = rcache_get_reg(rs, RC_GR_RMW, NULL);
  2516. emith_add_r_r_imm(hr, hr2, offs);
  2517. emith_add_r_imm(hr2, 1 << (size & MF_SIZEMASK));
  2518. if (gconst_get(rs, &val))
  2519. gconst_set(rs, val + (1 << (size & MF_SIZEMASK)));
  2520. } else {
  2521. hr = rcache_get_reg_arg(0, rs, &hr2);
  2522. if (offs || hr != hr2)
  2523. emith_add_r_r_imm(hr, hr2, offs);
  2524. }
  2525. hr = emit_memhandler_read(size);
  2526. if (rd == SHR_TMP)
  2527. hr2 = hr;
  2528. else
  2529. hr2 = rcache_map_reg(rd, hr);
  2530. if (hr != hr2) {
  2531. emith_move_r_r(hr2, hr);
  2532. rcache_free_tmp(hr);
  2533. }
  2534. return hr2;
  2535. }
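// usage sketch (illustrative): a post-increment load such as MOV.L @Rm+,Rn
// becomes emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), 0, 2 | MF_POSTINCR);
// RTE below pops PC and SR the same way via SHR_SP. passing rd = SHR_TMP
// returns the value in a temp instead of mapping a guest reg.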
  2536. // @(Rs,#offs) = rd; rd < 0 -> write arg1
  2537. static void emit_memhandler_write_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, s32 offs, int size)
  2538. {
  2539. int hr, hr2;
  2540. u32 val;
  2541. if (rd == SHR_TMP) {
  2542. host_arg2reg(hr2, 1); // already locked and prepared by caller
  2543. } else if ((size & MF_PREDECR) && rd == rs) { // must avoid caching rd in arg1
  2544. hr2 = rcache_get_reg_arg(1, rd, &hr);
  2545. if (hr != hr2) {
  2546. emith_move_r_r(hr2, hr);
  2547. rcache_free(hr2);
  2548. }
  2549. } else
  2550. hr2 = rcache_get_reg_arg(1, rd, NULL);
  2551. if (rd != SHR_TMP)
  2552. rcache_unlock(guest_regs[rd].vreg); // unlock in case rd is in arg0
  2553. if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
  2554. hr = rcache_get_tmp_arg(0);
  2555. if (size & MF_PREDECR) {
  2556. val -= 1 << (size & MF_SIZEMASK);
  2557. gconst_new(rs, val);
  2558. }
  2559. emith_move_r_imm(hr, val + offs);
  2560. } else if (offs || (size & MF_PREDECR)) {
  2561. if (size & MF_PREDECR)
  2562. emit_sub_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2563. rcache_unlock(guest_regs[rs].vreg); // unlock in case rs is in arg0
  2564. hr = rcache_get_reg_arg(0, rs, &hr2);
  2565. if (offs || hr != hr2)
  2566. emith_add_r_r_imm(hr, hr2, offs);
  2567. } else
  2568. hr = rcache_get_reg_arg(0, rs, NULL);
  2569. emit_memhandler_write(size);
  2570. }
  2571. // rd = @(Rx,Ry); rd < 0 -> return a temp
  2572. static int emit_indirect_indexed_read(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
  2573. {
  2574. int hr, hr2;
  2575. int tx, ty;
  2576. #if PROPAGATE_CONSTANTS
  2577. u32 offs;
  2578. // if offs is larger than 0x01000000, it's most probably the base address part
  2579. if (gconst_get(ry, &offs) && offs < 0x01000000)
  2580. return emit_memhandler_read_rr(sh2, rd, rx, offs, size);
  2581. if (gconst_get(rx, &offs) && offs < 0x01000000)
  2582. return emit_memhandler_read_rr(sh2, rd, ry, offs, size);
  2583. #endif
  2584. hr = rcache_get_reg_arg(0, rx, &tx);
  2585. ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  2586. emith_add_r_r_r(hr, tx, ty);
  2587. hr = emit_memhandler_read(size);
  2588. if (rd == SHR_TMP)
  2589. hr2 = hr;
  2590. else
  2591. hr2 = rcache_map_reg(rd, hr);
  2592. if (hr != hr2) {
  2593. emith_move_r_r(hr2, hr);
  2594. rcache_free_tmp(hr);
  2595. }
  2596. return hr2;
  2597. }
  2598. // @(Rx,Ry) = rd; rd < 0 -> write arg1
  2599. static void emit_indirect_indexed_write(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
  2600. {
  2601. int hr, tx, ty;
  2602. #if PROPAGATE_CONSTANTS
  2603. u32 offs;
  2604. // if offs is larger than 0x01000000, it's most probably the base address part
  2605. if (gconst_get(ry, &offs) && offs < 0x01000000)
  2606. return emit_memhandler_write_rr(sh2, rd, rx, offs, size);
  2607. if (gconst_get(rx, &offs) && offs < 0x01000000)
  2608. return emit_memhandler_write_rr(sh2, rd, ry, offs, size);
  2609. #endif
  2610. if (rd != SHR_TMP)
  2611. rcache_get_reg_arg(1, rd, NULL);
  2612. hr = rcache_get_reg_arg(0, rx, &tx);
  2613. ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  2614. emith_add_r_r_r(hr, tx, ty);
  2615. emit_memhandler_write(size);
  2616. }
  2617. // @Rn+,@Rm+
  2618. static void emit_indirect_read_double(SH2 *sh2, int *rnr, int *rmr, sh2_reg_e rn, sh2_reg_e rm, int size)
  2619. {
  2620. int tmp;
  2621. // unlock rn, rm here to avoid REG shortage in MAC operation
  2622. tmp = emit_memhandler_read_rr(sh2, SHR_TMP, rn, 0, size | MF_POSTINCR);
  2623. rcache_unlock(guest_regs[rn].vreg);
  2624. tmp = rcache_save_tmp(tmp);
  2625. *rmr = emit_memhandler_read_rr(sh2, SHR_TMP, rm, 0, size | MF_POSTINCR);
  2626. rcache_unlock(guest_regs[rm].vreg);
  2627. *rnr = rcache_restore_tmp(tmp);
  2628. }
  2629. static void emit_do_static_regs(int is_write, int tmpr)
  2630. {
  2631. int i, r, count;
  2632. for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
  2633. if (guest_regs[i].flags & (GRF_STATIC|GRF_PINNED))
  2634. r = cache_regs[guest_regs[i].vreg].hreg;
  2635. else
  2636. continue;
  2637. for (count = 1; i < ARRAY_SIZE(guest_regs) - 1; i++, r++) {
  2638. if ((guest_regs[i + 1].flags & (GRF_STATIC|GRF_PINNED)) &&
  2639. cache_regs[guest_regs[i + 1].vreg].hreg == r + 1)
  2640. count++;
  2641. else
  2642. break;
  2643. }
  2644. if (count > 1) {
  2645. // i, r point to last item
  2646. if (is_write)
  2647. emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
  2648. else
  2649. emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
  2650. } else {
  2651. if (is_write)
  2652. emith_ctx_write(r, i * 4);
  2653. else
  2654. emith_ctx_read(r, i * 4);
  2655. }
  2656. }
  2657. }
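// example (illustrative): if guest R0..R2 are statically mapped to three
// consecutive host registers, the run detection above folds them into one
// emith_ctx_{read,write}_multiple covering context offsets 0, 4 and 8
// instead of three single accesses.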
  2658. #if DIV_OPTIMIZER
  2659. // divide operation replacement functions, called by compiled code. Only the
  2660. // 32:16 cases and the 64:32 cases described in the SH2 prog man are replaced.
2661. // This is surprisingly difficult since the SH2 division operation generates
2662. // the result in the dividend register as it proceeds, leaving remainder-like
2663. // data in the bits not used by the result, and leaving the T and Q status
2664. // bits in a state depending on the operands and the result. Q always reflects
2665. // the last result bit generated (i.e. bit 0 of the result). For T:
2666. // 32:16 T = top bit of the 16 bit remainder-like data
2667. // 64:32 T = resulting T of the DIV0U/S operation
2668. // The remainder-like data depends on the outcome of the last result bit.
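// worked example, tracing the 32:16 "good case" below: dv = 100 and
// ds = 0x00030000 (divisor 3 in the upper half) pass the checks, giving
// quot = 33, rem = 1; quot is odd so rem stays unadjusted, T = (rem>>15)&1
// = 0, and the returned value is 0x21 | (2*rem << 16) = 0x00020021, i.e.
// the quotient in the low half with the remainder-like bits above it.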
  2669. static uint32_t REGPARM(3) sh2_drc_divu32(uint32_t dv, uint32_t *dt, uint32_t ds)
  2670. {
  2671. if (ds > dv && (uint16_t)ds == 0) {
  2672. // good case: no overflow, divisor not 0, lower 16 bits 0
  2673. uint32_t quot = dv / (ds>>16), rem = dv - (quot * (ds>>16));
  2674. if (~quot&1) rem -= ds>>16;
  2675. *dt = (rem>>15) & 1;
  2676. return (uint16_t)quot | ((2*rem + (quot>>31)) << 16);
  2677. } else {
  2678. // bad case: use the sh2 algo to get the right result
  2679. int q = 0, t = 0, s = 16;
  2680. while (s--) {
  2681. uint32_t v = dv>>31;
  2682. dv = (dv<<1) | t;
  2683. t = v;
  2684. v = dv;
  2685. if (q) dv += ds, q = dv < v;
  2686. else dv -= ds, q = dv > v;
  2687. q ^= t, t = !q;
  2688. }
  2689. *dt = dv>>31;
  2690. return (dv<<1) | t;
  2691. }
  2692. }
  2693. static uint32_t REGPARM(3) sh2_drc_divu64(uint32_t dh, uint32_t *dl, uint32_t ds)
  2694. {
  2695. if (ds > dh) {
  2696. // good case: no overflow, divisor not 0
  2697. uint64_t dv = *dl | ((uint64_t)dh << 32);
  2698. uint32_t quot = dv / ds, rem = dv - ((uint64_t)quot * ds);
  2699. if (~quot&1) rem -= ds;
  2700. *dl = quot;
  2701. return rem;
  2702. } else {
  2703. // bad case: use the sh2 algo to get the right result
  2704. uint64_t dv = *dl | ((uint64_t)dh << 32);
  2705. int q = 0, t = 0, s = 32;
  2706. while (s--) {
  2707. uint64_t v = dv>>63;
  2708. dv = (dv<<1) | t;
  2709. t = v;
  2710. v = dv;
  2711. if (q) dv += ((uint64_t)ds << 32), q = dv < v;
  2712. else dv -= ((uint64_t)ds << 32), q = dv > v;
  2713. q ^= t, t = !q;
  2714. }
  2715. *dl = (dv<<1) | t;
  2716. return (dv>>32);
  2717. }
  2718. }
  2719. static uint32_t REGPARM(3) sh2_drc_divs32(int32_t dv, uint32_t *dt, int32_t ds)
  2720. {
  2721. uint32_t adv = abs(dv), ads = abs(ds)>>16;
  2722. if (ads > adv>>16 && ds != 0x80000000 && (int16_t)ds == 0) {
  2723. // good case: no overflow, divisor not 0 and not MIN_INT, lower 16 bits 0
  2724. uint32_t quot = adv / ads, rem = adv - (quot * ads);
  2725. int m1 = (rem ? dv^ds : ds) < 0;
  2726. if (rem && dv < 0) rem = (quot&1 ? -rem : +ads-rem);
  2727. else rem = (quot&1 ? +rem : -ads+rem);
  2728. quot = ((dv^ds)<0 ? -quot : +quot) - m1;
  2729. *dt = (rem>>15) & 1;
  2730. return (uint16_t)quot | ((2*rem + (quot>>31)) << 16);
  2731. } else {
  2732. // bad case: use the sh2 algo to get the right result
  2733. int m = (uint32_t)ds>>31, q = (uint32_t)dv>>31, t = m^q, s = 16;
  2734. while (s--) {
  2735. uint32_t v = (uint32_t)dv>>31;
  2736. dv = (dv<<1) | t;
  2737. t = v;
  2738. v = dv;
  2739. if (m^q) dv += ds, q = (uint32_t)dv < v;
  2740. else dv -= ds, q = (uint32_t)dv > v;
  2741. q ^= m^t, t = !(m^q);
  2742. }
  2743. *dt = (uint32_t)dv>>31;
  2744. return (dv<<1) | t;
  2745. }
  2746. }
  2747. static uint32_t REGPARM(3) sh2_drc_divs64(int32_t dh, uint32_t *dl, int32_t ds)
  2748. {
  2749. int64_t _dv = *dl | ((int64_t)dh << 32);
  2750. uint64_t adv = (_dv < 0 ? -_dv : _dv); // llabs isn't in older toolchains
  2751. uint32_t ads = abs(ds);
  2752. if (ads > adv>>32 && ds != 0x80000000) {
  2753. // good case: no overflow, divisor not 0 and not MIN_INT
  2754. uint32_t quot = adv / ads, rem = adv - ((uint64_t)quot * ads);
  2755. int m1 = (rem ? dh^ds : ds) < 0;
  2756. if (rem && dh < 0) rem = (quot&1 ? -rem : +ads-rem);
  2757. else rem = (quot&1 ? +rem : -ads+rem);
  2758. quot = ((dh^ds)<0 ? -quot : +quot) - m1;
  2759. *dl = quot;
  2760. return rem;
  2761. } else {
  2762. // bad case: use the sh2 algo to get the right result
  2763. uint64_t dv = *dl | ((uint64_t)dh << 32);
  2764. int m = (uint32_t)ds>>31, q = (uint64_t)dv>>63, t = m^q, s = 32;
  2765. while (s--) {
  2766. uint64_t v = (uint64_t)dv>>63;
  2767. dv = (dv<<1) | t;
  2768. t = v;
  2769. v = dv;
  2770. if (m^q) dv += ((uint64_t)ds << 32), q = dv < v;
  2771. else dv -= ((uint64_t)ds << 32), q = dv > v;
  2772. q ^= m^t, t = !(m^q);
  2773. }
  2774. *dl = (dv<<1) | t;
  2775. return (dv>>32);
  2776. }
  2777. }
  2778. #endif
  2779. // block local link stuff
  2780. struct linkage {
  2781. u32 pc;
  2782. void *ptr;
  2783. struct block_link *bl;
  2784. u32 mask;
  2785. };
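// field usage (inferred from the code below): pc is the guest address, ptr
// the host code location of the branch or target, bl an external block link
// if one was prepared, and mask is context dependent - branch type flags
// (0x1 = exit, 0x2 = local target) for blx entries, a pinned register mask
// for pinned_loops.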
  2786. static inline int find_in_linkage(const struct linkage *array, int size, u32 pc)
  2787. {
  2788. size_t i;
  2789. for (i = 0; i < size; i++)
  2790. if (pc == array[i].pc)
  2791. return i;
  2792. return -1;
  2793. }
  2794. static int find_in_sorted_linkage(const struct linkage *array, int size, u32 pc)
  2795. {
  2796. // binary search in sorted array
  2797. int left = 0, right = size-1;
  2798. while (left <= right)
  2799. {
  2800. int middle = (left + right) / 2;
  2801. if (array[middle].pc == pc)
  2802. return middle;
  2803. else if (array[middle].pc < pc)
  2804. left = middle + 1;
  2805. else
  2806. right = middle - 1;
  2807. }
  2808. return -1;
  2809. }
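// NB this works because branch_targets is filled in ascending pc order during
// the scan pass in sh2_translate, so the array is already sorted when searched.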
  2810. static void emit_branch_linkage_code(SH2 *sh2, struct block_desc *block, int tcache_id,
  2811. const struct linkage *targets, int target_count,
  2812. const struct linkage *links, int link_count)
  2813. {
  2814. struct block_link *bl;
  2815. int u, v, tmp;
  2816. emith_flush();
  2817. for (u = 0; u < link_count; u++) {
  2818. emith_pool_check();
  2819. // look up local branch targets
  2820. if (links[u].mask & 0x2) {
  2821. v = find_in_sorted_linkage(targets, target_count, links[u].pc);
  2822. if (v < 0 || ! targets[v].ptr) {
  2823. // forward branch not yet resolved, prepare external linking
  2824. emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
  2825. bl = dr_prepare_ext_branch(block->entryp, links[u].pc, sh2->is_slave, tcache_id);
  2826. if (bl)
  2827. bl->type = BL_LDJMP;
  2828. tmp = rcache_get_tmp_arg(0);
  2829. emith_move_r_imm(tmp, links[u].pc);
  2830. rcache_free_tmp(tmp);
  2831. emith_jump_patchable(sh2_drc_dispatcher);
  2832. } else if (emith_jump_patch_inrange(links[u].ptr, targets[v].ptr)) {
  2833. // inrange local branch
  2834. emith_jump_patch(links[u].ptr, targets[v].ptr, NULL);
  2835. } else {
  2836. // far local branch
  2837. emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
  2838. emith_jump(targets[v].ptr);
  2839. }
  2840. } else {
  2841. // external or exit, emit blx area entry
  2842. void *target = (links[u].mask & 0x1 ? sh2_drc_exit : sh2_drc_dispatcher);
  2843. if (links[u].bl)
  2844. links[u].bl->blx = tcache_ptr;
  2845. emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
  2846. tmp = rcache_get_tmp_arg(0);
  2847. emith_move_r_imm(tmp, links[u].pc & ~1);
  2848. rcache_free_tmp(tmp);
  2849. emith_jump(target);
  2850. }
  2851. }
  2852. }
  2853. #define DELAY_SAVE_T(sr) { \
  2854. int t_ = rcache_get_tmp(); \
  2855. emith_bic_r_imm(sr, T_save); \
  2856. emith_and_r_r_imm(t_, sr, 1); \
  2857. emith_or_r_r_lsl(sr, t_, T_SHIFT); \
  2858. rcache_free_tmp(t_); \
  2859. }
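// DELAY_SAVE_T copies the current T bit into the separate T_save position of
// SR, so a delay slot insn may clobber T while the branch condition evaluated
// afterwards still sees the pre-slot value.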
  2860. #define FLUSH_CYCLES(sr) \
  2861. if (cycles > 0) { \
  2862. emith_sub_r_imm(sr, cycles << 12); \
  2863. cycles = 0; \
  2864. }
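// as used here, the cached SR doubles as the cycle counter: the remaining
// cycle budget is kept in the bits from bit 12 up, which is why FLUSH_CYCLES
// subtracts cycles << 12 and the block entry code can check for expiry with a
// plain signed compare of sr against 0.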
  2865. static void *dr_get_pc_base(u32 pc, SH2 *sh2);
  2866. static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
  2867. {
  2868. // branch targets in current block
  2869. static struct linkage branch_targets[MAX_LOCAL_TARGETS];
  2870. int branch_target_count = 0;
  2871. // unresolved local or external targets with block link/exit area if needed
  2872. static struct linkage blx_targets[MAX_LOCAL_BRANCHES];
  2873. int blx_target_count = 0;
  2874. static u8 op_flags[BLOCK_INSN_LIMIT];
  2875. enum flg_states { FLG_UNKNOWN, FLG_UNUSED, FLG_0, FLG_1 };
  2876. struct drcf {
  2877. int delay_reg:8;
  2878. u32 loop_type:8;
  2879. u32 polling:8;
  2880. u32 pinning:1;
  2881. u32 test_irq:1;
  2882. u32 pending_branch_direct:1;
  2883. u32 pending_branch_indirect:1;
  2884. u32 Tflag:2, Mflag:2;
  2885. } drcf = { 0, };
  2886. #if LOOP_OPTIMIZER
2887. // loops with pinned registers for optimization
  2888. // pinned regs are like statics and don't need saving/restoring inside a loop
  2889. static struct linkage pinned_loops[MAX_LOCAL_TARGETS/16];
  2890. int pinned_loop_count = 0;
  2891. #endif
  2892. // PC of current, first, last SH2 insn
  2893. u32 pc, base_pc, end_pc;
  2894. u32 base_literals, end_literals;
  2895. u8 *block_entry_ptr;
  2896. struct block_desc *block;
  2897. struct block_entry *entry;
  2898. struct block_link *bl;
  2899. u16 *dr_pc_base;
  2900. struct op_data *opd;
  2901. int blkid_main = 0;
  2902. int skip_op = 0;
  2903. int tmp, tmp2;
  2904. int cycles;
  2905. int i, v;
  2906. u32 u, m1, m2, m3, m4;
  2907. int op;
  2908. u16 crc;
  2909. base_pc = sh2->pc;
  2910. // get base/validate PC
  2911. dr_pc_base = dr_get_pc_base(base_pc, sh2);
  2912. if (dr_pc_base == (void *)-1) {
  2913. printf("invalid PC, aborting: %08lx\n", (long)base_pc);
  2914. // FIXME: be less destructive
  2915. exit(1);
  2916. }
  2917. // initial passes to disassemble and analyze the block
  2918. crc = scan_block(base_pc, sh2->is_slave, op_flags, &end_pc, &base_literals, &end_literals);
  2919. end_literals = dr_check_nolit(base_literals, end_literals, tcache_id);
  2920. if (base_literals == end_literals) // map empty lit section to end of code
  2921. base_literals = end_literals = end_pc;
  2922. // if there is already a translated but inactive block, reuse it
  2923. block = dr_find_inactive_block(tcache_id, crc, base_pc, end_pc - base_pc,
  2924. base_literals, end_literals - base_literals);
  2925. if (block) {
  2926. dbg(2, "== %csh2 reuse block %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
  2927. base_pc, end_pc, base_literals, end_literals, block->entryp->tcache_ptr);
  2928. dr_activate_block(block, tcache_id, sh2->is_slave);
  2929. emith_update_cache();
  2930. return block->entryp[0].tcache_ptr;
  2931. }
  2932. // collect branch_targets that don't land on delay slots
  2933. m1 = m2 = m3 = m4 = v = op = 0;
  2934. for (pc = base_pc, i = 0; pc < end_pc; i++, pc += 2) {
  2935. if (op_flags[i] & OF_DELAY_OP)
  2936. op_flags[i] &= ~OF_BTARGET;
  2937. if (op_flags[i] & OF_BTARGET) {
  2938. if (branch_target_count < ARRAY_SIZE(branch_targets))
  2939. branch_targets[branch_target_count++] = (struct linkage) { .pc = pc };
  2940. else {
  2941. printf("warning: linkage overflow\n");
  2942. end_pc = pc;
  2943. break;
  2944. }
  2945. }
  2946. if (ops[i].op == OP_LDC && (ops[i].dest & BITMASK1(SHR_SR)) && pc+2 < end_pc)
  2947. op_flags[i+1] |= OF_BTARGET; // RTE entrypoint in case of SR.IMASK change
  2948. // unify T and SR since rcache doesn't know about "virtual" guest regs
  2949. if (ops[i].source & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
  2950. if (ops[i].dest & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
  2951. if (ops[i].dest & BITMASK1(SHR_T)) ops[i].dest |= BITMASK1(SHR_SR);
  2952. #if LOOP_DETECTION
  2953. // loop types detected:
  2954. // 1. target: ... BRA target -> idle loop
  2955. // 2. target: ... delay insn ... BF target -> delay loop
  2956. // 3. target: ... poll insn ... BF/BT target -> poll loop
  2957. // 4. target: ... poll insn ... BF/BT exit ... BRA target, exit: -> poll
  2958. // conditions:
  2959. // a. no further branch targets between target and back jump.
  2960. // b. no unconditional branch insn inside the loop.
  2961. // c. exactly one poll or delay insn is allowed inside a delay/poll loop
  2962. // (scan_block marks loops only if they meet conditions a through c)
  2963. // d. idle loops do not modify anything but PC,SR and contain no branches
  2964. // e. delay/poll loops do not modify anything but the concerned reg,PC,SR
  2965. // f. loading constants into registers inside the loop is allowed
  2966. // g. a delay/poll loop must have a conditional branch somewhere
  2967. // h. an idle loop must not have a conditional branch
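// illustrative SH2 fragments: an idle loop is "target: bra target / nop", a
// poll loop "target: mov.l @Rm,R0 / tst R0,R0 / bt target", a delay loop
// "target: dt Rn / bf target" - the latter two satisfy conditions c and g,
// the former condition h.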
  2968. if (op_flags[i] & OF_BTARGET) {
  2969. // possible loop entry point
  2970. drcf.loop_type = op_flags[i] & OF_LOOP;
  2971. drcf.pending_branch_direct = drcf.pending_branch_indirect = 0;
  2972. op = OF_IDLE_LOOP; // loop type
  2973. v = i;
  2974. m1 = m2 = m3 = m4 = 0;
2975. if (!drcf.loop_type) // reset basic loop if it isn't recognized as a loop
  2976. op_flags[i] &= ~OF_BASIC_LOOP;
  2977. }
  2978. if (drcf.loop_type) {
  2979. // calculate reg masks for loop pinning
  2980. m4 |= ops[i].source & ~m3;
  2981. m3 |= ops[i].dest;
  2982. // detect loop type, and store poll/delay register
  2983. if (op_flags[i] & OF_POLL_INSN) {
  2984. op = OF_POLL_LOOP;
  2985. m1 |= ops[i].dest; // loop poll/delay regs
  2986. } else if (op_flags[i] & OF_DELAY_INSN) {
  2987. op = OF_DELAY_LOOP;
  2988. m1 |= ops[i].dest;
  2989. } else if (ops[i].op != OP_LOAD_POOL && ops[i].op != OP_LOAD_CONST
  2990. && (ops[i].op != OP_MOVE || op != OF_POLL_LOOP)) {
  2991. // not (MOV @(PC) or MOV # or (MOV reg and poll)), condition f
  2992. m2 |= ops[i].dest; // regs modified by other insns
  2993. }
  2994. // branch detector
  2995. if (OP_ISBRAIMM(ops[i].op)) {
  2996. if (ops[i].imm == base_pc + 2*v)
  2997. drcf.pending_branch_direct = 1; // backward branch detected
  2998. else
  2999. op_flags[v] &= ~OF_BASIC_LOOP; // no basic loop
  3000. }
  3001. if (OP_ISBRACND(ops[i].op))
  3002. drcf.pending_branch_indirect = 1; // conditions g,h - cond.branch
  3003. // poll/idle loops terminate with their backwards branch to the loop start
  3004. if (drcf.pending_branch_direct && !(op_flags[i+1] & OF_DELAY_OP)) {
  3005. m2 &= ~(m1 | BITMASK3(SHR_PC, SHR_SR, SHR_T)); // conditions d,e + g,h
  3006. if (m2 || ((op == OF_IDLE_LOOP) == (drcf.pending_branch_indirect)))
  3007. op = 0; // conditions not met
  3008. op_flags[v] = (op_flags[v] & ~OF_LOOP) | op; // set loop type
  3009. drcf.loop_type = 0;
  3010. #if LOOP_OPTIMIZER
  3011. if (op_flags[v] & OF_BASIC_LOOP) {
  3012. m3 &= ~rcache_regs_static & ~BITMASK5(SHR_PC, SHR_PR, SHR_SR, SHR_T, SHR_MEM);
  3013. if (m3 && count_bits(m3) < count_bits(rcache_vregs_reg) &&
  3014. pinned_loop_count < ARRAY_SIZE(pinned_loops)-1) {
  3015. pinned_loops[pinned_loop_count++] =
  3016. (struct linkage) { .pc = base_pc + 2*v, .mask = m3 };
  3017. } else
  3018. op_flags[v] &= ~OF_BASIC_LOOP;
  3019. }
  3020. #endif
  3021. }
  3022. }
  3023. #endif
  3024. }
  3025. tcache_ptr = dr_prepare_cache(tcache_id, (end_pc - base_pc) / 2, branch_target_count);
  3026. #if (DRC_DEBUG & 4)
  3027. tcache_dsm_ptrs[tcache_id] = tcache_ptr;
  3028. #endif
  3029. block = dr_add_block(branch_target_count, base_pc, end_pc - base_pc,
  3030. base_literals, end_literals-base_literals, crc, sh2->is_slave, &blkid_main);
  3031. if (block == NULL)
  3032. return NULL;
  3033. block_entry_ptr = tcache_ptr;
  3034. dbg(2, "== %csh2 block #%d,%d %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
  3035. tcache_id, blkid_main, base_pc, end_pc, base_literals, end_literals, block_entry_ptr);
  3036. // clear stale state after compile errors
  3037. rcache_invalidate();
  3038. emith_invalidate_t();
  3039. drcf = (struct drcf) { 0 };
  3040. #if LOOP_OPTIMIZER
  3041. pinned_loops[pinned_loop_count].pc = -1;
  3042. pinned_loop_count = 0;
  3043. #endif
  3044. // -------------------------------------------------
  3045. // 3rd pass: actual compilation
  3046. pc = base_pc;
  3047. cycles = 0;
  3048. for (i = 0; pc < end_pc; i++)
  3049. {
  3050. u32 delay_dep_fw = 0, delay_dep_bk = 0;
  3051. int tmp3, tmp4;
  3052. int sr;
  3053. if (op_flags[i] & OF_BTARGET)
  3054. {
  3055. if (pc != base_pc)
  3056. {
  3057. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3058. FLUSH_CYCLES(sr);
  3059. emith_sync_t(sr);
  3060. drcf.Mflag = FLG_UNKNOWN;
  3061. rcache_flush();
  3062. emith_flush();
  3063. }
  3064. // make block entry
  3065. v = block->entry_count;
  3066. entry = &block->entryp[v];
  3067. if (v < branch_target_count)
  3068. {
  3069. entry = &block->entryp[v];
  3070. entry->pc = pc;
  3071. entry->tcache_ptr = tcache_ptr;
  3072. entry->links = entry->o_links = NULL;
  3073. #if (DRC_DEBUG & 2)
  3074. entry->block = block;
  3075. #endif
  3076. block->entry_count++;
  3077. dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p",
  3078. sh2->is_slave ? 's' : 'm', tcache_id, blkid_main,
  3079. pc, tcache_ptr);
  3080. }
  3081. else {
  3082. dbg(1, "too many entryp for block #%d,%d pc=%08x",
  3083. tcache_id, blkid_main, pc);
  3084. break;
  3085. }
  3086. v = find_in_sorted_linkage(branch_targets, branch_target_count, pc);
  3087. if (v >= 0)
  3088. branch_targets[v].ptr = tcache_ptr;
  3089. #if LOOP_DETECTION
  3090. drcf.loop_type = op_flags[i] & OF_LOOP;
  3091. drcf.delay_reg = -1;
  3092. drcf.polling = (drcf.loop_type == OF_POLL_LOOP ? MF_POLLING : 0);
  3093. #endif
  3094. rcache_clean();
  3095. #if (DRC_DEBUG & 0x10)
  3096. tmp = rcache_get_tmp_arg(0);
  3097. emith_move_r_imm(tmp, pc);
  3098. tmp = emit_memhandler_read(1);
  3099. tmp2 = rcache_get_tmp();
  3100. tmp3 = rcache_get_tmp();
  3101. emith_move_r_imm(tmp2, (s16)FETCH_OP(pc));
  3102. emith_move_r_imm(tmp3, 0);
  3103. emith_cmp_r_r(tmp, tmp2);
  3104. EMITH_SJMP_START(DCOND_EQ);
  3105. emith_read_r_r_offs_c(DCOND_NE, tmp3, tmp3, 0); // crash
  3106. EMITH_SJMP_END(DCOND_EQ);
  3107. rcache_free_tmp(tmp);
  3108. rcache_free_tmp(tmp2);
  3109. rcache_free_tmp(tmp3);
  3110. #endif
  3111. // check cycles
  3112. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3113. #if LOOP_OPTIMIZER
  3114. if (op_flags[i] & OF_BASIC_LOOP) {
  3115. if (pinned_loops[pinned_loop_count].pc == pc) {
  3116. // pin needed regs on loop entry
  3117. FOR_ALL_BITS_SET_DO(pinned_loops[pinned_loop_count].mask, v, rcache_pin_reg(v));
  3118. emith_flush();
  3119. // store current PC as loop target
  3120. pinned_loops[pinned_loop_count].ptr = tcache_ptr;
  3121. drcf.pinning = 1;
  3122. } else
  3123. op_flags[i] &= ~OF_BASIC_LOOP;
  3124. }
  3125. if (op_flags[i] & OF_BASIC_LOOP) {
  3126. // if exiting a pinned loop pinned regs must be written back to ctx
  3127. // since they are reloaded in the loop entry code
  3128. emith_cmp_r_imm(sr, 0);
3129. EMITH_JMP_START(DCOND_GT);
  3130. rcache_save_pinned();
  3131. if (blx_target_count < ARRAY_SIZE(blx_targets)) {
  3132. // exit via stub in blx table (saves some 1-3 insns in the main flow)
  3133. blx_targets[blx_target_count++] =
  3134. (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
  3135. emith_jump_patchable(tcache_ptr);
  3136. } else {
  3137. // blx table full, must inline exit code
  3138. tmp = rcache_get_tmp_arg(0);
  3139. emith_move_r_imm(tmp, pc);
  3140. emith_jump(sh2_drc_exit);
  3141. rcache_free_tmp(tmp);
  3142. }
  3143. EMITH_JMP_END(DCOND_GT);
  3144. } else
  3145. #endif
  3146. {
  3147. if (blx_target_count < ARRAY_SIZE(blx_targets)) {
  3148. // exit via stub in blx table (saves some 1-3 insns in the main flow)
  3149. emith_cmp_r_imm(sr, 0);
  3150. blx_targets[blx_target_count++] =
  3151. (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
3152. emith_jump_cond_patchable(DCOND_LE, tcache_ptr);
  3153. } else {
  3154. // blx table full, must inline exit code
  3155. tmp = rcache_get_tmp_arg(0);
  3156. emith_cmp_r_imm(sr, 0);
  3157. EMITH_SJMP_START(DCOND_GT);
3158. emith_move_r_imm_c(DCOND_LE, tmp, pc);
  3159. emith_jump_cond(DCOND_LE, sh2_drc_exit);
  3160. EMITH_SJMP_END(DCOND_GT);
  3161. rcache_free_tmp(tmp);
  3162. }
  3163. }
  3164. #if (DRC_DEBUG & 32)
  3165. // block hit counter
  3166. tmp = rcache_get_tmp_arg(0);
  3167. tmp2 = rcache_get_tmp_arg(1);
  3168. emith_move_r_ptr_imm(tmp, (uptr)entry);
  3169. emith_read_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  3170. emith_add_r_imm(tmp2, 1);
  3171. emith_write_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  3172. rcache_free_tmp(tmp);
  3173. rcache_free_tmp(tmp2);
  3174. #endif
  3175. #if (DRC_DEBUG & (8|256|512|1024))
  3176. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3177. emith_sync_t(sr);
  3178. rcache_clean();
  3179. tmp = rcache_used_hregs_mask();
  3180. emith_save_caller_regs(tmp);
  3181. emit_do_static_regs(1, 0);
  3182. rcache_get_reg_arg(2, SHR_SR, NULL);
  3183. tmp2 = rcache_get_tmp_arg(0);
  3184. tmp3 = rcache_get_tmp_arg(1);
  3185. tmp4 = rcache_get_tmp();
  3186. emith_move_r_ptr_imm(tmp2, tcache_ptr);
  3187. emith_move_r_r_ptr(tmp3, CONTEXT_REG);
  3188. emith_move_r_imm(tmp4, pc);
  3189. emith_ctx_write(tmp4, SHR_PC * 4);
  3190. rcache_invalidate_tmp();
  3191. emith_abicall(sh2_drc_log_entry);
  3192. emith_restore_caller_regs(tmp);
  3193. #endif
  3194. do_host_disasm(tcache_id);
  3195. rcache_unlock_all();
  3196. }
  3197. #ifdef DRC_CMP
  3198. if (!(op_flags[i] & OF_DELAY_OP)) {
  3199. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3200. FLUSH_CYCLES(sr);
  3201. emith_sync_t(sr);
  3202. emit_move_r_imm32(SHR_PC, pc);
  3203. rcache_clean();
  3204. tmp = rcache_used_hregs_mask();
  3205. emith_save_caller_regs(tmp);
  3206. emit_do_static_regs(1, 0);
  3207. emith_pass_arg_r(0, CONTEXT_REG);
  3208. emith_abicall(do_sh2_cmp);
  3209. emith_restore_caller_regs(tmp);
  3210. }
  3211. #endif
  3212. // emit blx area if limits are approached
  3213. if (blx_target_count && (blx_target_count > ARRAY_SIZE(blx_targets)-4 ||
  3214. !emith_jump_patch_inrange(blx_targets[0].ptr, tcache_ptr+0x100))) {
  3215. u8 *jp;
  3216. rcache_invalidate_tmp();
  3217. jp = tcache_ptr;
  3218. emith_jump_patchable(tcache_ptr);
  3219. emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
  3220. branch_target_count, blx_targets, blx_target_count);
  3221. blx_target_count = 0;
  3222. do_host_disasm(tcache_id);
  3223. emith_jump_patch(jp, tcache_ptr, NULL);
  3224. }
  3225. emith_pool_check();
  3226. opd = &ops[i];
  3227. op = FETCH_OP(pc);
  3228. #if (DRC_DEBUG & 4)
  3229. DasmSH2(sh2dasm_buff, pc, op);
  3230. if (op_flags[i] & OF_BTARGET) {
  3231. if ((op_flags[i] & OF_LOOP) == OF_DELAY_LOOP) tmp3 = '+';
  3232. else if ((op_flags[i] & OF_LOOP) == OF_POLL_LOOP) tmp3 = '=';
  3233. else if ((op_flags[i] & OF_LOOP) == OF_IDLE_LOOP) tmp3 = '~';
  3234. else tmp3 = '*';
  3235. } else if (drcf.loop_type) tmp3 = '.';
  3236. else tmp3 = ' ';
  3237. printf("%c%08lx %04x %s\n", tmp3, (ulong)pc, op, sh2dasm_buff);
  3238. #endif
  3239. pc += 2;
  3240. #if (DRC_DEBUG & 2)
  3241. insns_compiled++;
  3242. #endif
  3243. if (skip_op > 0) {
  3244. skip_op--;
  3245. continue;
  3246. }
  3247. if (op_flags[i] & OF_DELAY_OP)
  3248. {
  3249. // handle delay slot dependencies
  3250. delay_dep_fw = opd->dest & ops[i-1].source;
  3251. delay_dep_bk = opd->source & ops[i-1].dest;
  3252. if (delay_dep_fw & BITMASK1(SHR_T)) {
  3253. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3254. emith_sync_t(sr);
  3255. DELAY_SAVE_T(sr);
  3256. }
  3257. if (delay_dep_bk & BITMASK1(SHR_PC)) {
  3258. if (opd->op != OP_LOAD_POOL && opd->op != OP_MOVA) {
  3259. // can only be those 2 really..
  3260. elprintf_sh2(sh2, EL_ANOMALY,
  3261. "drc: illegal slot insn %04x @ %08x?", op, pc - 2);
  3262. }
  3263. // store PC for MOVA/MOV @PC address calculation
  3264. if (opd->imm != 0)
  3265. ; // case OP_BRANCH - addr already resolved in scan_block
  3266. else {
  3267. switch (ops[i-1].op) {
  3268. case OP_BRANCH:
  3269. emit_move_r_imm32(SHR_PC, ops[i-1].imm);
  3270. break;
  3271. case OP_BRANCH_CT:
  3272. case OP_BRANCH_CF:
  3273. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3274. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
  3275. emith_move_r_imm(tmp, pc);
  3276. tmp2 = emith_tst_t(sr, (ops[i-1].op == OP_BRANCH_CT));
  3277. tmp3 = emith_invert_cond(tmp2);
  3278. EMITH_SJMP_START(tmp3);
  3279. emith_move_r_imm_c(tmp2, tmp, ops[i-1].imm);
  3280. EMITH_SJMP_END(tmp3);
  3281. break;
  3282. case OP_BRANCH_N: // BT/BF known not to be taken
  3283. // XXX could modify opd->imm instead?
  3284. emit_move_r_imm32(SHR_PC, pc);
  3285. break;
  3286. // case OP_BRANCH_R OP_BRANCH_RF - PC already loaded
  3287. }
  3288. }
  3289. }
  3290. //if (delay_dep_fw & ~BITMASK1(SHR_T))
  3291. // dbg(1, "unhandled delay_dep_fw: %x", delay_dep_fw & ~BITMASK1(SHR_T));
  3292. if (delay_dep_bk & ~BITMASK2(SHR_PC, SHR_PR))
  3293. dbg(1, "unhandled delay_dep_bk: %x", delay_dep_bk);
  3294. }
  3295. // inform cache about future register usage
  3296. u32 late = 0; // regs read by future ops
  3297. u32 write = 0; // regs written to (to detect write before read)
  3298. u32 soon = 0; // regs read soon
  3299. for (v = 1; v <= 9; v++) {
  3300. // no sense in looking any further than the next rcache flush
  3301. tmp = ((op_flags[i+v] & OF_BTARGET) || (op_flags[i+v-1] & OF_DELAY_OP) ||
  3302. (OP_ISBRACND(opd[v-1].op) && !(op_flags[i+v] & OF_DELAY_OP)));
  3303. // XXX looking behind cond branch to avoid evicting regs used later?
  3304. if (pc + 2*v <= end_pc && !tmp) { // (pc already incremented above)
  3305. late |= opd[v].source & ~write;
  3306. // ignore source regs after they have been written to
  3307. write |= opd[v].dest;
  3308. // regs needed in the next few instructions
  3309. if (v <= 4)
  3310. soon = late;
  3311. } else
  3312. break;
  3313. }
  3314. rcache_set_usage_now(opd[0].source); // current insn
  3315. rcache_set_usage_soon(soon); // insns 1-4
  3316. rcache_set_usage_late(late & ~soon); // insns 5-9
  3317. rcache_set_usage_discard(write & ~(late|soon));
  3318. if (v <= 9)
  3319. // upcoming rcache_flush, start writing back unused dirty stuff
  3320. rcache_clean_masked(rcache_dirty_mask() & ~(write|opd[0].dest));
  3321. switch (opd->op)
  3322. {
  3323. case OP_BRANCH_N:
  3324. // never taken, just use up cycles
  3325. goto end_op;
  3326. case OP_BRANCH:
  3327. case OP_BRANCH_CT:
  3328. case OP_BRANCH_CF:
  3329. if (opd->dest & BITMASK1(SHR_PR))
  3330. emit_move_r_imm32(SHR_PR, pc + 2);
  3331. drcf.pending_branch_direct = 1;
  3332. goto end_op;
  3333. case OP_BRANCH_R:
  3334. if (opd->dest & BITMASK1(SHR_PR))
  3335. emit_move_r_imm32(SHR_PR, pc + 2);
  3336. emit_move_r_r(SHR_PC, opd->rm);
  3337. drcf.pending_branch_indirect = 1;
  3338. goto end_op;
  3339. case OP_BRANCH_RF:
  3340. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3341. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
  3342. emith_move_r_imm(tmp, pc + 2);
  3343. if (opd->dest & BITMASK1(SHR_PR)) {
  3344. tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE, NULL);
  3345. emith_move_r_r(tmp3, tmp);
  3346. }
  3347. emith_add_r_r(tmp, tmp2);
  3348. if (gconst_get(GET_Rn(), &u))
  3349. gconst_set(SHR_PC, pc + 2 + u);
  3350. drcf.pending_branch_indirect = 1;
  3351. goto end_op;
  3352. case OP_SLEEP: // SLEEP 0000000000011011
  3353. printf("TODO sleep\n");
  3354. goto end_op;
  3355. case OP_RTE: // RTE 0000000000101011
  3356. emith_invalidate_t();
  3357. // pop PC
  3358. tmp = emit_memhandler_read_rr(sh2, SHR_PC, SHR_SP, 0, 2 | MF_POSTINCR);
  3359. rcache_free(tmp);
  3360. // pop SR
  3361. tmp = emit_memhandler_read_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_POSTINCR);
  3362. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3363. emith_write_sr(sr, tmp);
  3364. rcache_free_tmp(tmp);
  3365. drcf.test_irq = 1;
  3366. drcf.pending_branch_indirect = 1;
  3367. goto end_op;
  3368. case OP_UNDEFINED:
  3369. elprintf_sh2(sh2, EL_ANOMALY, "drc: unhandled op %04x @ %08x", op, pc-2);
  3370. opd->imm = (op_flags[i] & OF_B_IN_DS) ? 6 : 4;
  3371. // fallthrough
  3372. case OP_TRAPA: // TRAPA #imm 11000011iiiiiiii
  3373. // push SR
  3374. tmp = rcache_get_reg_arg(1, SHR_SR, &tmp2);
  3375. emith_sync_t(tmp2);
  3376. emith_clear_msb(tmp, tmp2, 22);
  3377. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  3378. // push PC
  3379. if (opd->op == OP_TRAPA) {
  3380. tmp = rcache_get_tmp_arg(1);
  3381. emith_move_r_imm(tmp, pc);
  3382. } else if (drcf.pending_branch_indirect) {
  3383. tmp = rcache_get_reg_arg(1, SHR_PC, NULL);
  3384. } else {
  3385. tmp = rcache_get_tmp_arg(1);
  3386. emith_move_r_imm(tmp, pc - 2);
  3387. }
  3388. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  3389. // obtain new PC
  3390. emit_memhandler_read_rr(sh2, SHR_PC, SHR_VBR, opd->imm * 4, 2);
  3391. // indirect jump -> back to dispatcher
  3392. drcf.pending_branch_indirect = 1;
  3393. goto end_op;
  3394. case OP_LOAD_POOL:
  3395. #if PROPAGATE_CONSTANTS
  3396. if ((opd->imm && opd->imm >= base_pc && opd->imm < end_literals) ||
  3397. dr_is_rom(opd->imm))
  3398. {
  3399. if (opd->size == 2)
  3400. u = FETCH32(opd->imm);
  3401. else
  3402. u = (s16)FETCH_OP(opd->imm);
  3403. // tweak for Blackthorne: avoid stack overwriting
  3404. if (GET_Rn() == SHR_SP && u == 0x0603f800) u = 0x0603f880;
  3405. gconst_new(GET_Rn(), u);
  3406. }
  3407. else
  3408. #endif
  3409. {
  3410. if (opd->imm != 0) {
  3411. tmp = rcache_get_tmp_arg(0);
  3412. emith_move_r_imm(tmp, opd->imm);
  3413. } else {
  3414. // have to calculate read addr from PC for delay slot
  3415. tmp = rcache_get_reg_arg(0, SHR_PC, &tmp2);
  3416. if (opd->size == 2) {
  3417. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  3418. emith_bic_r_imm(tmp, 3);
  3419. }
  3420. else
  3421. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 2);
  3422. }
  3423. tmp2 = emit_memhandler_read(opd->size);
  3424. tmp3 = rcache_map_reg(GET_Rn(), tmp2);
  3425. if (tmp3 != tmp2) {
  3426. emith_move_r_r(tmp3, tmp2);
  3427. rcache_free_tmp(tmp2);
  3428. }
  3429. }
  3430. goto end_op;
  3431. case OP_MOVA: // MOVA @(disp,PC),R0 11000111dddddddd
  3432. if (opd->imm != 0)
  3433. emit_move_r_imm32(SHR_R0, opd->imm);
  3434. else {
  3435. // have to calculate addr from PC for delay slot
  3436. tmp2 = rcache_get_reg(SHR_PC, RC_GR_READ, NULL);
  3437. tmp = rcache_get_reg(SHR_R0, RC_GR_WRITE, NULL);
  3438. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  3439. emith_bic_r_imm(tmp, 3);
  3440. }
  3441. goto end_op;
  3442. }
  3443. switch ((op >> 12) & 0x0f)
  3444. {
  3445. /////////////////////////////////////////////
  3446. case 0x00:
  3447. switch (op & 0x0f)
  3448. {
  3449. case 0x02:
  3450. switch (GET_Fx())
  3451. {
  3452. case 0: // STC SR,Rn 0000nnnn00000010
  3453. tmp2 = SHR_SR;
  3454. break;
  3455. case 1: // STC GBR,Rn 0000nnnn00010010
  3456. tmp2 = SHR_GBR;
  3457. break;
  3458. case 2: // STC VBR,Rn 0000nnnn00100010
  3459. tmp2 = SHR_VBR;
  3460. break;
  3461. default:
  3462. goto default_;
  3463. }
  3464. if (tmp2 == SHR_SR) {
  3465. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3466. emith_sync_t(sr);
  3467. tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3468. emith_clear_msb(tmp, sr, 22); // reserved bits defined by ISA as 0
  3469. } else
  3470. emit_move_r_r(GET_Rn(), tmp2);
  3471. goto end_op;
  3472. case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
  3473. case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
  3474. case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
  3475. emit_indirect_indexed_write(sh2, GET_Rm(), SHR_R0, GET_Rn(), op & 3);
  3476. goto end_op;
  3477. case 0x07: // MUL.L Rm,Rn 0000nnnnmmmm0111
  3478. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3479. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3480. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  3481. emith_mul(tmp3, tmp2, tmp);
  3482. goto end_op;
  3483. case 0x08:
  3484. switch (GET_Fx())
  3485. {
  3486. case 0: // CLRT 0000000000001000
  3487. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3488. #if T_OPTIMIZER
  3489. if (~rcache_regs_discard & BITMASK1(SHR_T))
  3490. #endif
  3491. emith_set_t(sr, 0);
  3492. break;
  3493. case 1: // SETT 0000000000011000
  3494. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3495. #if T_OPTIMIZER
  3496. if (~rcache_regs_discard & BITMASK1(SHR_T))
  3497. #endif
  3498. emith_set_t(sr, 1);
  3499. break;
  3500. case 2: // CLRMAC 0000000000101000
  3501. emit_move_r_imm32(SHR_MACL, 0);
  3502. emit_move_r_imm32(SHR_MACH, 0);
  3503. break;
  3504. default:
  3505. goto default_;
  3506. }
  3507. goto end_op;
  3508. case 0x09:
  3509. switch (GET_Fx())
  3510. {
  3511. case 0: // NOP 0000000000001001
  3512. break;
  3513. case 1: // DIV0U 0000000000011001
  3514. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3515. emith_invalidate_t();
  3516. emith_bic_r_imm(sr, M|Q|T);
  3517. drcf.Mflag = FLG_0;
  3518. #if DIV_OPTIMIZER
  3519. if (div(opd).div1 == 16 && div(opd).ro == div(opd).rn) {
  3520. // divide 32/16
  3521. tmp = rcache_get_tmp_arg(1);
  3522. emith_add_r_r_ptr_imm(tmp, CONTEXT_REG, offsetof(SH2, drc_tmp));
  3523. rcache_get_reg_arg(0, div(opd).rn, NULL);
  3524. rcache_get_reg_arg(2, div(opd).rm, NULL);
  3525. rcache_invalidate_tmp();
  3526. emith_abicall(sh2_drc_divu32);
  3527. tmp = rcache_get_tmp_ret();
  3528. tmp2 = rcache_map_reg(div(opd).rn, tmp);
  3529. if (tmp != tmp2)
  3530. emith_move_r_r(tmp2, tmp);
  3531. tmp3 = rcache_get_tmp();
  3532. emith_and_r_r_imm(tmp3, tmp2, 1); // Q = !Rn[0]
  3533. emith_eor_r_r_imm(tmp3, tmp3, 1);
  3534. emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);
  3535. emith_ctx_read(tmp3, offsetof(SH2, drc_tmp));
  3536. emith_or_r_r_r(sr, sr, tmp3); // T
  3537. rcache_free_tmp(tmp3);
  3538. skip_op = div(opd).div1 + div(opd).rotcl;
  3539. }
  3540. else if (div(opd).div1 == 32 && div(opd).ro != div(opd).rn) {
  3541. // divide 64/32
  3542. tmp4 = rcache_get_reg(div(opd).ro, RC_GR_READ, NULL);
  3543. emith_ctx_write(tmp4, offsetof(SH2, drc_tmp));
  3544. tmp = rcache_get_tmp_arg(1);
  3545. emith_add_r_r_ptr_imm(tmp, CONTEXT_REG, offsetof(SH2, drc_tmp));
  3546. rcache_get_reg_arg(0, div(opd).rn, NULL);
  3547. rcache_get_reg_arg(2, div(opd).rm, NULL);
  3548. rcache_invalidate_tmp();
  3549. emith_abicall(sh2_drc_divu64);
  3550. tmp = rcache_get_tmp_ret();
  3551. tmp2 = rcache_map_reg(div(opd).rn, tmp);
  3552. tmp4 = rcache_get_reg(div(opd).ro, RC_GR_WRITE, NULL);
  3553. if (tmp != tmp2)
  3554. emith_move_r_r(tmp2, tmp);
  3555. emith_ctx_read(tmp4, offsetof(SH2, drc_tmp));
  3556. tmp3 = rcache_get_tmp();
  3557. emith_and_r_r_imm(tmp3, tmp4, 1); // Q = !Ro[0]
  3558. emith_eor_r_r_imm(tmp3, tmp3, 1);
  3559. emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);
  3560. rcache_free_tmp(tmp3);
  3561. skip_op = div(opd).div1 + div(opd).rotcl;
  3562. }
  3563. #endif
  3564. break;
  3565. case 2: // MOVT Rn 0000nnnn00101001
  3566. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3567. emith_sync_t(sr);
  3568. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3569. emith_clear_msb(tmp2, sr, 31);
  3570. break;
  3571. default:
  3572. goto default_;
  3573. }
  3574. goto end_op;
  3575. case 0x0a:
  3576. switch (GET_Fx())
  3577. {
  3578. case 0: // STS MACH,Rn 0000nnnn00001010
  3579. tmp2 = SHR_MACH;
  3580. break;
  3581. case 1: // STS MACL,Rn 0000nnnn00011010
  3582. tmp2 = SHR_MACL;
  3583. break;
  3584. case 2: // STS PR,Rn 0000nnnn00101010
  3585. tmp2 = SHR_PR;
  3586. break;
  3587. default:
  3588. goto default_;
  3589. }
  3590. emit_move_r_r(GET_Rn(), tmp2);
  3591. goto end_op;
  3592. case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
  3593. case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
  3594. case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
  3595. emit_indirect_indexed_read(sh2, GET_Rn(), SHR_R0, GET_Rm(), (op & 3) | drcf.polling);
  3596. goto end_op;
  3597. case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
  3598. emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
  3599. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3600. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
  3601. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
  3602. emith_sh2_macl(tmp3, tmp4, tmp, tmp2, sr);
  3603. rcache_free_tmp(tmp2);
  3604. rcache_free_tmp(tmp);
  3605. goto end_op;
  3606. }
  3607. goto default_;
  3608. /////////////////////////////////////////////
  3609. case 0x01: // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
  3610. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), (op & 0x0f) * 4, 2);
  3611. goto end_op;
  3612. case 0x02:
  3613. switch (op & 0x0f)
  3614. {
  3615. case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
  3616. case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
  3617. case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
  3618. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, op & 3);
  3619. goto end_op;
  3620. case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
  3621. case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
  3622. case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
  3623. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, (op & 3) | MF_PREDECR);
  3624. goto end_op;
  3625. case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
  3626. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3627. emith_invalidate_t();
  3628. emith_bic_r_imm(sr, M|Q|T);
  3629. drcf.Mflag = FLG_UNKNOWN;
  3630. #if DIV_OPTIMIZER
  3631. if (div(opd).div1 == 16 && div(opd).ro == div(opd).rn) {
  3632. // divide 32/16
  3633. tmp = rcache_get_tmp_arg(1);
  3634. emith_add_r_r_ptr_imm(tmp, CONTEXT_REG, offsetof(SH2, drc_tmp));
  3635. rcache_get_reg_arg(0, div(opd).rn, NULL);
  3636. tmp2 = rcache_get_reg_arg(2, div(opd).rm, NULL);
  3637. tmp3 = rcache_get_tmp();
  3638. emith_lsr(tmp3, tmp2, 31);
  3639. emith_or_r_r_lsl(sr, tmp3, M_SHIFT); // M = Rm[31]
  3640. rcache_invalidate_tmp();
  3641. emith_abicall(sh2_drc_divs32);
  3642. tmp = rcache_get_tmp_ret();
  3643. tmp2 = rcache_map_reg(div(opd).rn, tmp);
  3644. if (tmp != tmp2)
  3645. emith_move_r_r(tmp2, tmp);
  3646. tmp3 = rcache_get_tmp();
  3647. emith_eor_r_r_r_lsr(tmp3, tmp2, sr, M_SHIFT);
  3648. emith_and_r_r_imm(tmp3, tmp3, 1);
  3649. emith_eor_r_r_imm(tmp3, tmp3, 1);
  3650. emith_or_r_r_lsl(sr, tmp3, Q_SHIFT); // Q = !Rn[0]^M
  3651. emith_ctx_read(tmp3, offsetof(SH2, drc_tmp));
  3652. emith_or_r_r_r(sr, sr, tmp3); // T
  3653. rcache_free_tmp(tmp3);
  3654. skip_op = div(opd).div1 + div(opd).rotcl;
  3655. }
  3656. else if (div(opd).div1 == 32 && div(opd).ro != div(opd).rn) {
  3657. // divide 64/32
  3658. tmp4 = rcache_get_reg(div(opd).ro, RC_GR_READ, NULL);
  3659. emith_ctx_write(tmp4, offsetof(SH2, drc_tmp));
  3660. tmp = rcache_get_reg_arg(0, div(opd).rn, NULL);
  3661. tmp2 = rcache_get_reg_arg(2, div(opd).rm, NULL);
  3662. tmp3 = rcache_get_tmp_arg(1);
  3663. emith_lsr(tmp3, tmp2, 31);
  3664. emith_or_r_r_lsl(sr, tmp3, M_SHIFT); // M = Rm[31]
  3665. emith_eor_r_r_lsr(tmp3, tmp, 31);
  3666. emith_or_r_r(sr, tmp3); // T = Rn[31]^M
  3667. emith_add_r_r_ptr_imm(tmp3, CONTEXT_REG, offsetof(SH2, drc_tmp));
  3668. rcache_invalidate_tmp();
  3669. emith_abicall(sh2_drc_divs64);
  3670. tmp = rcache_get_tmp_ret();
  3671. tmp2 = rcache_map_reg(div(opd).rn, tmp);
  3672. tmp4 = rcache_get_reg(div(opd).ro, RC_GR_WRITE, NULL);
  3673. if (tmp != tmp2)
  3674. emith_move_r_r(tmp2, tmp);
  3675. emith_ctx_read(tmp4, offsetof(SH2, drc_tmp));
  3676. tmp3 = rcache_get_tmp();
  3677. emith_eor_r_r_r_lsr(tmp3, tmp4, sr, M_SHIFT);
  3678. emith_and_r_r_imm(tmp3, tmp3, 1);
  3679. emith_eor_r_r_imm(tmp3, tmp3, 1);
  3680. emith_or_r_r_lsl(sr, tmp3, Q_SHIFT); // Q = !Ro[0]^M
  3681. rcache_free_tmp(tmp3);
  3682. skip_op = div(opd).div1 + div(opd).rotcl;
  3683. } else
  3684. #endif
  3685. {
  3686. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3687. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3688. tmp = rcache_get_tmp();
  3689. emith_lsr(tmp, tmp2, 31); // Q = Nn
  3690. emith_or_r_r_lsl(sr, tmp, Q_SHIFT);
  3691. emith_lsr(tmp, tmp3, 31); // M = Nm
  3692. emith_or_r_r_lsl(sr, tmp, M_SHIFT);
  3693. emith_eor_r_r_lsr(tmp, tmp2, 31);
  3694. emith_or_r_r(sr, tmp); // T = Q^M
  3695. rcache_free(tmp);
  3696. }
  3697. goto end_op;
  3698. case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
  3699. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3700. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3701. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3702. emith_clr_t_cond(sr);
  3703. emith_tst_r_r(tmp2, tmp3);
  3704. emith_set_t_cond(sr, DCOND_EQ);
  3705. goto end_op;
  3706. case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
  3707. if (GET_Rm() != GET_Rn()) {
  3708. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3709. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3710. emith_and_r_r_r(tmp, tmp3, tmp2);
  3711. }
  3712. goto end_op;
  3713. case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
  3714. #if PROPAGATE_CONSTANTS
  3715. if (GET_Rn() == GET_Rm()) {
  3716. gconst_new(GET_Rn(), 0);
  3717. goto end_op;
  3718. }
  3719. #endif
  3720. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3721. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3722. emith_eor_r_r_r(tmp, tmp3, tmp2);
  3723. goto end_op;
  3724. case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
  3725. if (GET_Rm() != GET_Rn()) {
  3726. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3727. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3728. emith_or_r_r_r(tmp, tmp3, tmp2);
  3729. }
  3730. goto end_op;
  3731. case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
  3732. tmp = rcache_get_tmp();
  3733. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3734. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3735. emith_eor_r_r_r(tmp, tmp2, tmp3);
  3736. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3737. emith_clr_t_cond(sr);
  3738. emith_tst_r_imm(tmp, 0x000000ff);
  3739. EMITH_SJMP_START(DCOND_EQ);
  3740. emith_tst_r_imm_c(DCOND_NE, tmp, 0x0000ff00);
  3741. EMITH_SJMP_START(DCOND_EQ);
  3742. emith_tst_r_imm_c(DCOND_NE, tmp, 0x00ff0000);
  3743. EMITH_SJMP_START(DCOND_EQ);
  3744. emith_tst_r_imm_c(DCOND_NE, tmp, 0xff000000);
  3745. EMITH_SJMP_END(DCOND_EQ);
  3746. EMITH_SJMP_END(DCOND_EQ);
  3747. EMITH_SJMP_END(DCOND_EQ);
  3748. emith_set_t_cond(sr, DCOND_EQ);
  3749. rcache_free_tmp(tmp);
  3750. goto end_op;
      case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        emith_lsr(tmp, tmp3, 16);
        emith_or_r_r_lsl(tmp, tmp2, 16);
        goto end_op;
      case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = tmp3;
        if (op & 1) {
          if (! rcache_is_s16(tmp2)) {
            emith_sext(tmp, tmp2, 16);
            tmp2 = tmp;
          }
          if (! rcache_is_s16(tmp3)) {
            tmp4 = rcache_get_tmp();
            emith_sext(tmp4, tmp3, 16);
          }
        } else {
          if (! rcache_is_u16(tmp2)) {
            emith_clear_msb(tmp, tmp2, 16);
            tmp2 = tmp;
          }
          if (! rcache_is_u16(tmp3)) {
            tmp4 = rcache_get_tmp();
            emith_clear_msb(tmp4, tmp3, 16);
          }
        }
        emith_mul(tmp, tmp2, tmp4);
        if (tmp4 != tmp3)
          rcache_free_tmp(tmp4);
        goto end_op;
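      // the rcache s16/u16 value tracking lets the 16-bit sign/zero
      // extension be skipped when an operand is already known to be
      // properly extended.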
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        switch (op & 0x07)
        {
        case 0x00: // CMP/EQ
          tmp = DCOND_EQ;
          break;
        case 0x02: // CMP/HS
          tmp = DCOND_HS;
          break;
        case 0x03: // CMP/GE
          tmp = DCOND_GE;
          break;
        case 0x06: // CMP/HI
          tmp = DCOND_HI;
          break;
        case 0x07: // CMP/GT
          tmp = DCOND_GT;
          break;
        }
        emith_clr_t_cond(sr);
        emith_cmp_r_r(tmp2, tmp3);
        emith_set_t_cond(sr, tmp);
        goto end_op;
      case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
        // Q1 = carry(Rn = (Rn << 1) | T)
        // if Q ^ M
        //   Q2 = carry(Rn += Rm)
        // else
        //   Q2 = carry(Rn -= Rm)
        // Q = M ^ Q1 ^ Q2
        // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        tmp = rcache_get_tmp();
        if (drcf.Mflag != FLG_0) {
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT); // Q ^= M
        }
        rcache_free_tmp(tmp);
        // shift Rn, add T, add or sub Rm, set T = !(Q1 ^ Q2)
        // in: (Q ^ M) passed in Q
        emith_sh2_div1_step(tmp2, tmp3, sr);
        tmp = rcache_get_tmp();
        emith_or_r_imm(sr, Q); // Q = !T
        emith_and_r_r_imm(tmp, sr, T);
        emith_eor_r_r_lsl(sr, tmp, Q_SHIFT);
        if (drcf.Mflag != FLG_0) { // Q = M ^ !T = M ^ Q1 ^ Q2
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT);
        }
        rcache_free_tmp(tmp);
        goto end_op;
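      // a full SH2 division runs DIV0U/DIV0S once and then this DIV1 step
      // repeatedly (one quotient bit per step), so keeping Q/M cached in
      // sr bits pays off across the whole sequence.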
      case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_u64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
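      // the full 64-bit product lands in MACH:MACL (MACH holds the high
      // 32 bits).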
      case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
      case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 4) {
          emith_add_r_r_r(tmp, tmp3, tmp2);
        } else
          emith_sub_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4) {
            emith_t_to_carry(sr, 0);
            emith_adc_r_r_r(tmp, tmp3, tmp2);
          } else {
            emith_t_to_carry(sr, 1);
            emith_sbc_r_r_r(tmp, tmp3, tmp2);
          }
        } else
#endif
        {
          EMITH_HINT_COND(DCOND_CS);
          if (op & 4) { // adc
            emith_tpop_carry(sr, 0);
            emith_adcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 0);
          } else {
            emith_tpop_carry(sr, 1);
            emith_sbcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 1);
          }
        }
        goto end_op;
      case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4)
            emith_add_r_r_r(tmp, tmp3, tmp2);
          else
            emith_sub_r_r_r(tmp, tmp3, tmp2);
        } else
#endif
        {
          emith_clr_t_cond(sr);
          EMITH_HINT_COND(DCOND_VS);
          if (op & 4)
            emith_addf_r_r_r(tmp, tmp3, tmp2);
          else
            emith_subf_r_r_r(tmp, tmp3, tmp2);
          emith_set_t_cond(sr, DCOND_VS);
        }
        goto end_op;
      case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_s64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn 0100nnnn00000000
        case 2: // SHAL Rn 0100nnnn00100000
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T))
            emith_lsl(tmp, tmp2, 1);
          else
#endif
          {
            emith_invalidate_t();
            emith_lslf(tmp, tmp2, 1);
            emith_carry_to_t(sr, 0);
          }
          goto end_op;
        case 1: // DT Rn 0100nnnn00010000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if LOOP_DETECTION
          if (drcf.loop_type == OF_DELAY_LOOP) {
            if (drcf.delay_reg == -1)
              drcf.delay_reg = GET_Rn();
            else
              drcf.polling = drcf.loop_type = 0;
          }
#endif
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          emith_clr_t_cond(sr);
          EMITH_HINT_COND(DCOND_EQ);
          emith_subf_r_r_imm(tmp, tmp2, 1);
          emith_set_t_cond(sr, DCOND_EQ);
          emith_or_r_imm(sr, SH2_NO_POLLING);
          goto end_op;
        }
        goto default_;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn 0100nnnn00000001
        case 2: // SHAR Rn 0100nnnn00100001
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            if (op & 0x20)
              emith_asr(tmp, tmp2, 1);
            else
              emith_lsr(tmp, tmp2, 1);
          } else
#endif
          {
            emith_invalidate_t();
            if (op & 0x20) {
              emith_asrf(tmp, tmp2, 1);
            } else
              emith_lsrf(tmp, tmp2, 1);
            emith_carry_to_t(sr, 0);
          }
          goto end_op;
        case 1: // CMP/PZ Rn 0100nnnn00010001
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_GE);
          goto end_op;
        }
        goto default_;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
          tmp = SHR_MACH;
          break;
        case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
          tmp = SHR_MACL;
          break;
        case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
          tmp = SHR_PR;
          break;
        case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
          tmp = SHR_SR;
          break;
        case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
          tmp = SHR_GBR;
          break;
        case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
          tmp = SHR_VBR;
          break;
        default:
          goto default_;
        }
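        // tmp now selects the source system register; SR additionally
        // needs T synced into it and the reserved bits cleared before
        // the pre-decrement store below.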
        if (tmp == SHR_SR) {
          tmp3 = rcache_get_reg_arg(1, tmp, &tmp4);
          emith_sync_t(tmp4);
          emith_clear_msb(tmp3, tmp4, 22); // reserved bits defined by ISA as 0
        } else
          tmp3 = rcache_get_reg_arg(1, tmp, NULL);
        emit_memhandler_write_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_PREDECR);
        goto end_op;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn 0100nnnn00000100
        case 0x05: // ROTR Rn 0100nnnn00000101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            if (op & 1)
              emith_ror(tmp, tmp2, 1);
            else
              emith_rol(tmp, tmp2, 1);
          } else
#endif
          {
            emith_invalidate_t();
            if (op & 1)
              emith_rorf(tmp, tmp2, 1);
            else
              emith_rolf(tmp, tmp2, 1);
            emith_carry_to_t(sr, 0);
          }
          goto end_op;
        case 0x24: // ROTCL Rn 0100nnnn00100100
        case 0x25: // ROTCR Rn 0100nnnn00100101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            emith_t_to_carry(sr, 0);
            if (op & 1)
              emith_rorc(tmp);
            else
              emith_rolc(tmp);
          } else
#endif
          {
            emith_tpop_carry(sr, 0);
            if (op & 1)
              emith_rorcf(tmp);
            else
              emith_rolcf(tmp);
            emith_tpush_carry(sr, 0);
          }
          goto end_op;
        case 0x15: // CMP/PL Rn 0100nnnn00010101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_GT);
          goto end_op;
        }
        goto default_;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
          tmp = SHR_MACH;
          break;
        case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
          tmp = SHR_MACL;
          break;
        case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
          tmp = SHR_PR;
          break;
        case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
          tmp = SHR_SR;
          break;
        case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
          tmp = SHR_GBR;
          break;
        case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
          tmp = SHR_VBR;
          break;
        default:
          goto default_;
        }
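        // loads into SR go through emith_write_sr and must schedule an
        // IRQ re-test, since lowering the I mask may unblock a pending
        // interrupt.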
        if (tmp == SHR_SR) {
          emith_invalidate_t();
          tmp2 = emit_memhandler_read_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_POSTINCR);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_write_sr(sr, tmp2);
          rcache_free_tmp(tmp2);
          drcf.test_irq = 1;
        } else
          emit_memhandler_read_rr(sh2, tmp, GET_Rn(), 0, 2 | MF_POSTINCR);
        goto end_op;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // SHLL2 Rn 0100nnnn00001000
                // SHLR2 Rn 0100nnnn00001001
          tmp = 2;
          break;
        case 1: // SHLL8 Rn 0100nnnn00011000
                // SHLR8 Rn 0100nnnn00011001
          tmp = 8;
          break;
        case 2: // SHLL16 Rn 0100nnnn00101000
                // SHLR16 Rn 0100nnnn00101001
          tmp = 16;
          break;
        default:
          goto default_;
        }
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 1) {
          emith_lsr(tmp2, tmp3, tmp);
        } else
          emith_lsl(tmp2, tmp3, tmp);
        goto end_op;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp2 = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp2 = SHR_MACL;
          break;
        case 2: // LDS Rm,PR 0100mmmm00101010
          tmp2 = SHR_PR;
          break;
        default:
          goto default_;
        }
        emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0b:
        switch (GET_Fx())
        {
        case 1: // TAS.B @Rn 0100nnnn00011011
          // XXX: is TAS working on 32X?
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          tmp = emit_memhandler_read(0);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_EQ);
          emith_or_r_imm(tmp, 0x80);
          tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
          emith_move_r_r(tmp2, tmp);
          rcache_free_tmp(tmp);
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          emit_memhandler_write(0);
          break;
        default:
          goto default_;
        }
        goto end_op;
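      // note: on real hardware TAS.B performs a locked read-modify-write
      // bus cycle; the bus lock isn't modeled here (see the XXX above),
      // which is presumably acceptable for 32X software.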
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR 0100mmmm00001110
          tmp2 = SHR_SR;
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp2 = SHR_GBR;
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp2 = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp2 == SHR_SR) {
          emith_invalidate_t();
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          emith_write_sr(sr, tmp);
          drcf.test_irq = 1;
        } else
          emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0f: // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
        emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
        sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
        emith_sh2_macw(tmp3, tmp4, tmp, tmp2, sr);
        rcache_free_tmp(tmp2);
        rcache_free_tmp(tmp);
        goto end_op;
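      // sr is passed read-only, presumably so emith_sh2_macw can honor
      // the S flag (saturating MAC mode).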
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x05: // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2 | drcf.polling);
      goto end_op;
    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
      case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
        tmp = ((op & 7) >= 4 && GET_Rn() != GET_Rm()) ? MF_POSTINCR : drcf.polling;
        emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), 0, (op & 3) | tmp);
        goto end_op;
      case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
        emit_move_r_r(GET_Rn(), GET_Rm());
        goto end_op;
      default: // 0x07 ... 0x0f
        tmp = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
        switch (op & 0x0f)
        {
        case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
          emith_mvn_r_r(tmp2, tmp);
          break;
        case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
          tmp3 = tmp2;
          if (tmp == tmp2)
            tmp3 = rcache_get_tmp();
          tmp4 = rcache_get_tmp();
          emith_lsr(tmp3, tmp, 16);
          emith_or_r_r_lsl(tmp3, tmp, 24);
          emith_and_r_r_imm(tmp4, tmp, 0xff00);
          emith_or_r_r_lsl(tmp3, tmp4, 8);
          emith_rol(tmp2, tmp3, 16);
          rcache_free_tmp(tmp4);
          if (tmp == tmp2)
            rcache_free_tmp(tmp3);
          break;
        case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
          emith_rol(tmp2, tmp, 16);
          break;
        case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            emith_t_to_carry(sr, 1);
            emith_negc_r_r(tmp2, tmp);
          } else
#endif
          {
            EMITH_HINT_COND(DCOND_CS);
            emith_tpop_carry(sr, 1);
            emith_negcf_r_r(tmp2, tmp);
            emith_tpush_carry(sr, 1);
          }
          break;
        case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
          emith_neg_r_r(tmp2, tmp);
          break;
        case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
          emith_clear_msb(tmp2, tmp, 24);
          rcache_set_x16(tmp2, 1, 1);
          break;
        case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
          emith_clear_msb(tmp2, tmp, 16);
          rcache_set_x16(tmp2, 0, 1);
          break;
        case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
          emith_sext(tmp2, tmp, 8);
          rcache_set_x16(tmp2, 1, 0);
          break;
        case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
          emith_sext(tmp2, tmp, 16);
          rcache_set_x16(tmp2, 1, 0);
          break;
        }
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x07: // ADD #imm,Rn 0111nnnniiiiiiii
      if (op & 0x80) // adding negative
        emit_sub_r_imm(GET_Rn(), (u8)-op);
      else
        emit_add_r_imm(GET_Rn(), (u8)op);
      goto end_op;
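    // the immediate is the sign-extended low byte of op; negating it for
    // the subtract keeps the constant in 1..128, which host immediate
    // encodings generally handle better than a negative add.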
    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
      case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
      case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
        tmp2 = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_cmp_r_imm(tmp2, (s8)(op & 0xff));
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // TST #imm,R0 11001000iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x0900: // AND #imm,R0 11001001iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
        emith_and_r_r_imm(tmp, tmp2, (op & 0xff));
        goto end_op;
      case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_eor_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0b00: // OR #imm,R0 11001011iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_or_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0 | drcf.polling);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_and_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_eor_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_or_r_r_imm(tmp2, tmp, (op & 0xff));
      end_rmw_op:
        rcache_free_tmp(tmp);
        emit_indirect_indexed_write(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x0e: // MOV #imm,Rn 1110nnnniiiiiiii
      emit_move_r_imm32(GET_Rn(), (s8)op);
      goto end_op;
    default:
    default_:
      if (!(op_flags[i] & OF_B_IN_DS)) {
        elprintf_sh2(sh2, EL_ANOMALY,
          "drc: illegal op %04x @ %08x", op, pc - 2);
        exit(1);
      }
    }

end_op:
    rcache_unlock_all();
    rcache_set_usage_now(0);
#if DRC_DEBUG & 64
    RCACHE_CHECK("after insn");
#endif

    cycles += opd->cycles;

    if (op_flags[i+1] & OF_DELAY_OP) {
      do_host_disasm(tcache_id);
      continue;
    }

    // test irq?
    if (drcf.test_irq && !drcf.pending_branch_direct) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      if (!drcf.pending_branch_indirect)
        emit_move_r_imm32(SHR_PC, pc);
      rcache_flush();
      emith_call(sh2_drc_test_irq);
      drcf.test_irq = 0;
    }
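    // direct branches below resolve in one of three ways: local backward
    // (target host address already known), local forward (recorded in
    // blx_targets and patched once the target is emitted), or external
    // (block exit through the dispatcher with a patchable link).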
    // branch handling
    if (drcf.pending_branch_direct)
    {
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      u32 target_pc = opd_b->imm;
      int cond = -1;
      int ctaken = 0;
      void *target = NULL;

      if (OP_ISBRACND(opd_b->op))
        ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
      cycles += ctaken; // assume branch taken

#if LOOP_OPTIMIZER
      if ((drcf.loop_type == OF_IDLE_LOOP ||
          (drcf.loop_type == OF_DELAY_LOOP && drcf.delay_reg >= 0)))
      {
        // idle or delay loop
        emit_sync_t_to_sr();
        emith_sh2_delay_loop(cycles, drcf.delay_reg);
        rcache_unlock_all(); // may lock delay_reg
        drcf.polling = drcf.loop_type = drcf.pinning = 0;
      }
#endif

#if CALL_STACK
      void *rtsadd = NULL, *rtsret = NULL;
      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // BSR - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif

      // XXX move below cond test if not changing host cond (MIPS delay slot)?
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      rcache_clean();

      if (OP_ISBRACND(opd_b->op)) {
        // BT[S], BF[S] - emit condition test
        cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
        if (delay_dep_fw & BITMASK1(SHR_T)) {
          emith_sync_t(sr);
          emith_tst_r_imm(sr, T_save);
        } else {
          cond = emith_tst_t(sr, (opd_b->op == OP_BRANCH_CT));
          if (emith_get_t_cond() >= 0) {
            if (opd_b->op == OP_BRANCH_CT)
              emith_or_r_imm_c(cond, sr, T);
            else
              emith_bic_r_imm_c(cond, sr, T);
          }
        }
      } else
        emith_sync_t(sr);
      // no modification of host status/flags between here and branching!

      v = find_in_sorted_linkage(branch_targets, branch_target_count, target_pc);
      if (v >= 0)
      {
        // local branch
        if (branch_targets[v].ptr) {
          // local backward jump, link here now since host PC is already known
          target = branch_targets[v].ptr;
#if LOOP_OPTIMIZER
          if (pinned_loops[pinned_loop_count].pc == target_pc) {
            // backward jump at end of optimized loop
            rcache_unpin_all();
            target = pinned_loops[pinned_loop_count].ptr;
            pinned_loop_count++;
          }
#endif
          if (cond != -1) {
            if (emith_jump_patch_inrange(tcache_ptr, target)) {
              emith_jump_cond(cond, target);
            } else {
              // not reachable directly, must use far branch
              EMITH_JMP_START(emith_invert_cond(cond));
              emith_jump(target);
              EMITH_JMP_END(emith_invert_cond(cond));
            }
          } else {
            emith_jump(target);
            rcache_invalidate();
          }
        } else if (blx_target_count < MAX_LOCAL_BRANCHES) {
          // local forward jump
          target = tcache_ptr;
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = target_pc, .ptr = target, .mask = 0x2 };
          if (cond != -1)
            emith_jump_cond_patchable(cond, target);
          else {
            emith_jump_patchable(target);
            rcache_invalidate();
          }
        } else
          // no space for resolving forward branch, handle it as external
          dbg(1, "warning: too many unresolved branches");
      }

      if (target == NULL)
      {
        // can't resolve branch locally, make a block exit
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (cond != -1) {
#ifndef __arm__
          if (bl && blx_target_count < ARRAY_SIZE(blx_targets)) {
            // conditional jumps get a blx stub for the far jump
            bl->type = BL_JCCBLX;
            target = tcache_ptr;
            blx_targets[blx_target_count++] =
              (struct linkage) { .pc = target_pc, .ptr = target, .bl = bl };
            emith_jump_cond_patchable(cond, target);
          } else {
            // not linkable, or blx table full; inline jump @dispatcher
            EMITH_JMP_START(emith_invert_cond(cond));
            if (bl) {
              bl->jump = tcache_ptr;
              emith_flush(); // flush to inhibit insn swapping
              bl->type = BL_LDJMP;
            }
            tmp = rcache_get_tmp_arg(0);
            emith_move_r_imm(tmp, target_pc);
            rcache_free_tmp(tmp);
            target = sh2_drc_dispatcher;
            emith_jump_patchable(target);
            EMITH_JMP_END(emith_invert_cond(cond));
          }
#else
          // jump @dispatcher - ARM 32bit version with conditional execution
          EMITH_SJMP_START(emith_invert_cond(cond));
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm_c(cond, tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;
          if (bl) {
            bl->jump = tcache_ptr;
            bl->type = BL_JMP;
          }
          emith_jump_cond_patchable(cond, target);
          EMITH_SJMP_END(emith_invert_cond(cond));
#endif
        } else {
          // unconditional, has the far jump inlined
          if (bl) {
            emith_flush(); // flush to inhibit insn swapping
            bl->type = BL_LDJMP;
          }
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;
          emith_jump_patchable(target);
          rcache_invalidate();
        }
      }

#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif

      // branch not taken, correct cycle count
      if (ctaken)
        cycles -= ctaken;
      // set T bit to reflect branch not taken for OP_BRANCH_CT/CF
      if (emith_get_t_cond() >= 0) // T is synced for all other cases
        emith_set_t(sr, opd_b->op == OP_BRANCH_CF);

      drcf.pending_branch_direct = 0;
      if (target_pc >= base_pc && target_pc < pc)
        drcf.polling = drcf.loop_type = 0;
    }
    else if (drcf.pending_branch_indirect) {
      u32 target_pc;

      tmp = rcache_get_reg_arg(0, SHR_PC, NULL);

#if CALL_STACK
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      void *rtsadd = NULL, *rtsret = NULL;
      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // JSR, BSRF - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif

      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      rcache_clean();

#if CALL_STACK
      if (opd_b->rm == SHR_PR) {
        // RTS - restore rts data, else jump to dispatcher
        emith_jump(sh2_drc_dispatcher_return);
      } else
#endif
      if (gconst_get(SHR_PC, &target_pc)) {
        // JMP, JSR, BRAF, BSRF const - treat like unconditional direct branch
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (bl) // pc already loaded somewhere else, can patch jump only
          bl->type = BL_JMP;
        emith_jump_patchable(sh2_drc_dispatcher);
      } else {
        // JMP, JSR, BRAF, BSRF not const
        emith_jump(sh2_drc_dispatcher);
      }
      rcache_invalidate();

#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif

      drcf.pending_branch_indirect = 0;
      drcf.polling = drcf.loop_type = 0;
    }
    rcache_unlock_all();

    do_host_disasm(tcache_id);
  }

  // check the last op
  if (op_flags[i-1] & OF_DELAY_OP)
    opd = &ops[i-2];
  else
    opd = &ops[i-1];

  if (! OP_ISBRAUC(opd->op) || (opd->dest & BITMASK1(SHR_PR)))
  {
    tmp = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    FLUSH_CYCLES(tmp);
    emith_sync_t(tmp);
    rcache_clean();

    bl = dr_prepare_ext_branch(block->entryp, pc, sh2->is_slave, tcache_id);
    if (bl) {
      emith_flush(); // flush to inhibit insn swapping
      bl->type = BL_LDJMP;
    }
    tmp = rcache_get_tmp_arg(0);
    emith_move_r_imm(tmp, pc);
    emith_jump_patchable(sh2_drc_dispatcher);
    rcache_invalidate();
  } else
    rcache_flush();

  // link unresolved branches, emitting blx area entries as needed
  emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
    branch_target_count, blx_targets, blx_target_count);

  emith_flush();
  do_host_disasm(tcache_id);

  emith_pool_commit(0);

  // fill blx backup; do this last to backup final patched code
  for (i = 0; i < block->entry_count; i++)
    for (bl = block->entryp[i].o_links; bl; bl = bl->o_next)
      memcpy(bl->jdisp, bl->blx ? bl->blx : bl->jump, emith_jump_at_size());

  ring_alloc(&tcache_ring[tcache_id], tcache_ptr - block_entry_ptr);
  host_instructions_updated(block_entry_ptr, tcache_ptr, 1);

  dr_activate_block(block, tcache_id, sh2->is_slave);
  emith_update_cache();

  do_host_disasm(tcache_id);

  dbg(2, " block #%d,%d -> %p tcache %d/%d, insns %d -> %d %.3f",
    tcache_id, blkid_main, tcache_ptr,
    tcache_ring[tcache_id].used, tcache_ring[tcache_id].size,
    insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);

  if ((sh2->pc & 0xc6000000) == 0x02000000) { // ROM
    dbg(2, " hash collisions %d/%d", hash_collisions, block_ring[tcache_id].used);
    Pico32x.emu_flags |= P32XF_DRC_ROM_C;
  }
/*
  printf("~~~\n");
  tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
  do_host_disasm(tcache_id);
  printf("~~~\n");
*/
#if (DRC_DEBUG)
  fflush(stdout);
#endif

  return block_entry_ptr;
}

static void sh2_generate_utils(void)
{
  int arg0, arg1, arg2, arg3, sr, tmp, tmp2;
#if DRC_DEBUG
  int hic = host_insn_count; // don't count utils for insn statistics
#endif

  host_arg2reg(arg0, 0);
  host_arg2reg(arg1, 1);
  host_arg2reg(arg2, 2);
  host_arg2reg(arg3, 3);
  emith_move_r_r(arg0, arg0); // nop
  emith_flush();

  // sh2_drc_write8(u32 a, u32 d)
  sh2_drc_write8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write8_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write16(u32 a, u32 d)
  sh2_drc_write16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write16_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write32(u32 a, u32 d)
  sh2_drc_write32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write32_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // d = sh2_drc_read8(u32 a)
  sh2_drc_read8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emit_le_ptr8(DCOND_CC, arg0);
  emith_read8s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read16(u32 a)
  sh2_drc_read16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read16s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read32(u32 a)
  sh2_drc_read32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emit_le_swap(DCOND_CC, RET_REG);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read8_poll(u32 a)
  sh2_drc_read8_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emit_le_ptr8(-1, arg1);
  emith_read8s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory8);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read16_poll(u32 a)
  sh2_drc_read16_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read16s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory16);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read32_poll(u32 a)
  sh2_drc_read32_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read_r_r_r(arg1, arg2, arg1);
  emit_le_swap(-1, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory32);
  emith_pop_and_ret(arg1);
  emith_flush();
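  // the _poll read variants route direct-memory reads through
  // p32x_sh2_poll_memory* so that busy-wait polling loops on shared
  // memory can be detected and throttled; handler-mapped regions take
  // the plain call path instead.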
  // sh2_drc_exit(u32 pc)
  sh2_drc_exit = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
  emit_do_static_regs(1, arg2);
  emith_sh2_drc_exit();
  emith_flush();

  // sh2_drc_dispatcher(u32 pc)
  sh2_drc_dispatcher = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
#if BRANCH_CACHE
  // check if PC is in branch target cache
  emith_and_r_r_imm(arg1, arg0, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_read_r_r_offs(arg2, arg1, offsetof(SH2, branch_cache));
  emith_cmp_r_r(arg2, arg0);
  EMITH_SJMP_START(DCOND_NE);
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bchit);
  emith_read_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_EQ, arg3, 1);
  emith_write_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
#endif
  emith_read_r_r_offs_ptr_c(DCOND_EQ, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  emith_jump_reg_c(DCOND_EQ, RET_REG);
  EMITH_SJMP_END(DCOND_NE);
#endif
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_add_r_r_ptr_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  emith_abicall(dr_lookup_block);
  // store PC and block entry ptr (in arg0) in branch target cache
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
#if BRANCH_CACHE
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_NE, arg3, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
#endif
  emith_ctx_read_c(DCOND_NE, arg2, SHR_PC * 4);
  emith_and_r_r_imm(arg1, arg2, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_write_r_r_offs_c(DCOND_NE, arg2, arg1, offsetof(SH2, branch_cache));
  emith_write_r_r_offs_ptr_c(DCOND_NE, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
#endif
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // lookup failed, call sh2_translate()
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  emith_abicall(sh2_translate);
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // XXX: can't translate, fail
  emith_abicall(dr_failure);
  emith_flush();

#if CALL_STACK
  // pc = sh2_drc_dispatcher_call(u32 pc)
  sh2_drc_dispatcher_call = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg3, CONTEXT_REG, arg2, 0);
  rcache_get_reg_arg(2, SHR_PR, NULL);
  emith_add_r_ret(arg1);
  emith_write_r_r_offs_ptr(arg1, arg3, offsetof(SH2, rts_cache)+sizeof(void *));
  emith_write_r_r_offs(arg2, arg3, offsetof(SH2, rts_cache));
  rcache_flush();
  emith_ret();
  emith_flush();
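  // dispatcher_call pushes (PR, host return address) into the small
  // rts_cache ring; dispatcher_return below pops it and jumps straight
  // to the cached host address when the RTS target matches, falling
  // back to the full dispatcher otherwise.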
  // sh2_drc_dispatcher_return(u32 pc)
  sh2_drc_dispatcher_return = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg2, 0);
  emith_read_r_r_offs(arg3, arg1, offsetof(SH2, rts_cache));
  emith_cmp_r_r(arg0, arg3);
#if (DRC_DEBUG & 128)
  EMITH_SJMP_START(DCOND_EQ);
  emith_move_r_ptr_imm(arg3, (uptr)&rcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_add_r_imm_c(DCOND_NE, arg1, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
  EMITH_SJMP_END(DCOND_EQ);
#else
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
#endif
  emith_read_r_r_offs_ptr(arg0, arg1, offsetof(SH2, rts_cache) + sizeof(void *));
  emith_sub_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg3, (uptr)&rchit);
  emith_read_r_r_offs(arg1, arg3, 0);
  emith_add_r_imm(arg1, 1);
  emith_write_r_r_offs(arg1, arg3, 0);
#endif
  emith_jump_reg(arg0);
  emith_flush();
#endif

  // sh2_drc_test_irq(void)
  // assumes it's called from main function (may jump to dispatcher)
  sh2_drc_test_irq = (void *)tcache_ptr;
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_lsr(arg0, sr, I_SHIFT);
  emith_and_r_imm(arg0, 0x0f);
  emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  EMITH_SJMP_START(DCOND_GT);
  emith_ret_c(DCOND_LE); // nope, return
  EMITH_SJMP_END(DCOND_GT);
  // adjust SP
  tmp = rcache_get_reg(SHR_SP, RC_GR_RMW, NULL);
  emith_sub_r_imm(tmp, 4*2);
  rcache_clean();
  // push SR
  tmp = rcache_get_reg_arg(0, SHR_SP, &tmp2);
  emith_add_r_r_imm(tmp, tmp2, 4);
  tmp = rcache_get_reg_arg(1, SHR_SR, NULL);
  emith_clear_msb(tmp, tmp, 22);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_abicall(p32x_sh2_write32); // XXX: use sh2_drc_write32?
  // push PC
  rcache_get_reg_arg(0, SHR_SP, NULL);
  rcache_get_reg_arg(1, SHR_PC, NULL);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_abicall(p32x_sh2_write32);
  // update I, cycles, do callback
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  emith_bic_r_imm(sr, I);
  emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  rcache_flush();
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
  // obtain new PC
  tmp = rcache_get_reg_arg(1, SHR_VBR, &tmp2);
  emith_add_r_r_r_lsl(arg0, tmp2, RET_REG, 2);
  emith_call(sh2_drc_read32);
  if (arg0 != RET_REG)
    emith_move_r_r(arg0, RET_REG);
  emith_call_cleanup();
  rcache_invalidate();
  emith_jump(sh2_drc_dispatcher);
  emith_flush();
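  // this mirrors the SH2 exception entry sequence: push SR and PC,
  // raise the I mask to the pending level, charge a minimum of 13
  // cycles, then fetch the vector and continue via the dispatcher.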
  // sh2_drc_entry(SH2 *sh2)
  sh2_drc_entry = (void *)tcache_ptr;
  emith_sh2_drc_entry();
  emith_move_r_r_ptr(CONTEXT_REG, arg0); // move ctx, arg0
  emit_do_static_regs(0, arg2);
  emith_call(sh2_drc_test_irq);
  emith_ctx_read(arg0, SHR_PC * 4);
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

#ifdef DRC_SR_REG
  // sh2_drc_save_sr(SH2 *sh2)
  sh2_drc_save_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_write_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_invalidate();
  emith_ret();
  emith_flush();

  // sh2_drc_restore_sr(SH2 *sh2)
  sh2_drc_restore_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_WRITE, NULL);
  emith_read_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_flush();
  emith_ret();
  emith_flush();
#endif

#ifdef PDB_NET
  // debug
#define MAKE_READ_WRAPPER(func) { \
  void *tmp = (void *)tcache_ptr; \
  emith_push_ret(); \
  emith_call(func); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_addf_r_r(arg2, arg0); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_adc_r_imm(arg2, 0x01000000); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_pop_and_ret(); \
  emith_flush(); \
  func = tmp; \
}
#define MAKE_WRITE_WRAPPER(func) { \
  void *tmp = (void *)tcache_ptr; \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_addf_r_r(arg2, arg1); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_adc_r_imm(arg2, 0x01000000); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_move_r_r_ptr(arg2, CONTEXT_REG); \
  emith_jump(func); \
  emith_flush(); \
  func = tmp; \
}

  MAKE_READ_WRAPPER(sh2_drc_read8);
  MAKE_READ_WRAPPER(sh2_drc_read16);
  MAKE_READ_WRAPPER(sh2_drc_read32);
  MAKE_WRITE_WRAPPER(sh2_drc_write8);
  MAKE_WRITE_WRAPPER(sh2_drc_write16);
  MAKE_WRITE_WRAPPER(sh2_drc_write32);
  MAKE_READ_WRAPPER(sh2_drc_read8_poll);
  MAKE_READ_WRAPPER(sh2_drc_read16_poll);
  MAKE_READ_WRAPPER(sh2_drc_read32_poll);
#endif

  emith_pool_commit(0);
  rcache_invalidate();
#if (DRC_DEBUG & 4)
  host_dasm_new_symbol(sh2_drc_entry);
  host_dasm_new_symbol(sh2_drc_dispatcher);
#if CALL_STACK
  host_dasm_new_symbol(sh2_drc_dispatcher_call);
  host_dasm_new_symbol(sh2_drc_dispatcher_return);
#endif
  host_dasm_new_symbol(sh2_drc_exit);
  host_dasm_new_symbol(sh2_drc_test_irq);
  host_dasm_new_symbol(sh2_drc_write8);
  host_dasm_new_symbol(sh2_drc_write16);
  host_dasm_new_symbol(sh2_drc_write32);
  host_dasm_new_symbol(sh2_drc_read8);
  host_dasm_new_symbol(sh2_drc_read16);
  host_dasm_new_symbol(sh2_drc_read32);
  host_dasm_new_symbol(sh2_drc_read8_poll);
  host_dasm_new_symbol(sh2_drc_read16_poll);
  host_dasm_new_symbol(sh2_drc_read32_poll);
#ifdef DRC_SR_REG
  host_dasm_new_symbol(sh2_drc_save_sr);
  host_dasm_new_symbol(sh2_drc_restore_sr);
#endif
#endif
#if DRC_DEBUG
  host_insn_count = hic;
#endif
}

static void sh2_smc_rm_blocks(u32 a, int len, int tcache_id, u32 shift)
{
  struct block_list **blist, *entry, *next;
  u32 mask = RAM_SIZE(tcache_id) - 1;
  u32 wtmask = ~0x20000000; // write-through area mask
  u32 start_addr, end_addr;
  u32 start_lit, end_lit;
  struct block_desc *block;
#if (DRC_DEBUG & 2)
  int removed = 0;
#endif

  // ignore cache-through
  a &= wtmask;

  blist = &inval_lookup[tcache_id][(a & mask) / INVAL_PAGE_SIZE];
  entry = *blist;
  // go through the block list for this range
  while (entry != NULL) {
    next = entry->next;
    block = entry->block;
    start_addr = block->addr & wtmask;
    end_addr = start_addr + block->size;
    start_lit = block->addr_lit & wtmask;
    end_lit = start_lit + block->size_lit;
    // disable/delete block if it covers the modified address
    if ((start_addr < a+len && a < end_addr) ||
        (start_lit < a+len && a < end_lit))
    {
      dbg(2, "smc remove @%08x", a);
      end_addr = (start_lit < a+len && block->size_lit ? a : 0);
      dr_rm_block_entry(block, tcache_id, end_addr, 0);
#if (DRC_DEBUG & 2)
      removed = 1;
#endif
    }
    entry = next;
  }
#if (DRC_DEBUG & 2)
  if (!removed)
    dbg(2, "rm_blocks called @%08x, no work?", a);
#endif
#if BRANCH_CACHE
  if (tcache_id)
    memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  else {
    memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
  }
#endif
#if CALL_STACK
  if (tcache_id) {
    memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    sh2s[tcache_id-1].rts_cache_idx = 0;
  } else {
    memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  }
#endif
}
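// note: SH2 RAM is visible both cached (0x0xxxxxxx) and cache-through
// (0x2xxxxxxx); masking addresses with wtmask folds both mirrors
// together, so a write through either mapping invalidates the block.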
void sh2_drc_wcheck_ram(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 0, SH2_DRCBLK_RAM_SHIFT);
}

void sh2_drc_wcheck_da(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 1 + sh2->is_slave, SH2_DRCBLK_DA_SHIFT);
}

int sh2_execute_drc(SH2 *sh2c, int cycles)
{
  int ret_cycles;

  // cycles are kept in SHR_SR unused bits (upper 20)
  // bit11 contains T saved for delay slot
  // others are usual SH2 flags
  sh2c->sr &= 0x3f3;
  sh2c->sr |= (cycles-1) << 12;
#if (DRC_DEBUG & 8)
  lastpc = lastcnt = 0;
#endif
  sh2c->state |= SH2_IN_DRC;
  sh2_drc_entry(sh2c);
  sh2c->state &= ~SH2_IN_DRC;
  // TODO: irq cycles
  ret_cycles = (int32_t)sh2c->sr >> 12;
  if (ret_cycles >= 0)
    dbg(1, "warning: drc returned with cycles: %d, pc %08x", ret_cycles, sh2c->pc);
#if (DRC_DEBUG & 8)
  if (lastcnt)
    dbg(8, "= %csh2 enter %08x %p (%d times), c=%d", sh2c->is_slave?'s':'m',
      lastpc, lastblock, lastcnt, (signed int)sh2c->sr >> 12);
#endif
  sh2c->sr &= 0x3f3;
  return ret_cycles+1;
}
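// e.g. a call with cycles=100 stores 99 << 12; compiled blocks decrement
// this field as they run and exit once it goes negative, so ret_cycles
// is normally negative here and ret_cycles+1 is the (non-positive)
// cycle debt handed back to the scheduler.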
  5179. static void block_stats(void)
  5180. {
  5181. #if (DRC_DEBUG & 2)
  5182. int c, b, i;
  5183. long total = 0;
  5184. printf("block stats:\n");
  5185. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5186. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5187. if (block_tables[b][i].addr != 0)
  5188. total += block_tables[b][i].refcount;
  5189. }
  5190. printf("total: %ld\n",total);
  5191. for (c = 0; c < 20; c++) {
  5192. struct block_desc *blk, *maxb = NULL;
  5193. int max = 0;
  5194. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5195. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5196. if ((blk = &block_tables[b][i])->addr != 0 && blk->refcount > max) {
  5197. max = blk->refcount;
  5198. maxb = blk;
  5199. }
  5200. }
  5201. if (maxb == NULL)
  5202. break;
  5203. printf("%08lx %p %9d %2.3f%%\n", (ulong)maxb->addr, maxb->tcache_ptr, maxb->refcount,
  5204. (double)maxb->refcount / total * 100.0);
  5205. maxb->refcount = 0;
  5206. }
  5207. for (b = 0; b < ARRAY_SIZE(block_tables); b++)
  5208. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5209. block_tables[b][i].refcount = 0;
  5210. #endif
  5211. }
  5212. void entry_stats(void)
  5213. {
  5214. #if (DRC_DEBUG & 32)
  5215. int c, b, i, j;
  5216. long total = 0;
  5217. printf("block entry stats:\n");
  5218. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5219. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5220. for (j = 0; j < block_tables[b][i].entry_count; j++)
  5221. total += block_tables[b][i].entryp[j].entry_count;
  5222. }
  5223. printf("total: %ld\n",total);
  5224. for (c = 0; c < 20; c++) {
  5225. struct block_desc *blk;
  5226. struct block_entry *maxb = NULL;
  5227. int max = 0;
  5228. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5229. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size) {
  5230. blk = &block_tables[b][i];
  5231. for (j = 0; j < blk->entry_count; j++)
  5232. if (blk->entryp[j].entry_count > max) {
  5233. max = blk->entryp[j].entry_count;
  5234. maxb = &blk->entryp[j];
  5235. }
  5236. }
  5237. }
  5238. if (maxb == NULL)
  5239. break;
  5240. printf("%08lx %p %9d %2.3f%%\n", (ulong)maxb->pc, maxb->tcache_ptr, maxb->entry_count,
  5241. (double)100 * maxb->entry_count / total);
  5242. maxb->entry_count = 0;
  5243. }
  5244. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5245. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5246. for (j = 0; j < block_tables[b][i].entry_count; j++)
  5247. block_tables[b][i].entryp[j].entry_count = 0;
  5248. }
  5249. #endif
  5250. }
  5251. static void backtrace(void)
  5252. {
  5253. #if (DRC_DEBUG & 1024)
  5254. int i;
  5255. printf("backtrace master:\n");
  5256. for (i = 0; i < ARRAY_SIZE(csh2[0]); i++)
  5257. SH2_DUMP(&csh2[0][i], "bt msh2");
  5258. printf("backtrace slave:\n");
  5259. for (i = 0; i < ARRAY_SIZE(csh2[1]); i++)
  5260. SH2_DUMP(&csh2[1][i], "bt ssh2");
  5261. #endif
  5262. }
  5263. static void state_dump(void)
  5264. {
  5265. #if (DRC_DEBUG & 2048)
  5266. int i;
  5267. SH2_DUMP(&sh2s[0], "master");
  5268. printf("VBR msh2: %lx\n", (ulong)sh2s[0].vbr);
  5269. for (i = 0; i < 0x60; i++) {
  5270. printf("%08lx ",(ulong)p32x_sh2_read32(sh2s[0].vbr + i*4, &sh2s[0]));
  5271. if ((i+1) % 8 == 0) printf("\n");
  5272. }
  5273. printf("stack msh2: %lx\n", (ulong)sh2s[0].r[15]);
  5274. for (i = -0x30; i < 0x30; i++) {
  5275. printf("%08lx ",(ulong)p32x_sh2_read32(sh2s[0].r[15] + i*4, &sh2s[0]));
  5276. if ((i+1) % 8 == 0) printf("\n");
  5277. }
  5278. SH2_DUMP(&sh2s[1], "slave");
  5279. printf("VBR ssh2: %lx\n", (ulong)sh2s[1].vbr);
  5280. for (i = 0; i < 0x60; i++) {
  5281. printf("%08lx ",(ulong)p32x_sh2_read32(sh2s[1].vbr + i*4, &sh2s[1]));
  5282. if ((i+1) % 8 == 0) printf("\n");
  5283. }
  5284. printf("stack ssh2: %lx\n", (ulong)sh2s[1].r[15]);
  5285. for (i = -0x30; i < 0x30; i++) {
  5286. printf("%08lx ",(ulong)p32x_sh2_read32(sh2s[1].r[15] + i*4, &sh2s[1]));
  5287. if ((i+1) % 8 == 0) printf("\n");
  5288. }
  5289. #endif
  5290. }

static void bcache_stats(void)
{
#if (DRC_DEBUG & 128)
  int i;

#if CALL_STACK
  for (i = 1; i < ARRAY_SIZE(sh2s->rts_cache); i++)
    if (sh2s[0].rts_cache[i].pc == -1 && sh2s[1].rts_cache[i].pc == -1) break;

  printf("return cache hits:%d misses:%d depth: %d index: %d/%d\n",
    rchit, rcmiss, i, sh2s[0].rts_cache_idx, sh2s[1].rts_cache_idx);
  for (i = 0; i < ARRAY_SIZE(sh2s[0].rts_cache); i++) {
    printf("%08lx ", (ulong)sh2s[0].rts_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
  for (i = 0; i < ARRAY_SIZE(sh2s[1].rts_cache); i++) {
    printf("%08lx ", (ulong)sh2s[1].rts_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
#if BRANCH_CACHE
  printf("branch cache hits:%d misses:%d\n", bchit, bcmiss);
  printf("branch cache master:\n");
  for (i = 0; i < ARRAY_SIZE(sh2s[0].branch_cache); i++) {
    printf("%08lx ", (ulong)sh2s[0].branch_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("branch cache slave:\n");
  for (i = 0; i < ARRAY_SIZE(sh2s[1].branch_cache); i++) {
    printf("%08lx ", (ulong)sh2s[1].branch_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
#endif
}

void sh2_drc_flush_all(void)
{
  backtrace();
  state_dump();
  block_stats();
  entry_stats();
  bcache_stats();
  dr_flush_tcache(0);
  dr_flush_tcache(1);
  dr_flush_tcache(2);
  Pico32x.emu_flags &= ~P32XF_DRC_ROM_C;
}

void sh2_drc_mem_setup(SH2 *sh2)
{
  // fill the DRC-only convenience pointers
  sh2->p_drcblk_da = Pico32xMem->drcblk_da[!!sh2->is_slave];
  sh2->p_drcblk_ram = Pico32xMem->drcblk_ram;
}

int sh2_drc_init(SH2 *sh2)
{
  int i;

  if (block_tables[0] == NULL)
  {
    for (i = 0; i < TCACHE_BUFFERS; i++) {
      block_tables[i] = calloc(BLOCK_MAX_COUNT(i), sizeof(*block_tables[0]));
      if (block_tables[i] == NULL)
        goto fail;
      entry_tables[i] = calloc(ENTRY_MAX_COUNT(i), sizeof(*entry_tables[0]));
      if (entry_tables[i] == NULL)
        goto fail;
      block_link_pool[i] = calloc(BLOCK_LINK_MAX_COUNT(i),
                            sizeof(*block_link_pool[0]));
      if (block_link_pool[i] == NULL)
        goto fail;

      inval_lookup[i] = calloc(RAM_SIZE(i) / INVAL_PAGE_SIZE,
                            sizeof(inval_lookup[0]));
      if (inval_lookup[i] == NULL)
        goto fail;

      hash_tables[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*hash_tables[0]));
      if (hash_tables[i] == NULL)
        goto fail;

      unresolved_links[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*unresolved_links[0]));
      if (unresolved_links[i] == NULL)
        goto fail;
      //atexit(sh2_drc_finish);

      RING_INIT(&block_ring[i], block_tables[i], BLOCK_MAX_COUNT(i));
      RING_INIT(&entry_ring[i], entry_tables[i], ENTRY_MAX_COUNT(i));
    }
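
    // the pools below are shared by all translation caches rather than
    // being allocated per buffer like the tables above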
    block_list_pool = calloc(BLOCK_LIST_MAX_COUNT, sizeof(*block_list_pool));
    if (block_list_pool == NULL)
      goto fail;
    block_list_pool_count = 0;
    blist_free = NULL;

    memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
    memset(blink_free, 0, sizeof(blink_free));

    drc_cmn_init();
    rcache_init();

    // emit the utility functions at the start of the translation cache,
    // then carve the remaining space into one ring buffer per block table
    tcache_ptr = tcache;
    sh2_generate_utils();
    host_instructions_updated(tcache, tcache_ptr, 1);
    emith_update_cache();

    i = tcache_ptr - tcache;
    RING_INIT(&tcache_ring[0], tcache_ptr, tcache_sizes[0] - i);
    for (i = 1; i < ARRAY_SIZE(tcache_ring); i++) {
      RING_INIT(&tcache_ring[i], tcache_ring[i-1].base + tcache_ring[i-1].size,
          tcache_sizes[i]);
    }

#if (DRC_DEBUG & 4)
    for (i = 0; i < ARRAY_SIZE(block_tables); i++)
      tcache_dsm_ptrs[i] = tcache_ring[i].base;
    // disasm the utils
    tcache_dsm_ptrs[0] = tcache;
    do_host_disasm(0);
    fflush(stdout);
#endif
#if (DRC_DEBUG & 1)
    hash_collisions = 0;
#endif
  }

  memset(sh2->branch_cache, -1, sizeof(sh2->branch_cache));
  memset(sh2->rts_cache, -1, sizeof(sh2->rts_cache));
  sh2->rts_cache_idx = 0;

  return 0;

fail:
  sh2_drc_finish(sh2);
  return -1;
}
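
// note: sh2_drc_init is called once per SH2; the first call allocates the
// shared tables (hence the block_tables[0] NULL check above), while later
// calls only reset that core's branch_cache/rts_cache state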

void sh2_drc_finish(SH2 *sh2)
{
  int i;

  if (block_tables[0] == NULL)
    return;

#if (DRC_DEBUG & (256|512))
  if (trace[0]) fclose(trace[0]);
  if (trace[1]) fclose(trace[1]);
  trace[0] = trace[1] = NULL;
#endif

#if (DRC_DEBUG & 4)
  for (i = 0; i < TCACHE_BUFFERS; i++) {
    printf("~~~ tcache %d\n", i);
#if 0
    if (tcache_ring[i].first < tcache_ring[i].next) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    } else if (tcache_ring[i].used) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].base + tcache_ring[i].size;
      do_host_disasm(i);
      tcache_dsm_ptrs[i] = tcache_ring[i].base;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    }
#endif
    printf("max links: %d\n", block_link_pool_counts[i]);
  }
  printf("max block list: %d\n", block_list_pool_count);
#endif

  sh2_drc_flush_all();

  for (i = 0; i < TCACHE_BUFFERS; i++) {
    if (block_tables[i] != NULL)
      free(block_tables[i]);
    block_tables[i] = NULL;
    if (entry_tables[i] != NULL)
      free(entry_tables[i]);
    entry_tables[i] = NULL;
    if (block_link_pool[i] != NULL)
      free(block_link_pool[i]);
    block_link_pool[i] = NULL;
    blink_free[i] = NULL;

    if (inval_lookup[i] != NULL)
      free(inval_lookup[i]);
    inval_lookup[i] = NULL;

    if (hash_tables[i] != NULL) {
      free(hash_tables[i]);
      hash_tables[i] = NULL;
    }

    if (unresolved_links[i] != NULL) {
      free(unresolved_links[i]);
      unresolved_links[i] = NULL;
    }
  }

  if (block_list_pool != NULL)
    free(block_list_pool);
  block_list_pool = NULL;
  blist_free = NULL;

  drc_cmn_cleanup();
}

#endif /* DRC_SH2 */

static void *dr_get_pc_base(u32 pc, SH2 *sh2)
{
  void *ret;
  u32 mask = 0;

  // get the host memory behind pc; (void *)-1 means pc isn't mapped
  ret = p32x_sh2_get_mem_ptr(pc, &mask, sh2);
  if (ret == (void *)-1)
    return ret;

  // bias the pointer so it can be indexed directly with the guest pc
  return (char *)ret - (pc & ~mask);
}
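
#if 0
// minimal sketch of the bias property (illustrative only, not referenced
// anywhere; assumes FETCH_OP reads opcodes as dr_pc_base[pc / 2], as the
// use in scan_block below suggests)
static u16 fetch_op_example(u32 pc, SH2 *sh2)
{
  u16 *base = dr_get_pc_base(pc, sh2);
  if (base == (void *)-1)
    return 0; // pc is unmapped
  // base was biased by -(pc & ~mask), so indexing with the guest pc works
  // for any pc inside the same mapped region
  return base[pc / 2];
}
#endif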

u16 scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc_out,
  u32 *base_literals_out, u32 *end_literals_out)
{
  u16 *dr_pc_base;
  u32 pc, op, tmp;
  u32 end_pc, end_literals = 0;
  u32 lowest_literal = 0;
  u32 lowest_mova = 0;
  struct op_data *opd;
  int next_is_delay = 0;
  int end_block = 0;
  int is_divop;
  int i, i_end, i_div = -1;
  u32 crc = 0;
  // 2nd pass stuff
  int last_btarget; // loop detector
  enum { T_UNKNOWN, T_CLEAR, T_SET } t; // T propagation state

  memset(op_flags, 0, sizeof(*op_flags) * BLOCK_INSN_LIMIT);
  op_flags[0] |= OF_BTARGET; // block start is always a target

  dr_pc_base = dr_get_pc_base(base_pc, &sh2s[!!is_slave]);

  // 1st pass: disassemble
  for (i = 0, pc = base_pc; ; i++, pc += 2) {
    // we need an ops[] entry after the last one initialized,
    // so do it before end_block checks
    opd = &ops[i];
    opd->op = OP_UNHANDLED;
    opd->rm = -1;
    opd->source = opd->dest = 0;
    opd->cycles = 1;
    opd->imm = 0;

    if (next_is_delay) {
      op_flags[i] |= OF_DELAY_OP;
      next_is_delay = 0;
    }
    else if (end_block || i >= BLOCK_INSN_LIMIT - 2)
      break;
    else if ((lowest_mova && lowest_mova <= pc) ||
             (lowest_literal && lowest_literal <= pc))
      break; // text area collides with data area

    is_divop = 0;
    op = FETCH_OP(pc);
    switch ((op & 0xf000) >> 12)
    {
    /////////////////////////////////////////////
    case 0x00:
      switch (op & 0x0f)
      {
      case 0x02:
        switch (GET_Fx())
        {
        case 0: // STC SR,Rn  0000nnnn00000010
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // STC GBR,Rn 0000nnnn00010010
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // STC VBR,Rn 0000nnnn00100010
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = tmp;
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x03:
        CHECK_UNHANDLED_BITS(0xd0, undefined);
        // BRAF Rm    0000mmmm00100011
        // BSRF Rm    0000mmmm00000011
        opd->op = OP_BRANCH_RF;
        opd->rm = GET_Rn();
        opd->source = BITMASK2(SHR_PC, opd->rm);
        opd->dest = BITMASK1(SHR_PC);
        if (!(op & 0x20))
          opd->dest |= BITMASK1(SHR_PR);
        opd->cycles = 2;
        next_is_delay = 1;
        if (!(opd->dest & BITMASK1(SHR_PR)))
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        else
          op_flags[i+1+next_is_delay] |= OF_BTARGET;
        break;
      case 0x04: // MOV.B Rm,@(R0,Rn)   0000nnnnmmmm0100
      case 0x05: // MOV.W Rm,@(R0,Rn)   0000nnnnmmmm0101
      case 0x06: // MOV.L Rm,@(R0,Rn)   0000nnnnmmmm0110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x07:
        // MUL.L Rm,Rn         0000nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        opd->cycles = 2;
        break;
      case 0x08:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // CLRT        0000000000001000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 0;
          break;
        case 1: // SETT        0000000000011000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 1;
          break;
        case 2: // CLRMAC      0000000000101000
          opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // NOP         0000000000001001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          break;
        case 1: // DIV0U       0000000000011001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          opd->op = OP_DIV0;
          opd->source = BITMASK1(SHR_SR);
          opd->dest = BITMASK2(SHR_SR, SHR_T);
          div(opd) = (struct div){ .rn=SHR_MEM, .rm=SHR_MEM, .ro=SHR_MEM };
          i_div = i;
          is_divop = 1;
          break;
        case 2: // MOVT Rn     0000nnnn00101001
          opd->source = BITMASK1(SHR_T);
          opd->dest = BITMASK1(GET_Rn());
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // STS MACH,Rn 0000nnnn00001010
          tmp = SHR_MACH;
          break;
        case 1: // STS MACL,Rn 0000nnnn00011010
          tmp = SHR_MACL;
          break;
        case 2: // STS PR,Rn   0000nnnn00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(tmp);
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0b:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // RTS         0000000000001011
          opd->op = OP_BRANCH_R;
          opd->rm = SHR_PR;
          opd->source = BITMASK1(opd->rm);
          opd->dest = BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        case 1: // SLEEP       0000000000011011
          opd->op = OP_SLEEP;
          opd->cycles = 3;
          end_block = 1;
          break;
        case 2: // RTE         0000000000101011
          opd->op = OP_RTE;
          opd->source = BITMASK1(SHR_SP);
          opd->dest = BITMASK4(SHR_SP, SHR_SR, SHR_T, SHR_PC);
          opd->cycles = 4;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0c: // MOV.B @(R0,Rm),Rn   0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn   0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn   0000nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(GET_Rn());
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0f: // MAC.L @Rm+,@Rn+     0000nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x01:
      // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), GET_Rn());
      opd->dest = BITMASK1(SHR_MEM);
      opd->imm = (op & 0x0f) * 4;
      break;

    /////////////////////////////////////////////
    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn        0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn        0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn        0010nnnnmmmm0010
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x04: // MOV.B Rm,@-Rn       0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn       0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn       0010nnnnmmmm0110
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x07: // DIV0S Rm,Rn         0010nnnnmmmm0111
        opd->op = OP_DIV0;
        opd->source = BITMASK3(SHR_SR, GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_SR, SHR_T);
        div(opd) = (struct div){ .rn=GET_Rn(), .rm=GET_Rm(), .ro=SHR_MEM };
        i_div = i;
        is_divop = 1;
        break;
      case 0x08: // TST Rm,Rn           0010nnnnmmmm1000
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x09: // AND Rm,Rn           0010nnnnmmmm1001
      case 0x0a: // XOR Rm,Rn           0010nnnnmmmm1010
      case 0x0b: // OR  Rm,Rn           0010nnnnmmmm1011
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0c: // CMP/STR Rm,Rn       0010nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x0d: // XTRCT Rm,Rn         0010nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0e: // MULU.W Rm,Rn        0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn        0010nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn        0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn        0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn        0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn        0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn        0011nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x04: // DIV1 Rm,Rn          0011nnnnmmmm0100
        opd->source = BITMASK4(GET_Rm(), GET_Rn(), SHR_SR, SHR_T);
        opd->dest = BITMASK3(GET_Rn(), SHR_SR, SHR_T);
        if (i_div >= 0) {
          // divide operation: all DIV1 operations must use the same reg pair
          if (div(&ops[i_div]).rn == SHR_MEM)
            div(&ops[i_div]).rn=GET_Rn(), div(&ops[i_div]).rm=GET_Rm();
          if (div(&ops[i_div]).rn == GET_Rn() && div(&ops[i_div]).rm == GET_Rm()) {
            div(&ops[i_div]).div1 += 1;
            div(&ops[i_div]).state = 0;
            is_divop = 1;
          } else {
            ops[i_div].imm = 0;
            i_div = -1;
          }
        }
        break;
      case 0x05: // DMULU.L Rm,Rn       0011nnnnmmmm0101
      case 0x0d: // DMULS.L Rm,Rn       0011nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
        opd->cycles = 2;
        break;
      case 0x08: // SUB Rm,Rn           0011nnnnmmmm1000
      case 0x0c: // ADD Rm,Rn           0011nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a: // SUBC Rm,Rn          0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn          0011nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x0b: // SUBV Rm,Rn          0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn          0011nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn    0100nnnn00000000
        case 2: // SHAL Rn    0100nnnn00100000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // DT Rn      0100nnnn00010000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          op_flags[i] |= OF_DELAY_INSN;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn    0100nnnn00000001
        case 2: // SHAR Rn    0100nnnn00100001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // CMP/PZ Rn  0100nnnn00010001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn   0100nnnn00000010
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x12: // STS.L MACL,@-Rn   0100nnnn00010010
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x22: // STS.L PR,@-Rn     0100nnnn00100010
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x03: // STC.L SR,@-Rn     0100nnnn00000011
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->cycles = 2;
          break;
        case 0x13: // STC.L GBR,@-Rn    0100nnnn00010011
          tmp = BITMASK1(SHR_GBR);
          opd->cycles = 2;
          break;
        case 0x23: // STC.L VBR,@-Rn    0100nnnn00100011
          tmp = BITMASK1(SHR_VBR);
          opd->cycles = 2;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn()) | tmp;
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn           0100nnnn00000100
        case 0x05: // ROTR Rn           0100nnnn00000101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x24: // ROTCL Rn          0100nnnn00100100
          if (i_div >= 0) {
            // divide operation: all ROTCL operations must use the same register
            if (div(&ops[i_div]).ro == SHR_MEM)
              div(&ops[i_div]).ro = GET_Rn();
            if (div(&ops[i_div]).ro == GET_Rn() && !div(&ops[i_div]).state) {
              div(&ops[i_div]).rotcl += 1;
              div(&ops[i_div]).state = 1;
              is_divop = 1;
            } else {
              ops[i_div].imm = 0;
              i_div = -1;
            }
          }
          // fallthrough
        case 0x25: // ROTCR Rn          0100nnnn00100101
          opd->source = BITMASK2(GET_Rn(), SHR_T);
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x15: // CMP/PL Rn         0100nnnn00010101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH   0100mmmm00000110
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x16: // LDS.L @Rm+,MACL   0100mmmm00010110
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x26: // LDS.L @Rm+,PR     0100mmmm00100110
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x07: // LDC.L @Rm+,SR     0100mmmm00000111
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x17: // LDC.L @Rm+,GBR    0100mmmm00010111
          tmp = BITMASK1(SHR_GBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x27: // LDC.L @Rm+,VBR    0100mmmm00100111
          tmp = BITMASK1(SHR_VBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), SHR_MEM);
        opd->dest = BITMASK1(GET_Rn()) | tmp;
        break;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0:
          // SHLL2 Rn        0100nnnn00001000
          // SHLR2 Rn        0100nnnn00001001
          break;
        case 1:
          // SHLL8 Rn        0100nnnn00011000
          // SHLR8 Rn        0100nnnn00011001
          break;
        case 2:
          // SHLL16 Rn       0100nnnn00101000
          // SHLR16 Rn       0100nnnn00101001
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH  0100mmmm00001010
          tmp = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL  0100mmmm00011010
          tmp = SHR_MACL;
          break;
        case 2: // LDS Rm,PR    0100mmmm00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0b:
        switch (GET_Fx())
        {
        case 0: // JSR @Rm      0100mmmm00001011
          opd->dest = BITMASK1(SHR_PR);
          // fallthrough
        case 2: // JMP @Rm      0100mmmm00101011
          opd->op = OP_BRANCH_R;
          opd->rm = GET_Rn();
          opd->source = BITMASK1(opd->rm);
          opd->dest |= BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          if (!(opd->dest & BITMASK1(SHR_PR)))
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          else
            op_flags[i+1+next_is_delay] |= OF_BTARGET;
          break;
        case 1: // TAS.B @Rn    0100nnnn00011011
          opd->source = BITMASK2(GET_Rn(), SHR_MEM);
          opd->dest = BITMASK2(SHR_T, SHR_MEM);
          opd->cycles = 4;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR    0100mmmm00001110
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // LDC Rm,GBR   0100mmmm00011110
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // LDC Rm,VBR   0100mmmm00101110
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_LDC;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = tmp;
        break;
      case 0x0f:
        // MAC.W @Rm+,@Rn+      0100nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x05:
      // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (op & 0x0f) * 4;
      op_flags[i] |= OF_POLL_INSN;
      break;

    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x04: // MOV.B @Rm+,Rn       0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn       0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn       0110nnnnmmmm0110
        opd->dest = BITMASK2(GET_Rm(), GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        break;
      case 0x00: // MOV.B @Rm,Rn        0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn        0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn        0110nnnnmmmm0010
        opd->dest = BITMASK1(GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0a: // NEGC Rm,Rn          0110nnnnmmmm1010
        opd->source = BITMASK2(GET_Rm(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x03: // MOV Rm,Rn           0110nnnnmmmm0011
        opd->op = OP_MOVE;
        goto arith_rmrn;
      case 0x07: // NOT Rm,Rn           0110nnnnmmmm0111
      case 0x08: // SWAP.B Rm,Rn        0110nnnnmmmm1000
      case 0x09: // SWAP.W Rm,Rn        0110nnnnmmmm1001
      case 0x0b: // NEG Rm,Rn           0110nnnnmmmm1011
      case 0x0c: // EXTU.B Rm,Rn        0110nnnnmmmm1100
      case 0x0d: // EXTU.W Rm,Rn        0110nnnnmmmm1101
      case 0x0e: // EXTS.B Rm,Rn        0110nnnnmmmm1110
      case 0x0f: // EXTS.W Rm,Rn        0110nnnnmmmm1111
      arith_rmrn:
        opd->source = BITMASK1(GET_Rm());
        opd->dest = BITMASK1(GET_Rn());
        break;
      }
      break;

    /////////////////////////////////////////////
    case 0x07:
      // ADD #imm,Rn  0111nnnniiiiiiii
      opd->source = opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn)  10000000nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f);
        break;
      case 0x0100: // MOV.W R0,@(disp,Rn)  10000001nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f) * 2;
        break;
      case 0x0400: // MOV.B @(disp,Rm),R0  10000100mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0500: // MOV.W @(disp,Rm),R0  10000101mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f) * 2;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0800: // CMP/EQ #imm,R0       10001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = (s8)op;
        break;
      case 0x0d00: // BT/S label 10001101dddddddd
      case 0x0f00: // BF/S label 10001111dddddddd
        next_is_delay = 1;
        // fallthrough
      case 0x0900: // BT   label 10001001dddddddd
      case 0x0b00: // BF   label 10001011dddddddd
        opd->op = (op & 0x0200) ? OP_BRANCH_CF : OP_BRANCH_CT;
        opd->source = BITMASK2(SHR_PC, SHR_T);
        opd->dest = BITMASK1(SHR_PC);
        opd->imm = ((signed int)(op << 24) >> 23); // 8-bit disp, sign-extended and doubled
        opd->imm += pc + 4;
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x09:
      // MOV.W @(disp,PC),Rn  1001nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = tmp + 2 + (op & 0xff) * 2;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 1;
      break;

    /////////////////////////////////////////////
    case 0x0b:
      // BSR label 1011dddddddddddd
      opd->dest = BITMASK1(SHR_PR);
      // fallthrough
    case 0x0a:
      // BRA label 1010dddddddddddd
      opd->op = OP_BRANCH;
      opd->source = BITMASK1(SHR_PC);
      opd->dest |= BITMASK1(SHR_PC);
      opd->imm = ((signed int)(op << 20) >> 19); // 12-bit disp, sign-extended and doubled
      opd->imm += pc + 4;
      opd->cycles = 2;
      next_is_delay = 1;
      if (!(opd->dest & BITMASK1(SHR_PR))) {
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2) {
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
          if (opd->imm <= pc)
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        } else
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
      } else
        op_flags[i+1+next_is_delay] |= OF_BTARGET;
      break;

    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR)  11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR)  11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR)  11000010dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        break;
      case 0x0400: // MOV.B @(disp,GBR),R0  11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0  11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0  11000110dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0300: // TRAPA #imm            11000011iiiiiiii
        opd->op = OP_TRAPA;
        opd->source = BITMASK4(SHR_SP, SHR_PC, SHR_SR, SHR_T);
        opd->dest = BITMASK2(SHR_SP, SHR_PC);
        opd->imm = (op & 0xff);
        opd->cycles = 8;
        op_flags[i+1] |= OF_BTARGET;
        break;
      case 0x0700: // MOVA @(disp,PC),R0    11000111dddddddd
        opd->op = OP_MOVA;
        tmp = pc + 2;
        if (op_flags[i] & OF_DELAY_OP) {
          if (ops[i-1].op == OP_BRANCH)
            tmp = ops[i-1].imm;
          else if (ops[i-1].op != OP_BRANCH_N)
            tmp = 0;
        }
        opd->dest = BITMASK1(SHR_R0);
        if (tmp) {
          opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
          if (opd->imm >= base_pc) {
            if (lowest_mova == 0 || opd->imm < lowest_mova)
              lowest_mova = opd->imm;
          }
        }
        break;
      case 0x0800: // TST #imm,R0           11001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        break;
      case 0x0900: // AND #imm,R0           11001001iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0a00: // XOR #imm,R0           11001010iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0b00: // OR  #imm,R0           11001011iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0c00: // TST.B #imm,@(R0,GBR)  11001100iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        op_flags[i] |= OF_POLL_INSN;
        opd->cycles = 3;
        break;
      case 0x0d00: // AND.B #imm,@(R0,GBR)  11001101iiiiiiii
      case 0x0e00: // XOR.B #imm,@(R0,GBR)  11001110iiiiiiii
      case 0x0f00: // OR.B  #imm,@(R0,GBR)  11001111iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = op & 0xff;
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x0d:
      // MOV.L @(disp,PC),Rn  1101nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 2;
      break;

    /////////////////////////////////////////////
    case 0x0e:
      // MOV #imm,Rn   1110nnnniiiiiiii
      opd->op = OP_LOAD_CONST;
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    default:
    undefined:
      opd->op = OP_UNDEFINED;
      // an unhandled instruction is probably not code if it's not the 1st insn
      if (!(op_flags[i] & OF_DELAY_OP) && pc != base_pc)
        goto end;
      break;
    }

    if (op_flags[i] & OF_DELAY_OP) {
      switch (opd->op) {
      case OP_BRANCH:
      case OP_BRANCH_N:
      case OP_BRANCH_CT:
      case OP_BRANCH_CF:
      case OP_BRANCH_R:
      case OP_BRANCH_RF:
        elprintf(EL_ANOMALY, "%csh2 drc: branch in DS @ %08x",
          is_slave ? 's' : 'm', pc);
        opd->op = OP_UNDEFINED;
        op_flags[i] |= OF_B_IN_DS;
        next_is_delay = 0;
        break;
      }
    } else if (!is_divop && i_div >= 0)
      i_div = -1; // divide parser stop
  }
end:
  i_end = i;
  end_pc = pc;

  // 2nd pass: T-flag propagation, trimming of unreachable code, divide op
  // verification, literal pool sizing and loop detection
  lowest_literal = end_literals = lowest_mova = 0;
  t = T_UNKNOWN; // T flag state
  last_btarget = 0;
  op = 0; // delay/poll insns counter
  is_divop = 0; // divide op insns counter
  i_div = -1; // index of current divide op
  for (i = 0, pc = base_pc; i < i_end; i++, pc += 2) {
    opd = &ops[i];
    crc += FETCH_OP(pc);

    // propagate T (TODO: DIV0U)
    if (op_flags[i] & OF_BTARGET)
      t = T_UNKNOWN;

    if ((opd->op == OP_BRANCH_CT && t == T_SET) ||
        (opd->op == OP_BRANCH_CF && t == T_CLEAR)) {
      opd->op = OP_BRANCH;
      opd->cycles = (op_flags[i + 1] & OF_DELAY_OP) ? 2 : 3;
    } else if ((opd->op == OP_BRANCH_CT && t == T_CLEAR) ||
               (opd->op == OP_BRANCH_CF && t == T_SET))
      opd->op = OP_BRANCH_N;
    else if (OP_ISBRACND(opd->op))
      t = (opd->op == OP_BRANCH_CF ? T_SET : T_CLEAR);
    else if (opd->op == OP_SETCLRT)
      t = (opd->imm ? T_SET : T_CLEAR);
    else if (opd->dest & BITMASK1(SHR_T))
      t = T_UNKNOWN;
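    // (so e.g. a BT right after SETT becomes an unconditional OP_BRANCH,
    // while a BT right after CLRT becomes a never-taken OP_BRANCH_N)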

    // "overscan" detection: unreachable code after unconditional branch
    // this can happen if the insn after a forward branch isn't a local target
    if (OP_ISBRAUC(opd->op)) {
      if (op_flags[i + 1] & OF_DELAY_OP) {
        if (i_end > i + 2 && !(op_flags[i + 2] & OF_BTARGET))
          i_end = i + 2;
      } else {
        if (i_end > i + 1 && !(op_flags[i + 1] & OF_BTARGET))
          i_end = i + 1;
      }
    }

    // divide operation verification:
    // 1. there must not be a branch target inside
    // 2. nothing may be in a delay slot (could only be the DIV0)
    // 3. DIV0/n*(ROTCL+DIV1)/ROTCL:
    //     div.div1 > 0 && div.rotcl == div.div1+1 && div.rn != div.ro
    // 4. DIV0/n*DIV1/ROTCL:
    //     div.div1 > 0 && div.rotcl == 1 && div.ro == div.rn
    if (i_div >= 0) {
      if (op_flags[i] & OF_BTARGET) { // condition 1
        ops[i_div].imm = 0;
        i_div = -1;
      } else if (--is_divop == 0)
        i_div = -1;
    } else if (opd->op == OP_DIV0) {
      struct div *div = &div(opd);
      is_divop = div->div1 + div->rotcl;
      if (op_flags[i] & OF_DELAY_OP) // condition 2
        opd->imm = 0;
      else if (! div->div1 || ! ((div->ro == div->rn && div->rotcl == 1) ||
               (div->ro != div->rn && div->rotcl == div->div1+1)))
        opd->imm = 0; // condition 3+4
      else if (is_divop)
        i_div = i;
    }
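    // (an illustrative sequence matching condition 3: DIV0S Rm,Rn followed
    // by 16 repetitions of ROTCL R0; DIV1 Rm,Rn and one final ROTCL R0
    // gives div1=16, rotcl=17, ro != rn)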

    // literal pool size detection
    if (opd->op == OP_MOVA && opd->imm >= base_pc)
      if (lowest_mova == 0 || opd->imm < lowest_mova)
        lowest_mova = opd->imm;
    if (opd->op == OP_LOAD_POOL) {
      if (opd->imm >= base_pc && opd->imm < end_pc + MAX_LITERAL_OFFSET) {
        if (end_literals < opd->imm + opd->size * 2)
          end_literals = opd->imm + opd->size * 2;
        if (lowest_literal == 0 || lowest_literal > opd->imm)
          lowest_literal = opd->imm;
        if (opd->size == 2) {
          // tweak for NFL: treat a 32bit literal as an address and check if it
          // points to the literal space. In that case handle it like MOVA.
          tmp = FETCH32(opd->imm) & ~0x20000000; // MUST ignore wt bit here
          if (tmp >= end_pc && tmp < end_pc + MAX_LITERAL_OFFSET)
            if (lowest_mova == 0 || tmp < lowest_mova)
              lowest_mova = tmp;
        }
      }
    }

#if LOOP_DETECTION
    // inner loop detection
    // 1. a loop always starts with a branch target (for the backwards jump)
    // 2. it doesn't contain more than one polling and/or delaying insn
    // 3. it doesn't contain unconditional jumps
    // 4. no overlapping of loops
    if (op_flags[i] & OF_BTARGET) {
      last_btarget = i; // possible loop starting point
      op = 0;
    }
    // XXX let's hope nobody is putting a delay or poll insn in a delay slot :-/
    if (OP_ISBRAIMM(opd->op)) {
      // BSR, BRA, BT, BF with immediate target
      int i_tmp = (opd->imm - base_pc) / 2; // branch target, index in ops
      if (i_tmp == last_btarget) // candidate for basic loop optimizer
        op_flags[i_tmp] |= OF_BASIC_LOOP;
      if (i_tmp == last_btarget && op <= 1) {
        op_flags[i_tmp] |= OF_LOOP; // conditions met -> mark loop
        last_btarget = i+1; // condition 4
      } else if (opd->op == OP_BRANCH)
        last_btarget = i+1; // condition 3
    }
    else if (OP_ISBRAIND(opd->op))
      // BRAF, BSRF, JMP, JSR, register indirect. treat it as off-limits jump
      last_btarget = i+1; // condition 3
    else if (op_flags[i] & (OF_POLL_INSN|OF_DELAY_INSN))
      op ++; // condition 2
#endif
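    // (an illustrative loop this catches: the busy-wait
    //   target: MOV.L @Rm,Rn; TST Rn,Rn; BT target
    // starts at a branch target, contains one poll insn and a backwards
    // conditional branch, so target gets OF_BASIC_LOOP|OF_LOOP)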
  }
  end_pc = pc;

  // end_literals is used to decide whether to inline a literal or not
  // XXX: need better detection of whether the literal area is actually
  // written to at runtime
  if (lowest_literal >= base_pc) {
    if (lowest_literal < end_pc) {
      dbg(1, "warning: lowest_literal=%08x < end_pc=%08x", lowest_literal, end_pc);
      // TODO: does this always mean end_pc covers data?
    }
  }
  if (lowest_mova >= base_pc) {
    if (lowest_mova < end_literals) {
      dbg(1, "warning: mova=%08x < end_literals=%08x", lowest_mova, end_literals);
      end_literals = lowest_mova;
    }
    if (lowest_mova < end_pc) {
      dbg(1, "warning: mova=%08x < end_pc=%08x", lowest_mova, end_pc);
      end_literals = end_pc;
    }
  }
  if (lowest_literal >= end_literals)
    lowest_literal = end_literals;

  if (lowest_literal && end_literals)
    for (pc = lowest_literal; pc < end_literals; pc += 2)
      crc += FETCH_OP(pc);

  *end_pc_out = end_pc;
  if (base_literals_out != NULL)
    *base_literals_out = (lowest_literal ? lowest_literal : end_pc);
  if (end_literals_out != NULL)
    *end_literals_out = (end_literals ? end_literals : end_pc);

  // crc overflow handling, twice to collect all overflows
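  // (ones-complement style 16-bit folding; e.g. crc = 0x0001ffff folds to
  // 0xffff + 0x0001 = 0x10000, and again to 0x0000 + 0x0001 = 0x0001)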
  crc = (crc & 0xffff) + (crc >> 16);
  crc = (crc & 0xffff) + (crc >> 16);
  return crc;
}
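
#if 0
// illustrative only, not part of the build: how a caller might scan a block
// and recover its code/literal layout for the master SH2
static void scan_block_example(u32 base_pc)
{
  u8 op_flags[BLOCK_INSN_LIMIT];
  u32 end_pc, base_literals, end_literals;
  u16 crc = scan_block(base_pc, 0, op_flags, &end_pc,
    &base_literals, &end_literals);
  // code occupies [base_pc, end_pc), literals [base_literals, end_literals);
  // crc is a 16-bit checksum over both, usable to detect modified guest code
  (void)crc;
}
#endif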

// vim:shiftwidth=2:ts=2:expandtab