/*
 * SH2 recompiler
 * (C) notaz, 2009,2010,2013
 * (C) kub, 2018,2019,2020
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 *
 * notes:
 * - tcache, block descriptor, block entry buffer overflows result in oldest
 *   blocks being deleted until enough space is available
 * - link and list element buffer overflows result in failure and exit
 * - jumps between blocks are tracked for SMC handling (in block_entry->links),
 *   except jumps from global to CPU-local tcaches
 *
 * implemented:
 * - static register allocation
 * - remaining register caching and tracking in temporaries
 * - block-local branch linking
 * - block linking
 * - some constant propagation
 * - call stack caching for host block entry address
 * - delay, poll, and idle loop detection and handling
 * - some T/M flag optimizations where the value is known or isn't used
 *
 * TODO:
 * - better constant propagation
 * - bug fixing
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "../../pico/pico_int.h"
#include "../../pico/arm_features.h"
#include "sh2.h"
#include "compiler.h"
#include "../drc/cmn.h"
#include "../debug.h"

// features
#define PROPAGATE_CONSTANTS     1
#define LINK_BRANCHES           1
#define BRANCH_CACHE            1
#define CALL_STACK              1
#define ALIAS_REGISTERS         1
#define REMAP_REGISTER          1
#define LOOP_DETECTION          1
#define LOOP_OPTIMIZER          1
#define T_OPTIMIZER             1
#define DIV_OPTIMIZER           0

#define MAX_LITERAL_OFFSET      0x200   // max. MOVA, MOV @(PC) offset
#define MAX_LOCAL_TARGETS       (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES      (BLOCK_INSN_LIMIT / 2)

// debug stuff
// 01  - warnings/errors
// 02  - block info/smc
// 04  - asm
// 08  - runtime block entry log
// 10  - smc self-check
// 20  - runtime block entry counter
// 40  - rcache checking
// 80  - branch cache statistics
// 100 - write trace
// 200 - compare trace
// 400 - block entry backtrace on exit
// 800 - state dump on exit
// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0//x847
#endif
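// E.g. (illustrative, not in the original source) building with
// -DDRC_DEBUG=0x03 enables warnings/errors plus block info/smc logging;
// the bits above combine freely since each is tested with a bitwise AND.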
#if DRC_DEBUG
#define dbg(l,...) { \
  if ((l) & DRC_DEBUG) \
    elprintf(EL_STATUS, ##__VA_ARGS__); \
}
#include "mame/sh2dasm.h"
#include <platform/libpicofe/linux/host_dasm.h>
static int insns_compiled, hash_collisions, host_insn_count;
#define COUNT_OP \
  host_insn_count++
#else // !DRC_DEBUG
#define COUNT_OP
#define dbg(...)
#endif

///
#define FETCH_OP(pc) \
  dr_pc_base[(pc) / 2]
#define FETCH32(a) \
  ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
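// Illustrative note (not in the original source): the fetch window holds
// 16-bit insn words, so a 32-bit literal is composed from two adjacent words.
// E.g. if dr_pc_base[0] == 0x1234 and dr_pc_base[1] == 0x5678, then
// FETCH32(0) evaluates to 0x12345678.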
#define CHECK_UNHANDLED_BITS(mask, label) { \
  if ((op & (mask)) != 0) \
    goto label; \
}

#define GET_Fx() \
  ((op >> 4) & 0x0f)
#define GET_Rm GET_Fx
#define GET_Rn() \
  ((op >> 8) & 0x0f)

#define T       0x00000001
#define S       0x00000002
#define I       0x000000f0
#define Q       0x00000100
#define M       0x00000200
#define T_save  0x00000800

#define I_SHIFT 4
#define Q_SHIFT 8
#define M_SHIFT 9
#define T_SHIFT 11

static struct op_data {
  u8 op;
  u8 cycles;
  u8 size;     // 0, 1, 2 - byte, word, long
  s8 rm;       // branch or load/store data reg
  u32 source;  // bitmask of src regs
  u32 dest;    // bitmask of dest regs
  u32 imm;     // immediate/io address/branch target
               // (for literal - address, not value)
} ops[BLOCK_INSN_LIMIT];

enum op_types {
  OP_UNHANDLED = 0,
  OP_BRANCH,
  OP_BRANCH_N,  // conditional known not to be taken
  OP_BRANCH_CT, // conditional, branch if T set
  OP_BRANCH_CF, // conditional, branch if T clear
  OP_BRANCH_R,  // indirect
  OP_BRANCH_RF, // indirect far (PC + Rm)
  OP_SETCLRT,   // T flag set/clear
  OP_MOVE,      // register move
  OP_LOAD_CONST,// load const to register
  OP_LOAD_POOL, // literal pool load, imm is address
  OP_MOVA,      // MOVA instruction
  OP_SLEEP,     // SLEEP instruction
  OP_RTE,       // RTE instruction
  OP_TRAPA,     // TRAPA instruction
  OP_LDC,       // LDC instruction
  OP_DIV0,      // DIV0[US] instruction
  OP_UNDEFINED,
};

struct div {
  u32 state:1;          // 0: expect DIV1/ROTCL, 1: expect DIV1
  u32 rn:5, rm:5, ro:5; // rn and rm for DIV1, ro for ROTCL
  u32 div1:8, rotcl:8;  // DIV1 count, ROTCL count
};
union _div { u32 imm; struct div div; }; // XXX tut-tut type punning...
#define div(opd) ((union _div *)&((opd)->imm))->div
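// Illustrative sketch (not in the original source): div() reinterprets the
// imm field of an op_data entry in place as packed divide-sequence state:
//   struct op_data *opd = &ops[i];
//   div(opd).rn = 2;  // expands to ((union _div *)&opd->imm)->div.rn = 2
//   div(opd).div1++;  // count one more DIV1 insn in the detected sequence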
// XXX consider trap insns: OP_TRAPA, OP_UNDEFINED?
#define OP_ISBRANCH(op) ((BITRANGE(OP_BRANCH, OP_BRANCH_RF) | BITMASK1(OP_RTE)) \
                                & BITMASK1(op))
#define OP_ISBRAUC(op) (BITMASK4(OP_BRANCH, OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
#define OP_ISBRACND(op) (BITMASK2(OP_BRANCH_CT, OP_BRANCH_CF) \
                                & BITMASK1(op))
#define OP_ISBRAIMM(op) (BITMASK3(OP_BRANCH, OP_BRANCH_CT, OP_BRANCH_CF) \
                                & BITMASK1(op))
#define OP_ISBRAIND(op) (BITMASK3(OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
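// Illustrative note (not in the original source): these classify an op type
// by intersecting bit masks, so e.g. OP_ISBRACND(OP_BRANCH_CT) is nonzero
// (conditional branch) while OP_ISBRACND(OP_BRANCH_R) is 0 (indirect,
// unconditional).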
#ifdef DRC_SH2

#if (DRC_DEBUG & 4)
static u8 *tcache_dsm_ptrs[3];
static char sh2dasm_buff[64];
#define do_host_disasm(tcid) \
  host_dasm(tcache_dsm_ptrs[tcid], emith_insn_ptr() - tcache_dsm_ptrs[tcid]); \
  tcache_dsm_ptrs[tcid] = emith_insn_ptr()
#else
#define do_host_disasm(x)
#endif

#define SH2_DUMP(sh2, reason) { \
  char ms = (sh2)->is_slave ? 's' : 'm'; \
  printf("%csh2 %s %08x\n", ms, reason, (sh2)->pc); \
  printf("%csh2 r0-7  %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[0], (sh2)->r[1], (sh2)->r[2], (sh2)->r[3], \
    (sh2)->r[4], (sh2)->r[5], (sh2)->r[6], (sh2)->r[7]); \
  printf("%csh2 r8-15 %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[8], (sh2)->r[9], (sh2)->r[10], (sh2)->r[11], \
    (sh2)->r[12], (sh2)->r[13], (sh2)->r[14], (sh2)->r[15]); \
  printf("%csh2 pc-ml %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->pc, (sh2)->ppc, (sh2)->pr, (sh2)->sr&0xfff, \
    (sh2)->gbr, (sh2)->vbr, (sh2)->mach, (sh2)->macl); \
  printf("%csh2 tmp-p %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->drc_tmp, (sh2)->irq_cycles, \
    (sh2)->pdb_io_csum[0], (sh2)->pdb_io_csum[1], (sh2)->state, \
    (sh2)->poll_addr, (sh2)->poll_cycles, (sh2)->poll_cnt); \
}

#if (DRC_DEBUG & (8|256|512|1024)) || defined(PDB)
#if (DRC_DEBUG & (256|512|1024))
static SH2 csh2[2][8];
static FILE *trace[2];
#endif
static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
{
  if (block != NULL) {
    dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
      sh2->pc, block, (signed int)sr >> 12);
#if defined PDB
    pdb_step(sh2, sh2->pc);
#elif (DRC_DEBUG & 256)
    {
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "wb");
        trace[1] = fopen("pico.trace1", "wb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        fwrite(sh2, offsetof(SH2, read8_map), 1, trace[idx]);
        fwrite(&sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx]);
        memcpy(&csh2[idx][0], sh2, offsetof(SH2, poll_cnt)+4);
        csh2[idx][0].is_slave = idx;
      }
    }
#elif (DRC_DEBUG & 512)
    {
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "rb");
        trace[1] = fopen("pico.trace1", "rb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        if (!fread(&fsh2, offsetof(SH2, read8_map), 1, trace[idx]) ||
            !fread(&fsh2.pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx])) {
          printf("trace eof at %08lx\n", ftell(trace[idx]));
          exit(1);
        }
        fsh2.sr = (fsh2.sr & 0xfff) | (sh2->sr & ~0xfff);
        fsh2.is_slave = idx;
        if (memcmp(&fsh2, sh2, offsetof(SH2, read8_map)) ||
            0)//memcmp(&fsh2.pdb_io_csum, &sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum)))
        {
          printf("difference at %08lx!\n", ftell(trace[idx]));
          SH2_DUMP(&fsh2, "file");
          SH2_DUMP(sh2, "current");
          SH2_DUMP(&csh2[idx][0], "previous");
          char *ps = (char *)sh2, *pf = (char *)&fsh2;
          for (idx = 0; idx < offsetof(SH2, read8_map); idx += sizeof(u32))
            if (*(u32 *)(ps+idx) != *(u32 *)(pf+idx))
              printf("diff reg %ld\n", idx/sizeof(u32));
          exit(1);
        }
        csh2[idx][0] = fsh2;
      }
    }
#elif (DRC_DEBUG & 1024)
    {
      int x = sh2->is_slave, i;
      for (i = 0; i < ARRAY_SIZE(csh2[x])-1; i++)
        memcpy(&csh2[x][i], &csh2[x][i+1], offsetof(SH2, poll_cnt)+4);
      memcpy(&csh2[x][ARRAY_SIZE(csh2[x])-1], sh2, offsetof(SH2, poll_cnt)+4);
      csh2[x][0].is_slave = x;
    }
#endif
  }
  return block;
}
#endif
// we have 3 translation cache buffers, split from one drc/cmn buffer.
// BIOS shares tcache with data array because it's only used for init
// and can be discarded early
#define TCACHE_BUFFERS 3

struct ring_buffer {
  u8 *base;          // ring buffer memory
  unsigned item_sz;  // size of one buffer item
  unsigned size;     // number of items in ring
  int first, next;   // read and write pointers
  int used;          // number of used items in ring
};

enum { BL_JMP=1, BL_LDJMP, BL_JCCBLX };
struct block_link {
  short tcache_id;
  short type;                // BL_JMP et al
  u32 target_pc;
  void *jump;                // insn address
  void *blx;                 // block link/exit area if any
  u8 jdisp[12];              // jump backup buffer
  struct block_link *next;   // either in block_entry->links or unresolved
  struct block_link *o_next; // ...in block_entry->o_links
  struct block_link *prev;
  struct block_link *o_prev;
  struct block_entry *target;// target block this is linked in (be->links)
};

struct block_entry {
  u32 pc;
  u8 *tcache_ptr;            // translated block for above PC
  struct block_entry *next;  // chain in hash_table with same pc hash
  struct block_entry *prev;
  struct block_link *links;  // incoming links to this entry
  struct block_link *o_links;// outgoing links from this entry
#if (DRC_DEBUG & 2)
  struct block_desc *block;
#endif
#if (DRC_DEBUG & 32)
  int entry_count;
#endif
};

struct block_desc {
  u32 addr;                  // block start SH2 PC address
  u32 addr_lit;              // block start SH2 literal pool addr
  int size;                  // ..of recompiled insns
  int size_lit;              // ..of (insns+)literal pool
  u8 *tcache_ptr;            // start address of block in cache
  u16 crc;                   // crc of insns and literals
  u16 active;                // actively used or deactivated?
  struct block_list *list;
#if (DRC_DEBUG & 2)
  int refcount;
#endif
  int entry_count;
  struct block_entry *entryp;
};

struct block_list {
  struct block_desc *block;  // block reference
  struct block_list *next;   // pointers for doubly linked list
  struct block_list *prev;
  struct block_list **head;  // list head (for removing from list)
  struct block_list *l_next;
};

static u8 *tcache_ptr;       // ptr for code emitters

// XXX: need to tune sizes
static struct ring_buffer tcache_ring[TCACHE_BUFFERS];
static const int tcache_sizes[TCACHE_BUFFERS] = {
  DRC_TCACHE_SIZE * 30 / 32, // ROM (rarely used), DRAM
  DRC_TCACHE_SIZE / 32,      // BIOS, data array in master sh2
  DRC_TCACHE_SIZE / 32,      // ... slave
};

#define BLOCK_MAX_COUNT(tcid)      ((tcid) ? 256 : 32*256)
static struct ring_buffer block_ring[TCACHE_BUFFERS];
static struct block_desc *block_tables[TCACHE_BUFFERS];

#define ENTRY_MAX_COUNT(tcid)      ((tcid) ? 8*512 : 256*512)
static struct ring_buffer entry_ring[TCACHE_BUFFERS];
static struct block_entry *entry_tables[TCACHE_BUFFERS];

// we have block_link_pool to avoid using mallocs
#define BLOCK_LINK_MAX_COUNT(tcid) ((tcid) ? 512 : 32*512)
static struct block_link *block_link_pool[TCACHE_BUFFERS];
static int block_link_pool_counts[TCACHE_BUFFERS];
static struct block_link **unresolved_links[TCACHE_BUFFERS];
static struct block_link *blink_free[TCACHE_BUFFERS];

// used for invalidation
#define RAM_SIZE(tcid) ((tcid) ? 0x1000 : 0x40000)
#define INVAL_PAGE_SIZE 0x100
static struct block_list *inactive_blocks[TCACHE_BUFFERS];

// array of pointers to block_lists for RAM and 2 data arrays
// each array has len: sizeof(mem) / INVAL_PAGE_SIZE
static struct block_list **inval_lookup[TCACHE_BUFFERS];

#define HASH_TABLE_SIZE(tcid) ((tcid) ? 512 : 32*512)
static struct block_entry **hash_tables[TCACHE_BUFFERS];

#define HASH_FUNC(hash_tab, addr, mask) \
  (hash_tab)[((addr) >> 1) & (mask)]
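// Illustrative note (not in the original source): SH2 insns are 2-byte
// aligned, so HASH_FUNC drops the always-zero bit 0 before masking. For the
// small tables (mask 511), pc=0xc0000104 and pc=0xc0000504 both hash to
// slot (0xc0000104 >> 1) & 511 == 130 and chain via block_entry->next.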
#define BLOCK_LIST_MAX_COUNT (64*1024)
static struct block_list *block_list_pool;
static int block_list_pool_count;
static struct block_list *blist_free;

#if (DRC_DEBUG & 128)
#if BRANCH_CACHE
int bchit, bcmiss;
#endif
#if CALL_STACK
int rchit, rcmiss;
#endif
#endif

// host register tracking
enum cache_reg_htype {
  HRT_TEMP   = 1, // is for temps and args
  HRT_REG    = 2, // is for sh2 regs
};

enum cache_reg_flags {
  HRF_DIRTY  = 1 << 0, // has "dirty" value to be written to ctx
  HRF_PINNED = 1 << 1, // has a pinned mapping
  HRF_S16    = 1 << 2, // has a sign extended 16 bit value
  HRF_U16    = 1 << 3, // has a zero extended 16 bit value
};

enum cache_reg_type {
  HR_FREE,
  HR_CACHED, // vreg has sh2_reg_e
  HR_TEMP,   // reg used for temp storage
};

typedef struct {
  u8 hreg:6;   // "host" reg
  u8 htype:2;  // TEMP or REG?
  u8 flags:4;  // DIRTY, PINNED?
  u8 type:2;   // CACHED or TEMP?
  u8 locked:2; // LOCKED reference counter
  u16 stamp;   // kind of a timestamp
  u32 gregs;   // "guest" reg mask
} cache_reg_t;

// guest register tracking
enum guest_reg_flags {
  GRF_DIRTY  = 1 << 0, // reg has "dirty" value to be written to ctx
  GRF_CONST  = 1 << 1, // reg has a constant
  GRF_CDIRTY = 1 << 2, // constant not yet written to ctx
  GRF_STATIC = 1 << 3, // reg has static mapping to vreg
  GRF_PINNED = 1 << 4, // reg has pinned mapping to vreg
};

typedef struct {
  u8 flags; // guest flags: is constant, is dirty?
  s8 sreg;  // cache reg for static mapping
  s8 vreg;  // cache_reg this is currently mapped to, -1 if not mapped
  s8 cnst;  // const index if this is constant
} guest_reg_t;
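// Illustrative note (not in the original source): the two tables describe the
// same mapping from both sides. If guest reg R3 currently lives in cache reg
// 5, then guest_regs[3].vreg == 5 and cache_regs[5].gregs has bit 3 set;
// writing to R3 additionally sets the DIRTY flags until the value is flushed
// back to the SH2 context.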
// possibly needed in code emitter
static int rcache_get_tmp(void);
static void rcache_free_tmp(int hr);

// Note: Register assignment goes by ABI convention. Caller save registers are
// TEMPORARY, callee save registers are PRESERVED. Unusable regs are omitted.
// There must be at least as many free (not context or statically mapped)
// PRESERVED/TEMPORARY registers as the handlers need in the worst case
// (currently 4). There must be at least 3 PARAM registers, and PARAM+TEMPORARY
// must be at least 4. SR must, and R0 should by all means, be statically mapped.
// XXX the static definition of SR MUST match that in compiler.h

#if defined(__arm__) || defined(_M_ARM)
#include "../drc/emit_arm.c"
#elif defined(__aarch64__) || defined(_M_ARM64)
#include "../drc/emit_arm64.c"
#elif defined(__mips__)
#include "../drc/emit_mips.c"
#elif defined(__riscv__) || defined(__riscv)
#include "../drc/emit_riscv.c"
#elif defined(__powerpc__)
#include "../drc/emit_ppc.c"
#elif defined(__i386__) || defined(_M_X86)
#include "../drc/emit_x86.c"
#elif defined(__x86_64__) || defined(_M_X64)
#include "../drc/emit_x86.c"
#else
#error unsupported arch
#endif

static const signed char hregs_param[] = PARAM_REGS;
static const signed char hregs_temp [] = TEMPORARY_REGS;
static const signed char hregs_saved[] = PRESERVED_REGS;
static const signed char regs_static[] = STATIC_SH2_REGS;

#define CACHE_REGS \
  (ARRAY_SIZE(hregs_param)+ARRAY_SIZE(hregs_temp)+ARRAY_SIZE(hregs_saved)-1)
static cache_reg_t cache_regs[CACHE_REGS];

static signed char reg_map_host[HOST_REGS];

static guest_reg_t guest_regs[SH2_REGS];

static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void REGPARM(1) (*sh2_drc_dispatcher)(u32 pc);
#if CALL_STACK
static u32  REGPARM(2) (*sh2_drc_dispatcher_call)(u32 pc);
static void REGPARM(1) (*sh2_drc_dispatcher_return)(u32 pc);
#endif
static void REGPARM(1) (*sh2_drc_exit)(u32 pc);
static void (*sh2_drc_test_irq)(void);

static u32  REGPARM(1) (*sh2_drc_read8)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read8_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32_poll)(u32 a);
static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write32)(u32 a, u32 d);
#ifdef DRC_SR_REG
void REGPARM(1) (*sh2_drc_save_sr)(SH2 *sh2);
void REGPARM(1) (*sh2_drc_restore_sr)(SH2 *sh2);
#endif

// flags for memory access
#define MF_SIZEMASK 0x03        // size of access
#define MF_POSTINCR 0x10        // post increment (for read_rr)
#define MF_PREDECR  MF_POSTINCR // pre decrement (for write_rr)
#define MF_POLLING  0x20        // include polling check in read
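// Illustrative note (not in the original source): callers combine these, e.g.
// a longword read with a polling check would be passed as (2 | MF_POLLING);
// the same 0x10 bit means post-increment on reads and pre-decrement on
// writes, matching the @Rm+ and @-Rn addressing modes.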
// address space stuff
static int dr_is_rom(u32 a)
{
  // tweak for WWF Raw which writes data to some high ROM addresses
  return (a & 0xc6000000) == 0x02000000 && (a & 0x3f0000) < 0x3e0000;
}
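// Illustrative note (not in the original source): this accepts the ROM area
// and its mirrors (e.g. dr_is_rom(0x02000100) != 0) but rejects the top
// 0x20000 bytes of each 4MB window (e.g. dr_is_rom(0x023e0000) == 0), since
// WWF Raw writes data there and that range must stay invalidatable.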
static int dr_ctx_get_mem_ptr(SH2 *sh2, u32 a, u32 *mask)
{
  void *memptr;
  int poffs = -1;

  // check if region is mapped memory
  memptr = p32x_sh2_get_mem_ptr(a, mask, sh2);
  if (memptr == NULL)
    return poffs;

  if (memptr == sh2->p_bios)        // BIOS
    poffs = offsetof(SH2, p_bios);
  else if (memptr == sh2->p_da)     // data array
    poffs = offsetof(SH2, p_da);
  else if (memptr == sh2->p_sdram)  // SDRAM
    poffs = offsetof(SH2, p_sdram);
  else if (memptr == sh2->p_rom)    // ROM
    poffs = offsetof(SH2, p_rom);

  return poffs;
}

static int dr_get_tcache_id(u32 pc, int is_slave)
{
  u32 tcid = 0;

  if ((pc & 0xe0000000) == 0xc0000000)
    tcid = 1 + is_slave; // data array
  if ((pc & ~0xfff) == 0)
    tcid = 1 + is_slave; // BIOS
  return tcid;
}
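// Illustrative note (not in the original source): code in the data array
// (0xc0000000+) or BIOS (0x00000000-0x00000fff) is CPU-local, so it compiles
// into tcache 1 (master) or 2 (slave); everything else (ROM, SDRAM) shares
// the global tcache 0. E.g. dr_get_tcache_id(0xc0000040, 1) == 2.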
static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
{
  struct block_entry *be;

  *tcache_id = dr_get_tcache_id(pc, is_slave);

  be = HASH_FUNC(hash_tables[*tcache_id], pc, HASH_TABLE_SIZE(*tcache_id) - 1);
  if (be != NULL) // don't ask... gcc code generation hint
  for (; be != NULL; be = be->next)
    if (be->pc == pc)
      return be;

  return NULL;
}

// ---------------------------------------------------------------

// ring buffer management
#define RING_INIT(r,m,n)    *(r) = (struct ring_buffer) { .base = (u8 *)m, \
                                        .item_sz = sizeof(*(m)), .size = n };

static void *ring_alloc(struct ring_buffer *rb, int count)
{
  // allocate space in ring buffer
  void *p;

  p = rb->base + rb->next * rb->item_sz;
  if (rb->next+count > rb->size) {
    rb->used += rb->size - rb->next;
    p = rb->base; // wrap if overflow at end
    rb->next = count;
  } else {
    rb->next += count;
    if (rb->next == rb->size) rb->next = 0;
  }
  rb->used += count;
  return p;
}

static void ring_wrap(struct ring_buffer *rb)
{
  // insufficient space at end of buffer memory, wrap around
  rb->used += rb->size - rb->next;
  rb->next = 0;
}

static void ring_free(struct ring_buffer *rb, int count)
{
  // free oldest space in ring buffer
  rb->first += count;
  if (rb->first >= rb->size) rb->first -= rb->size;
  rb->used -= count;
}

static void ring_free_p(struct ring_buffer *rb, void *p)
{
  // free ring buffer space up to given pointer
  rb->first = ((u8 *)p - rb->base) / rb->item_sz;
  rb->used = rb->next - rb->first;
  if (rb->used < 0) rb->used += rb->size;
}

static void *ring_reset(struct ring_buffer *rb)
{
  // reset to initial state
  rb->first = rb->next = rb->used = 0;
  return rb->base + rb->next * rb->item_sz;
}

static void *ring_first(struct ring_buffer *rb)
{
  return rb->base + rb->first * rb->item_sz;
}

static void *ring_next(struct ring_buffer *rb)
{
  return rb->base + rb->next * rb->item_sz;
}
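// Usage sketch (illustrative, not in the original source): allocations are
// always contiguous, so a request that doesn't fit before the end of the
// buffer wraps to base, and the skipped tail counts as used until freed.
// Callers are expected to check capacity (used vs size) themselves:
//   struct block_desc blocks[8];
//   struct ring_buffer rb;
//   RING_INIT(&rb, blocks, 8);
//   struct block_desc *bd = ring_alloc(&rb, 2); // bd == &blocks[0], used = 2
//   ring_alloc(&rb, 5);                         // next = 7, used = 7
//   ring_free(&rb, 2);                          // oldest 2 reclaimed, used = 5
//   ring_alloc(&rb, 2);                         // 2 don't fit at the end:
//                                               // wraps, returns &blocks[0]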
// block management
static void add_to_block_list(struct block_list **blist, struct block_desc *block)
{
  struct block_list *added;

  if (blist_free) {
    added = blist_free;
    blist_free = added->next;
  } else if (block_list_pool_count >= BLOCK_LIST_MAX_COUNT) {
    printf("block list overflow\n");
    exit(1);
  } else {
    added = block_list_pool + block_list_pool_count;
    block_list_pool_count++;
  }

  added->block = block;
  added->l_next = block->list;
  block->list = added;
  added->head = blist;

  added->prev = NULL;
  if (*blist)
    (*blist)->prev = added;
  added->next = *blist;
  *blist = added;
}

static void rm_from_block_lists(struct block_desc *block)
{
  struct block_list *entry;

  entry = block->list;
  while (entry != NULL) {
    if (entry->prev != NULL)
      entry->prev->next = entry->next;
    else
      *(entry->head) = entry->next;
    if (entry->next != NULL)
      entry->next->prev = entry->prev;

    entry->next = blist_free;
    blist_free = entry;

    entry = entry->l_next;
  }
  block->list = NULL;
}

static void discard_block_list(struct block_list **blist)
{
  struct block_list *next, *current = *blist;
  while (current != NULL) {
    next = current->next;
    current->next = blist_free;
    blist_free = current;
    current = next;
  }
  *blist = NULL;
}

static void add_to_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

  be->prev = NULL;
  if (*head)
    (*head)->prev = be;
  be->next = *head;
  *head = be;

#if (DRC_DEBUG & 2)
  if (be->next != NULL) {
    printf(" %08x@%p: entry hash collision with %08x@%p\n",
      be->pc, be->tcache_ptr, be->next->pc, be->next->tcache_ptr);
    hash_collisions++;
  }
#endif
}

static void rm_from_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

#if DRC_DEBUG & 1
  struct block_entry *current = be;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist @%p: be %p %08x missing?", head, be, be->pc);
#endif

  if (be->prev != NULL)
    be->prev->next = be->next;
  else
    *head = be->next;
  if (be->next != NULL)
    be->next->prev = be->prev;
}

static void add_to_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = *head;
  while (current != NULL && current != bl)
    current = current->next;
  if (current == bl)
    dbg(1, "add_to_hashlist_unresolved @%p: bl %p %p %08x already in?", head, bl, bl->target, bl->target_pc);
#endif

  bl->target = NULL; // marker for not resolved
  bl->prev = NULL;
  if (*head)
    (*head)->prev = bl;
  bl->next = *head;
  *head = bl;
}

static void rm_from_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = bl;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist_unresolved @%p: bl %p %p %08x missing?", head, bl, bl->target, bl->target_pc);
#endif

  if (bl->prev != NULL)
    bl->prev->next = bl->next;
  else
    *head = bl->next;
  if (bl->next != NULL)
    bl->next->prev = bl->prev;
}
#if LINK_BRANCHES
static void dr_block_link(struct block_entry *be, struct block_link *bl, int emit_jump)
{
  dbg(2, "- %slink from %p to pc %08x entry %p", emit_jump ? "":"early ",
    bl->jump, bl->target_pc, be->tcache_ptr);

  if (emit_jump) {
    u8 *jump = bl->jump;
    int jsz = emith_jump_patch_size();
    if (bl->type == BL_JMP) { // patch: jump @entry
      // inlined: @jump far jump to target
      emith_jump_patch(jump, be->tcache_ptr, &jump);
    } else if (bl->type == BL_LDJMP) { // write: jump @entry
      // inlined: @jump far jump to target
      emith_jump_at(jump, be->tcache_ptr);
      jsz = emith_jump_at_size();
    } else if (bl->type == BL_JCCBLX) { // patch: jump cond -> jump @entry
      if (emith_jump_patch_inrange(bl->jump, be->tcache_ptr)) {
        // inlined: @jump near jumpcc to target
        emith_jump_patch(jump, be->tcache_ptr, &jump);
      } else { // dispatcher cond immediate
        // via blx: @jump near jumpcc to blx; @blx far jump
        emith_jump_patch(jump, bl->blx, &jump);
        emith_jump_at(bl->blx, be->tcache_ptr);
        host_instructions_updated(bl->blx, (char *)bl->blx + emith_jump_at_size(),
            ((uintptr_t)bl->blx & 0x1f) + emith_jump_at_size()-1 > 0x1f);
      }
    } else {
      printf("unknown BL type %d\n", bl->type);
      exit(1);
    }
    host_instructions_updated(jump, jump + jsz, ((uintptr_t)jump & 0x1f) + jsz-1 > 0x1f);
  }

  // move bl to block_entry
  bl->target = be;
  bl->prev = NULL;
  if (be->links)
    be->links->prev = bl;
  bl->next = be->links;
  be->links = bl;
}

static void dr_block_unlink(struct block_link *bl, int emit_jump)
{
  dbg(2, "- unlink from %p to pc %08x", bl->jump, bl->target_pc);

  if (bl->target) {
    if (emit_jump) {
      u8 *jump = bl->jump;
      int jsz = emith_jump_patch_size();
      if (bl->type == BL_JMP) { // jump_patch @dispatcher
        // inlined: @jump far jump to dispatcher
        emith_jump_patch(jump, sh2_drc_dispatcher, &jump);
      } else if (bl->type == BL_LDJMP) { // restore: load pc, jump @dispatcher
        // inlined: @jump load target_pc, far jump to dispatcher
        memcpy(jump, bl->jdisp, emith_jump_at_size());
        jsz = emith_jump_at_size();
      } else if (bl->type == BL_JCCBLX) { // jump cond @blx; @blx: load pc, jump
        // via blx: @jump near jumpcc to blx; @blx load target_pc, far jump
        emith_jump_patch(bl->jump, bl->blx, &jump);
        memcpy(bl->blx, bl->jdisp, emith_jump_at_size());
        host_instructions_updated(bl->blx, (char *)bl->blx + emith_jump_at_size(), 1);
      } else {
        printf("unknown BL type %d\n", bl->type);
        exit(1);
      }
      // update cpu caches since the previous jump target doesn't exist anymore
      host_instructions_updated(jump, jump + jsz, 1);
    }

    if (bl->prev)
      bl->prev->next = bl->next;
    else
      bl->target->links = bl->next;
    if (bl->next)
      bl->next->prev = bl->prev;
    bl->target = NULL;
  }
}
#endif
static struct block_link *dr_prepare_ext_branch(struct block_entry *owner, u32 pc, int is_slave, int tcache_id)
{
#if LINK_BRANCHES
  struct block_link *bl = block_link_pool[tcache_id];
  int cnt = block_link_pool_counts[tcache_id];
  int target_tcache_id;

  // get the target block entry
  target_tcache_id = dr_get_tcache_id(pc, is_slave);
  if (target_tcache_id && target_tcache_id != tcache_id)
    return NULL;

  // get a block link
  if (blink_free[tcache_id] != NULL) {
    bl = blink_free[tcache_id];
    blink_free[tcache_id] = bl->next;
  } else if (cnt >= BLOCK_LINK_MAX_COUNT(tcache_id)) {
    dbg(1, "bl overflow for tcache %d", tcache_id);
    return NULL;
  } else {
    bl += cnt;
    block_link_pool_counts[tcache_id] = cnt+1;
  }

  // prepare link and add to outgoing list of owner
  bl->tcache_id = tcache_id;
  bl->target_pc = pc;
  bl->jump = tcache_ptr;
  bl->blx = NULL;
  bl->o_next = owner->o_links;
  owner->o_links = bl;

  add_to_hashlist_unresolved(bl, tcache_id);
  return bl;
#else
  return NULL;
#endif
}
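
// dr_mark_memory keeps a per-page counter of how many compiled blocks cover
// each guest RAM page (mark=+1 when a block is activated, -1 when removed);
// a nonzero counter is what write handling uses for overwrite detection.
// active blocks are also registered in inval_lookup so a detected write can
// find the affected blocks quickly.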
static void dr_mark_memory(int mark, struct block_desc *block, int tcache_id, u32 nolit)
{
  u8 *drc_ram_blk = NULL, *lit_ram_blk = NULL;
  u32 addr, end, mask = 0, shift = 0, idx;

  // mark memory blocks as containing compiled code
  if ((block->addr & 0xc7fc0000) == 0x06000000
      || (block->addr & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      drc_ram_blk = Pico32xMem->drcblk_da[tcache_id-1];
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      drc_ram_blk = Pico32xMem->drcblk_ram;
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    // mark recompiled insns
    addr = block->addr & ~((1 << shift) - 1);
    end = block->addr + block->size;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark literal pool
    if (addr < (block->addr_lit & ~((1 << shift) - 1)))
      addr = block->addr_lit & ~((1 << shift) - 1);
    end = block->addr_lit + block->size_lit;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark for literals disabled
    if (nolit) {
      addr = nolit & ~((1 << shift) - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
        lit_ram_blk[idx++] = 1;
    }

    if (mark < 0)
      rm_from_block_lists(block);
    else {
      // add to invalidation lookup lists
      addr = block->addr & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr + block->size;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);

      if (addr < (block->addr_lit & ~(INVAL_PAGE_SIZE - 1)))
        addr = block->addr_lit & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
    }
  }
}

static u32 dr_check_nolit(u32 start, u32 end, int tcache_id)
{
  u8 *lit_ram_blk = NULL;
  u32 mask = 0, shift = 0, addr, idx;

  if ((start & 0xc7fc0000) == 0x06000000
      || (start & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    addr = start & ~((1 << shift) - 1);
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      if (lit_ram_blk[idx++])
        break;

    return (addr < start ? start : addr > end ? end : addr);
  }
  return end;
}
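
// take a block out of service: detach its entries from the hash table and
// turn incoming links back into unresolved ones; if 'free' (or literals
// were overwritten) also revoke outgoing links and release the descriptor,
// otherwise the block is only disabled and kept for possible reactivation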
static void dr_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit, int free)
{
  struct block_link *bl;
  u32 i;

  free = free || nolit; // block is invalid if literals are overwritten
  dbg(2," %sing block %08x-%08x,%08x-%08x, blkid %d,%d", free?"delet":"disabl",
    bd->addr, bd->addr + bd->size, bd->addr_lit, bd->addr_lit + bd->size_lit,
    tcache_id, bd - block_tables[tcache_id]);
  if (bd->addr == 0 || bd->entry_count == 0) {
    dbg(1, " killing dead block!? %08x", bd->addr);
    return;
  }

#if LINK_BRANCHES
  // remove from hash table, make incoming links unresolved
  if (bd->active) {
    for (i = 0; i < bd->entry_count; i++) {
      rm_from_hashlist(&bd->entryp[i], tcache_id);

      while ((bl = bd->entryp[i].links) != NULL) {
        dr_block_unlink(bl, 1);
        add_to_hashlist_unresolved(bl, tcache_id);
      }
    }

    dr_mark_memory(-1, bd, tcache_id, nolit);
    add_to_block_list(&inactive_blocks[tcache_id], bd);
  }
  bd->active = 0;
#endif

  if (free) {
#if LINK_BRANCHES
    // revoke outgoing links
    for (bl = bd->entryp[0].o_links; bl != NULL; bl = bl->o_next) {
      if (bl->target)
        dr_block_unlink(bl, 0);
      else
        rm_from_hashlist_unresolved(bl, tcache_id);
      bl->jump = NULL;
      bl->next = blink_free[bl->tcache_id];
      blink_free[bl->tcache_id] = bl;
    }
    bd->entryp[0].o_links = NULL;
#endif
    // invalidate block
    rm_from_block_lists(bd);
    bd->addr = bd->size = bd->addr_lit = bd->size_lit = 0;
    bd->entry_count = 0;
    bd->entryp = NULL;
  }
  emith_update_cache();
}

static struct block_desc *dr_find_inactive_block(int tcache_id, u16 crc,
  u32 addr, int size, u32 addr_lit, int size_lit)
{
  struct block_list **head = &inactive_blocks[tcache_id];
  struct block_list *current;

  for (current = *head; current != NULL; current = current->next) {
    struct block_desc *block = current->block;
    if (block->crc == crc && block->addr == addr && block->size == size &&
        block->addr_lit == addr_lit && block->size_lit == size_lit)
    {
      rm_from_block_lists(block);
      return block;
    }
  }
  return NULL;
}
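
// allocate a block descriptor and its entry points from the ring buffers;
// callers are expected to have made room beforehand (see dr_prepare_cache),
// so running out of space here is only reported, not recovered from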
static struct block_desc *dr_add_block(int entries, u32 addr, int size,
  u32 addr_lit, int size_lit, u16 crc, int is_slave, int *blk_id)
{
  struct block_entry *be;
  struct block_desc *bd;
  int tcache_id;

  // do a lookup to get tcache_id and override check
  be = dr_get_entry(addr, is_slave, &tcache_id);
  if (be != NULL)
    dbg(1, "block override for %08x", addr);

  if (block_ring[tcache_id].used + 1 > block_ring[tcache_id].size ||
      entry_ring[tcache_id].used + entries > entry_ring[tcache_id].size) {
    dbg(1, "bd overflow for tcache %d", tcache_id);
    return NULL;
  }

  *blk_id = block_ring[tcache_id].next;
  bd = ring_alloc(&block_ring[tcache_id], 1);
  bd->entryp = ring_alloc(&entry_ring[tcache_id], entries);

  bd->addr = addr;
  bd->size = size;
  bd->addr_lit = addr_lit;
  bd->size_lit = size_lit;
  bd->tcache_ptr = tcache_ptr;
  bd->crc = crc;
  bd->active = 0;
  bd->list = NULL;
  bd->entry_count = 0;
#if (DRC_DEBUG & 2)
  bd->refcount = 0;
#endif

  return bd;
}

static void dr_link_blocks(struct block_entry *be, int tcache_id)
{
#if LINK_BRANCHES
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  u32 pc = be->pc;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], pc, tcmask);
  struct block_link *bl = *head, *next;

  while (bl != NULL) {
    next = bl->next;
    if (bl->target_pc == pc && (!bl->tcache_id || bl->tcache_id == tcache_id)) {
      rm_from_hashlist_unresolved(bl, bl->tcache_id);
      dr_block_link(be, bl, 1);
    }
    bl = next;
  }
#endif
}

static void dr_link_outgoing(struct block_entry *be, int tcache_id, int is_slave)
{
#if LINK_BRANCHES
  struct block_link *bl;
  int target_tcache_id;

  for (bl = be->o_links; bl; bl = bl->o_next) {
    if (bl->target == NULL) {
      be = dr_get_entry(bl->target_pc, is_slave, &target_tcache_id);
      if (be != NULL && (!target_tcache_id || target_tcache_id == tcache_id)) {
        // remove bl from unresolved_links (must've been there since target was NULL)
        rm_from_hashlist_unresolved(bl, bl->tcache_id);
        dr_block_link(be, bl, 1);
      }
    }
  }
#endif
}

static void dr_activate_block(struct block_desc *bd, int tcache_id, int is_slave)
{
  int i;

  // connect branches
  for (i = 0; i < bd->entry_count; i++) {
    struct block_entry *entry = &bd->entryp[i];
    add_to_hashlist(entry, tcache_id);
    // incoming branches
    dr_link_blocks(entry, tcache_id);
    if (!tcache_id)
      dr_link_blocks(entry, is_slave?2:1);
    // outgoing branches
    dr_link_outgoing(entry, tcache_id, is_slave);
  }

  // mark memory for overwrite detection
  dr_mark_memory(1, bd, tcache_id, 0);
  bd->active = 1;
}

static void REGPARM(3) *dr_lookup_block(u32 pc, SH2 *sh2, int *tcache_id)
{
  struct block_entry *be = NULL;
  void *block = NULL;

  be = dr_get_entry(pc, sh2->is_slave, tcache_id);
  if (be != NULL)
    block = be->tcache_ptr;

#if (DRC_DEBUG & 2)
  if (be != NULL)
    be->block->refcount++;
#endif
  return block;
}

static void dr_free_oldest_block(int tcache_id)
{
  struct block_desc *bf;

  bf = ring_first(&block_ring[tcache_id]);
  if (bf->addr && bf->entry_count)
    dr_rm_block_entry(bf, tcache_id, 0, 1);
  ring_free(&block_ring[tcache_id], 1);

  if (block_ring[tcache_id].used) {
    bf = ring_first(&block_ring[tcache_id]);
    ring_free_p(&entry_ring[tcache_id], bf->entryp);
    ring_free_p(&tcache_ring[tcache_id], bf->tcache_ptr);
  } else {
    // reset since size of code block isn't known if no successor block exists
    ring_reset(&block_ring[tcache_id]);
    ring_reset(&entry_ring[tcache_id]);
    ring_reset(&tcache_ring[tcache_id]);
  }
}
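
// make room in a ring buffer: if the remaining tail is too small, evict
// blocks until the buffer can wrap around, then keep evicting the oldest
// block until the requested amount fits in front of the read pointer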
static inline void dr_reserve_cache(int tcache_id, struct ring_buffer *rb, int count)
{
  // while not enough space available
  if (rb->next + count >= rb->size) {
    // not enough space in rest of buffer -> wrap around
    while (rb->first >= rb->next && rb->used)
      dr_free_oldest_block(tcache_id);
    if (rb->first == 0 && rb->used)
      dr_free_oldest_block(tcache_id);
    ring_wrap(rb);
  }
  while (rb->first >= rb->next && rb->next + count > rb->first && rb->used)
    dr_free_oldest_block(tcache_id);
}

static u8 *dr_prepare_cache(int tcache_id, int insn_count, int entry_count)
{
  int bf = block_ring[tcache_id].first;

  // reserve one block desc
  if (block_ring[tcache_id].used >= block_ring[tcache_id].size)
    dr_free_oldest_block(tcache_id);
  // reserve block entries
  dr_reserve_cache(tcache_id, &entry_ring[tcache_id], entry_count);
  // reserve cache space
  dr_reserve_cache(tcache_id, &tcache_ring[tcache_id], insn_count*128);

  if (bf != block_ring[tcache_id].first) {
    // deleted some block(s), clear branch cache and return stack
#if BRANCH_CACHE
    if (tcache_id)
      memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    else {
      memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
      memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
    }
#endif
#if CALL_STACK
    if (tcache_id) {
      memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      sh2s[tcache_id-1].rts_cache_idx = 0;
    } else {
      memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
      sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
    }
#endif
  }

  return ring_next(&tcache_ring[tcache_id]);
}

static void dr_flush_tcache(int tcid)
{
  int i;
#if (DRC_DEBUG & 1)
  elprintf(EL_STATUS, "tcache #%d flush! (%d/%d, bds %d/%d bes %d/%d)", tcid,
    tcache_ring[tcid].used, tcache_ring[tcid].size, block_ring[tcid].used,
    block_ring[tcid].size, entry_ring[tcid].used, entry_ring[tcid].size);
#endif

  ring_reset(&tcache_ring[tcid]);
  ring_reset(&block_ring[tcid]);
  ring_reset(&entry_ring[tcid]);

  block_link_pool_counts[tcid] = 0;
  blink_free[tcid] = NULL;
  memset(unresolved_links[tcid], 0, sizeof(*unresolved_links[0]) * HASH_TABLE_SIZE(tcid));
  memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * HASH_TABLE_SIZE(tcid));

  if (tcid == 0) { // ROM, RAM
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache));
    memset(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    memset(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache));
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  } else {
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[tcid - 1]));
    memset(Pico32xMem->drclit_da[tcid - 1], 0, sizeof(Pico32xMem->drclit_da[tcid - 1]));
    memset(sh2s[tcid - 1].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[tcid - 1].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    sh2s[tcid - 1].rts_cache_idx = 0;
  }
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcid] = tcache_ring[tcid].base;
#endif

  for (i = 0; i < RAM_SIZE(tcid) / INVAL_PAGE_SIZE; i++)
    discard_block_list(&inval_lookup[tcid][i]);
  discard_block_list(&inactive_blocks[tcid]);
}

static void *dr_failure(void)
{
  printf("recompilation failed\n");
  exit(1);
}

// ---------------------------------------------------------------

// NB rcache allocation dependencies:
// - get_reg_arg/get_tmp_arg first (might evict other regs just allocated)
// - get_reg(..., NULL) before get_reg(..., &hr) if it might get the same reg
// - get_reg(..., RC_GR_READ/RMW, ...) before WRITE (might evict needed reg)
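// illustrative sketch only (register names are placeholders, not from a
// real op handler): for an op that needs a call argument, reads one guest
// reg and writes another, a safe ordering following the rules above is
//   tmp = rcache_get_tmp_arg(0);                  // args first
//   hr  = rcache_get_reg(rs, RC_GR_READ, NULL);   // then reads
//   hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);  // writes last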

// register cache / constant propagation stuff
typedef enum {
  RC_GR_READ,
  RC_GR_WRITE,
  RC_GR_RMW,
} rc_gr_mode;

typedef struct {
  u32 gregs;
  u32 val;
} gconst_t;

gconst_t gconsts[ARRAY_SIZE(guest_regs)];

static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr);
static inline int rcache_is_cached(sh2_reg_e r);
static void rcache_add_vreg_alias(int x, sh2_reg_e r);
static void rcache_remove_vreg_alias(int x, sh2_reg_e r);
static void rcache_evict_vreg(int x);
static void rcache_remap_vreg(int x);

static void rcache_set_x16(int hr, int s16_, int u16_)
{
  int x = reg_map_host[hr];
  if (x >= 0) {
    cache_regs[x].flags &= ~(HRF_S16|HRF_U16);
    if (s16_) cache_regs[x].flags |= HRF_S16;
    if (u16_) cache_regs[x].flags |= HRF_U16;
  }
}

static void rcache_copy_x16(int hr, int hr2)
{
  int x = reg_map_host[hr], y = reg_map_host[hr2];
  if (x >= 0 && y >= 0) {
    cache_regs[x].flags = (cache_regs[x].flags & ~(HRF_S16|HRF_U16)) |
                          (cache_regs[y].flags &  (HRF_S16|HRF_U16));
  }
}

static int rcache_is_s16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_S16 : 0);
}

static int rcache_is_u16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_U16 : 0);
}

#define RCACHE_DUMP(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i; \
  printf("cache dump %s:\n",msg); \
  printf(" cache_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->type != HR_FREE || cp->gregs || cp->locked || cp->flags) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
  } \
  printf(" guest_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 || gp->sreg >= 0 || gp->flags) \
      printf(" %d: v=%d f=%x s=%d c=%d\n", i, gp->vreg, gp->flags, gp->sreg, gp->cnst); \
  } \
  printf(" gconsts:\n"); \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    if (gconsts[i].gregs) \
      printf(" %d: m=%x v=%x\n", i, gconsts[i].gregs, gconsts[i].val); \
  } \
}

#define RCACHE_CHECK(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i, x, m = 0, d = 0; \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->flags & HRF_PINNED) m |= (1 << i); \
    if (cp->type == HR_FREE || cp->type == HR_TEMP) continue; \
    /* check connectivity greg->vreg */ \
    FOR_ALL_BITS_SET_DO(cp->gregs, x, \
      if (guest_regs[x].vreg != i) \
        { d = 1; printf("cache check v=%d r=%d not connected?\n",i,x); } \
    ) \
  } \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 && !(cache_regs[gp->vreg].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d v=%d not connected?\n", i, gp->vreg); }\
    if (gp->vreg != -1 && cache_regs[gp->vreg].type != HR_CACHED) \
      { d = 1; printf("cache check r=%d v=%d wrong type?\n", i, gp->vreg); }\
    if ((gp->flags & GRF_CONST) && !(gconsts[gp->cnst].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d c=%d not connected?\n", i, gp->cnst); }\
    if ((gp->flags & GRF_CDIRTY) && (gp->vreg != -1 || !(gp->flags & GRF_CONST)))\
      { d = 1; printf("cache check r=%d CDIRTY?\n", i); } \
    if (gp->flags & (GRF_STATIC|GRF_PINNED)) { \
      if (gp->sreg == -1 || !(cache_regs[gp->sreg].flags & HRF_PINNED))\
        { d = 1; printf("cache check r=%d v=%d not pinned?\n", i, gp->vreg); } \
      else m &= ~(1 << gp->sreg); \
    } \
  } \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    FOR_ALL_BITS_SET_DO(gconsts[i].gregs, x, \
      if (guest_regs[x].cnst != i || !(guest_regs[x].flags & GRF_CONST)) \
        { d = 1; printf("cache check c=%d v=%d not connected?\n",i,x); } \
    ) \
  } \
  if (m) \
    { d = 1; printf("cache check m=%x pinning wrong?\n",m); } \
  if (d) RCACHE_DUMP(msg) \
/* else { \
  printf("locked regs %s:\n",msg); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->locked) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
  } \
} */ \
}

#if PROPAGATE_CONSTANTS
static inline int gconst_alloc(sh2_reg_e r)
{
  int i, n = -1;

  for (i = 0; i < ARRAY_SIZE(gconsts); i++) {
    gconsts[i].gregs &= ~(1 << r);
    if (gconsts[i].gregs == 0 && n < 0)
      n = i;
  }
  if (n >= 0)
    gconsts[n].gregs = (1 << r);
  else {
    printf("all gconst buffers in use, aborting\n");
    exit(1); // cannot happen - more constants than guest regs?
  }
  return n;
}

static void gconst_set(sh2_reg_e r, u32 val)
{
  int i = gconst_alloc(r);

  guest_regs[r].flags |= GRF_CONST;
  guest_regs[r].cnst = i;
  gconsts[i].val = val;
}

static void gconst_new(sh2_reg_e r, u32 val)
{
  gconst_set(r, val);
  guest_regs[r].flags |= GRF_CDIRTY;

  // throw away old r that we might have cached
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
}
#endif
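
// note on the GRF_CONST/GRF_CDIRTY life cycle: gconst_new only records the
// constant and marks it CDIRTY; no host code is emitted at that point. the
// move-immediate is deferred until the register is actually read (see
// gconst_try_read below), so chains of constant ops can fold away entirely.
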
static int gconst_get(sh2_reg_e r, u32 *val)
{
  if (guest_regs[r].flags & GRF_CONST) {
    *val = gconsts[guest_regs[r].cnst].val;
    return 1;
  }
  *val = 0;
  return 0;
}

static int gconst_check(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    return 1;
  return 0;
}

// update hr if dirty, else do nothing
static int gconst_try_read(int vreg, sh2_reg_e r)
{
  int i, x;
  u32 v;

  if (guest_regs[r].flags & GRF_CDIRTY) {
    x = guest_regs[r].cnst;
    v = gconsts[x].val;
    emith_move_r_imm(cache_regs[vreg].hreg, v);
    rcache_set_x16(cache_regs[vreg].hreg, v == (s16)v, v == (u16)v);
    FOR_ALL_BITS_SET_DO(gconsts[x].gregs, i,
      {
        if (guest_regs[i].vreg >= 0 && guest_regs[i].vreg != vreg)
          rcache_remove_vreg_alias(guest_regs[i].vreg, i);
        if (guest_regs[i].vreg < 0)
          rcache_add_vreg_alias(vreg, i);
        guest_regs[i].flags &= ~GRF_CDIRTY;
        guest_regs[i].flags |= GRF_DIRTY;
      });
    cache_regs[vreg].type = HR_CACHED;
    cache_regs[vreg].flags |= HRF_DIRTY;
    return 1;
  }
  return 0;
}

static u32 gconst_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY)
      mask |= (1 << i);
  return mask;
}

static void gconst_kill(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    gconsts[guest_regs[r].cnst].gregs &= ~(1 << r);
  guest_regs[r].flags &= ~(GRF_CONST|GRF_CDIRTY);
}

static void gconst_copy(sh2_reg_e rd, sh2_reg_e rs)
{
  gconst_kill(rd);
  if (guest_regs[rs].flags & GRF_CONST) {
    guest_regs[rd].flags |= GRF_CONST;
    if (guest_regs[rd].vreg < 0)
      guest_regs[rd].flags |= GRF_CDIRTY;
    guest_regs[rd].cnst = guest_regs[rs].cnst;
    gconsts[guest_regs[rd].cnst].gregs |= (1 << rd);
  }
}

static void gconst_clean(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY) {
      // using RC_GR_READ here: it will call gconst_try_read,
      // cache the reg and mark it dirty.
      rcache_get_reg_(i, RC_GR_READ, 0, NULL);
    }
}

static void gconst_invalidate(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_CONST|GRF_CDIRTY))
      gconsts[guest_regs[i].cnst].gregs &= ~(1 << i);
    guest_regs[i].flags &= ~(GRF_CONST|GRF_CDIRTY);
  }
}

static u16 rcache_counter;
// SH2 register usage bitmasks
static u32 rcache_vregs_reg;     // regs of type HRT_REG (for pinning)
static u32 rcache_regs_static;   // statically allocated regs
static u32 rcache_regs_pinned;   // pinned regs
static u32 rcache_regs_now;      // regs used in current insn
static u32 rcache_regs_soon;     // regs used in the next few insns
static u32 rcache_regs_late;     // regs used in later insns
static u32 rcache_regs_discard;  // regs overwritten without being used
static u32 rcache_regs_clean;    // regs needing cleaning

static void rcache_lock_vreg(int x)
{
  if (x >= 0) {
    cache_regs[x].locked ++;
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("locking free vreg %x, aborting\n", x);
      exit(1);
    }
    if (!cache_regs[x].locked) {
      printf("locking overflow vreg %x, aborting\n", x);
      exit(1);
    }
#endif
  }
}

static void rcache_unlock_vreg(int x)
{
  if (x >= 0) {
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("unlocking free vreg %x, aborting\n", x);
      exit(1);
    }
#endif
    if (cache_regs[x].locked)
      cache_regs[x].locked --;
  }
}

static void rcache_free_vreg(int x)
{
  cache_regs[x].type = cache_regs[x].locked ? HR_TEMP : HR_FREE;
  cache_regs[x].flags &= HRF_PINNED;
  cache_regs[x].gregs = 0;
}

static void rcache_unmap_vreg(int x)
{
  int i;

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, i,
      if (guest_regs[i].flags & GRF_DIRTY) {
        // if a dirty reg is unmapped save its value to context
        if ((~rcache_regs_discard | rcache_regs_now) & (1 << i))
          emith_ctx_write(cache_regs[x].hreg, i * 4);
        guest_regs[i].flags &= ~GRF_DIRTY;
      }
      guest_regs[i].vreg = -1);
  rcache_free_vreg(x);
}

static void rcache_move_vreg(int d, int x)
{
  int i;

  cache_regs[d].type = HR_CACHED;
  cache_regs[d].gregs = cache_regs[x].gregs;
  cache_regs[d].flags &= HRF_PINNED;
  cache_regs[d].flags |= cache_regs[x].flags & ~HRF_PINNED;
  cache_regs[d].locked = 0;
  cache_regs[d].stamp = cache_regs[x].stamp;
  emith_move_r_r(cache_regs[d].hreg, cache_regs[x].hreg);

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].vreg == x)
      guest_regs[i].vreg = d;
  rcache_free_vreg(x);
}

static void rcache_clean_vreg(int x)
{
  u32 rns = rcache_regs_now | rcache_regs_soon;
  int r;

  if (cache_regs[x].flags & HRF_DIRTY) { // writeback
    cache_regs[x].flags &= ~HRF_DIRTY;
    rcache_lock_vreg(x);
    FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, r,
        if (guest_regs[r].flags & GRF_DIRTY) {
          if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) {
            if (guest_regs[r].vreg != guest_regs[r].sreg &&
                !cache_regs[guest_regs[r].sreg].locked &&
                ((~rcache_regs_discard | rcache_regs_now) & (1 << r)) &&
                !(rns & cache_regs[guest_regs[r].sreg].gregs)) {
              // statically mapped reg not in its sreg. move back to sreg
              rcache_evict_vreg(guest_regs[r].sreg);
              emith_move_r_r(cache_regs[guest_regs[r].sreg].hreg,
                              cache_regs[guest_regs[r].vreg].hreg);
              rcache_copy_x16(cache_regs[guest_regs[r].sreg].hreg,
                              cache_regs[guest_regs[r].vreg].hreg);
              rcache_remove_vreg_alias(x, r);
              rcache_add_vreg_alias(guest_regs[r].sreg, r);
              cache_regs[guest_regs[r].sreg].flags |= HRF_DIRTY;
            } else
              // cannot remap. keep dirty for writeback in unmap
              cache_regs[x].flags |= HRF_DIRTY;
          } else {
            if ((~rcache_regs_discard | rcache_regs_now) & (1 << r))
              emith_ctx_write(cache_regs[x].hreg, r * 4);
            guest_regs[r].flags &= ~GRF_DIRTY;
          }
          rcache_regs_clean &= ~(1 << r);
        })
    rcache_unlock_vreg(x);
  }

#if DRC_DEBUG & 64
  RCACHE_CHECK("after clean");
#endif
}

static void rcache_add_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs |= (1 << r);
  guest_regs[r].vreg = x;
  cache_regs[x].type = HR_CACHED;
}

static void rcache_remove_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs &= ~(1 << r);
  if (!cache_regs[x].gregs) {
    // no reg mapped -> free vreg
    if (cache_regs[x].locked)
      cache_regs[x].type = HR_TEMP;
    else
      rcache_free_vreg(x);
  }
  guest_regs[r].vreg = -1;
}

static void rcache_evict_vreg(int x)
{
  rcache_remap_vreg(x);
  rcache_unmap_vreg(x);
}

static void rcache_evict_vreg_aliases(int x, sh2_reg_e r)
{
  rcache_remove_vreg_alias(x, r);
  rcache_evict_vreg(x);
  rcache_add_vreg_alias(x, r);
}

static int rcache_allocate(int what, int minprio)
{
  // evict reg with oldest stamp (only for HRT_REG, no temps)
  int i, i_prio, oldest = -1, prio = 0;
  u16 min_stamp = (u16)-1;

  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--) {
    // consider only non-static, unpinned, unlocked REG or TEMP
    if ((cache_regs[i].flags & HRF_PINNED) || cache_regs[i].locked)
      continue;
    if ((what > 0 && !(cache_regs[i].htype & HRT_REG)) ||   // get a REG
        (what == 0 && (cache_regs[i].htype & HRT_TEMP)) ||  // get a non-TEMP
        (what < 0 && !(cache_regs[i].htype & HRT_TEMP)))    // get a TEMP
      continue;
    if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP) {
      // REG is free
      prio = 10;
      oldest = i;
      break;
    }
    if (cache_regs[i].type == HR_CACHED) {
      if (rcache_regs_now & cache_regs[i].gregs)
        // REGs needed for the current insn
        i_prio = 0;
      else if (rcache_regs_soon & cache_regs[i].gregs)
        // REGs needed in the next insns
        i_prio = 2;
      else if (rcache_regs_late & cache_regs[i].gregs)
        // REGs needed in some future insn
        i_prio = 4;
      else if (~rcache_regs_discard & cache_regs[i].gregs)
        // REGs not needed in the foreseeable future
        i_prio = 6;
      else
        // REGs soon overwritten anyway
        i_prio = 8;
      if (!(cache_regs[i].flags & HRF_DIRTY)) i_prio ++;

      if (prio < i_prio || (prio == i_prio && cache_regs[i].stamp < min_stamp)) {
        min_stamp = cache_regs[i].stamp;
        oldest = i;
        prio = i_prio;
      }
    }
  }

  if (prio < minprio || oldest == -1)
    return -1;

  if (cache_regs[oldest].type == HR_CACHED)
    rcache_evict_vreg(oldest);
  else
    rcache_free_vreg(oldest);

  return oldest;
}

static int rcache_allocate_vreg(int needed)
{
  int x;

  x = rcache_allocate(1, needed ? 0 : 4);
  if (x < 0)
    x = rcache_allocate(-1, 0);
  return x;
}

static int rcache_allocate_nontemp(void)
{
  int x = rcache_allocate(0, 4);
  return x;
}

static int rcache_allocate_temp(void)
{
  int x = rcache_allocate(-1, 0);
  if (x < 0)
    x = rcache_allocate(0, 0);
  return x;
}
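
// summary of the eviction ranking in rcache_allocate above: free regs are
// taken first, then (in order of willingness to evict) regs about to be
// overwritten anyway, regs not needed in the foreseeable future, and regs
// needed late/soon/now; clean regs are preferred over dirty ones at equal
// priority, and ties are broken by the oldest allocation stamp.
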
// maps a host register to a REG
static int rcache_map_reg(sh2_reg_e r, int hr)
{
#if REMAP_REGISTER
  int i;

  gconst_kill(r);

  // lookup the TEMP hr maps to
  i = reg_map_host[hr];
  if (i < 0) {
    // must not happen
    printf("invalid host register %d\n", hr);
    exit(1);
  }

  // remove old mappings of r and i if one exists
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);

  // set new mapping
  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 1 << r;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  cache_regs[i].flags |= HRF_DIRTY;
  rcache_lock_vreg(i);
  guest_regs[r].flags |= GRF_DIRTY;
  guest_regs[r].vreg = i;
#if DRC_DEBUG & 64
  RCACHE_CHECK("after map");
#endif
  return cache_regs[i].hreg;
#else
  return rcache_get_reg(r, RC_GR_WRITE, NULL);
#endif
}

// remap vreg from a TEMP to a REG if it will be used (upcoming TEMP invalidation)
static void rcache_remap_vreg(int x)
{
#if REMAP_REGISTER
  u32 rsl_d = rcache_regs_soon | rcache_regs_late;
  int d;

  // x must be a cached vreg
  if (cache_regs[x].type != HR_CACHED || cache_regs[x].locked)
    return;
  // don't do it if x isn't used
  if (!(rsl_d & cache_regs[x].gregs)) {
    // clean here to avoid data loss on invalidation
    rcache_clean_vreg(x);
    return;
  }

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, d,
    if ((guest_regs[d].flags & (GRF_STATIC|GRF_PINNED)) &&
        !cache_regs[guest_regs[d].sreg].locked &&
        !((rsl_d|rcache_regs_now) & cache_regs[guest_regs[d].sreg].gregs)) {
      // STATIC not in its sreg and sreg is available
      rcache_evict_vreg(guest_regs[d].sreg);
      rcache_move_vreg(guest_regs[d].sreg, x);
      return;
    }
  )

  // allocate a non-TEMP vreg
  rcache_lock_vreg(x); // lock to avoid evicting x
  d = rcache_allocate_nontemp();
  rcache_unlock_vreg(x);
  if (d < 0) {
    rcache_clean_vreg(x);
    return;
  }

  // move vreg to new location
  rcache_move_vreg(d, x);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after remap");
#endif
#else
  rcache_clean_vreg(x);
#endif
}

static void rcache_alias_vreg(sh2_reg_e rd, sh2_reg_e rs)
{
#if ALIAS_REGISTERS
  int x;

  // if s isn't constant, it must be in cache for aliasing
  if (!gconst_check(rs))
    rcache_get_reg_(rs, RC_GR_READ, 0, NULL);

  // if d and s are not already aliased
  x = guest_regs[rs].vreg;
  if (guest_regs[rd].vreg != x) {
    // remove possible old mapping of dst
    if (guest_regs[rd].vreg >= 0)
      rcache_remove_vreg_alias(guest_regs[rd].vreg, rd);
    // make dst an alias of src
    if (x >= 0)
      rcache_add_vreg_alias(x, rd);
    // if d is now in cache, it must be dirty
    if (guest_regs[rd].vreg >= 0) {
      x = guest_regs[rd].vreg;
      cache_regs[x].flags |= HRF_DIRTY;
      guest_regs[rd].flags |= GRF_DIRTY;
    }
  }

  gconst_copy(rd, rs);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after alias");
#endif
#else
  int hr_s = rcache_get_reg(rs, RC_GR_READ, NULL);
  int hr_d = rcache_get_reg(rd, RC_GR_WRITE, NULL);

  emith_move_r_r(hr_d, hr_s);
  gconst_copy(rd, rs);
#endif
}

// note: must not be called when doing conditional code
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr)
{
  int src, dst, ali;
  cache_reg_t *tr;
  u32 rsp_d = (rcache_regs_soon | rcache_regs_static | rcache_regs_pinned) &
              ~rcache_regs_discard;

  dst = src = guest_regs[r].vreg;

  rcache_lock_vreg(src); // lock to avoid evicting src
  // good opportunity to relocate a remapped STATIC?
  if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
      src != guest_regs[r].sreg && (src < 0 || mode != RC_GR_READ) &&
      !cache_regs[guest_regs[r].sreg].locked &&
      !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[r].sreg].gregs)) {
    dst = guest_regs[r].sreg;
    rcache_evict_vreg(dst);
  } else if (dst < 0) {
    // allocate a cache register
    if ((dst = rcache_allocate_vreg(rsp_d & (1 << r))) < 0) {
      printf("no registers to evict, aborting\n");
      exit(1);
    }
  }
  tr = &cache_regs[dst];
  tr->stamp = rcache_counter;

  // remove r from src
  if (src >= 0 && src != dst)
    rcache_remove_vreg_alias(src, r);
  rcache_unlock_vreg(src);

  // if r has a constant it may have aliases
  if (mode != RC_GR_WRITE && gconst_try_read(dst, r))
    src = dst;

  // if r will be modified, check for aliases being needed soon
  ali = tr->gregs & ~(1 << r);
  if (mode != RC_GR_READ && src == dst && ali) {
    int x = -1;

    if ((rsp_d|rcache_regs_now) & ali) {
      if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
          guest_regs[r].sreg == dst && !tr->locked) {
        // split aliases if r is STATIC in sreg and dst isn't already locked
        int t;
        FOR_ALL_BITS_SET_DO(ali, t,
          if ((guest_regs[t].flags & (GRF_STATIC|GRF_PINNED)) &&
              !(ali & ~(1 << t)) &&
              !cache_regs[guest_regs[t].sreg].locked &&
              !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[t].sreg].gregs)) {
            // alias is a single STATIC and its sreg is available
            x = guest_regs[t].sreg;
            rcache_evict_vreg(x);
          } else {
            rcache_lock_vreg(dst); // lock to avoid evicting dst
            x = rcache_allocate_vreg(rsp_d & ali);
            rcache_unlock_vreg(dst);
          }
          break;
        )
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          src = dst;
          rcache_move_vreg(x, dst);
        }
      } else {
        // split r
        rcache_lock_vreg(src); // lock to avoid evicting src
        x = rcache_allocate_vreg(rsp_d & (1 << r));
        rcache_unlock_vreg(src);
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          dst = x;
          tr = &cache_regs[dst];
          tr->stamp = rcache_counter;
        }
      }
    }
    if (x < 0)
      // aliases not needed or no vreg available, remove them
      rcache_evict_vreg_aliases(dst, r);
  }

  // assign r to dst
  rcache_add_vreg_alias(dst, r);

  // handle dst register transfer
  if (src < 0 && mode != RC_GR_WRITE)
    emith_ctx_read(tr->hreg, r * 4);
  if (hr) {
    *hr = (src >= 0 ? cache_regs[src].hreg : tr->hreg);
    rcache_lock_vreg(src >= 0 ? src : dst);
  } else if (src >= 0 && mode != RC_GR_WRITE && cache_regs[src].hreg != tr->hreg)
    emith_move_r_r(tr->hreg, cache_regs[src].hreg);

  // housekeeping
  if (do_locking)
    rcache_lock_vreg(dst);
  if (mode != RC_GR_READ) {
    tr->flags |= HRF_DIRTY;
    guest_regs[r].flags |= GRF_DIRTY;
    gconst_kill(r);
    rcache_set_x16(tr->hreg, 0, 0);
  } else if (src >= 0 && cache_regs[src].hreg != tr->hreg)
    rcache_copy_x16(tr->hreg, cache_regs[src].hreg);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getreg");
#endif

  return tr->hreg;
}

static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr)
{
  return rcache_get_reg_(r, mode, 1, hr);
}

static void rcache_pin_reg(sh2_reg_e r)
{
  int hr, x;

  // don't pin if static or already pinned
  if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED))
    return;

  rcache_regs_soon |= (1 << r); // kludge to prevent allocation of a temp
  hr = rcache_get_reg_(r, RC_GR_RMW, 0, NULL);
  x = reg_map_host[hr];

  // can only pin non-TEMPs
  if (!(cache_regs[x].htype & HRT_TEMP)) {
    guest_regs[r].flags |= GRF_PINNED;
    cache_regs[x].flags |= HRF_PINNED;
    guest_regs[r].sreg = x;
    rcache_regs_pinned |= (1 << r);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after pin");
#endif
}

static int rcache_get_tmp(void)
{
  int i;

  i = rcache_allocate_temp();
  if (i < 0) {
    printf("cannot allocate temp\n");
    exit(1);
  }

  cache_regs[i].type = HR_TEMP;
  rcache_lock_vreg(i);

  return cache_regs[i].hreg;
}

static int rcache_get_vreg_hr(int hr)
{
  int i;

  i = reg_map_host[hr];
  if (i < 0 || cache_regs[i].locked) {
    printf("host register %d is locked\n", hr);
    exit(1);
  }

  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);
  else if (cache_regs[i].type == HR_TEMP && cache_regs[i].locked) {
    printf("host reg %d already used, aborting\n", hr);
    exit(1);
  }

  return i;
}

static int rcache_get_vreg_arg(int arg)
{
  int hr = 0;

  host_arg2reg(hr, arg);
  return rcache_get_vreg_hr(hr);
}

// get a reg to be used as function arg
static int rcache_get_tmp_arg(int arg)
{
  int x = rcache_get_vreg_arg(arg);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}

// ... as return value after a call
static int rcache_get_tmp_ret(void)
{
  int x = rcache_get_vreg_hr(RET_REG);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}

// same but caches a reg if access is readonly (announced by hr being NULL)
static int rcache_get_reg_arg(int arg, sh2_reg_e r, int *hr)
{
  int i, srcr, dstr, dstid, keep;
  u32 val;
  host_arg2reg(dstr, arg);

  i = guest_regs[r].vreg;
  if (i >= 0 && cache_regs[i].type == HR_CACHED && cache_regs[i].hreg == dstr)
    // r is already in arg, avoid evicting
    dstid = i;
  else
    dstid = rcache_get_vreg_arg(arg);
  dstr = cache_regs[dstid].hreg;

  if (rcache_is_cached(r)) {
    // r is needed later on anyway
    srcr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
    keep = 1;
  } else if ((guest_regs[r].flags & GRF_CDIRTY) && gconst_get(r, &val)) {
    // r has an uncommitted const - load into arg, but keep constant uncommitted
    srcr = dstr;
    emith_move_r_imm(srcr, val);
    keep = 0;
  } else {
    // must read from ctx
    srcr = dstr;
    emith_ctx_read(srcr, r * 4);
    keep = 1;
  }

  if (cache_regs[dstid].type == HR_CACHED)
    rcache_evict_vreg(dstid);
  cache_regs[dstid].type = HR_TEMP;

  if (hr == NULL) {
    if (dstr != srcr)
      // arg is a copy of cached r
      emith_move_r_r(dstr, srcr);
    else if (keep && guest_regs[r].vreg < 0)
      // keep arg as vreg for r
      rcache_add_vreg_alias(dstid, r);
  } else {
    *hr = srcr;
    if (dstr != srcr) // must lock srcr if not copied here
      rcache_lock_vreg(reg_map_host[srcr]);
  }

  cache_regs[dstid].stamp = ++rcache_counter;
  rcache_lock_vreg(dstid);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getarg");
#endif
  return dstr;
}
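
// note: temps and arg regs obtained above stay locked; they are released
// either explicitly (rcache_free_tmp/rcache_free) or wholesale around a
// call via rcache_clean_tmp/rcache_invalidate_tmp (see the memhandler
// emitters further down).
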
static void rcache_free_tmp(int hr)
{
  int i = reg_map_host[hr];

  if (i < 0 || cache_regs[i].type != HR_TEMP) {
    printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, cache_regs[i].type);
    exit(1);
  }

  rcache_unlock_vreg(i);
}

// saves temporary result either in REG or in drctmp
static int rcache_save_tmp(int hr)
{
  int i;

  // find REG, either free or unlocked temp or oldest non-hinted cached
  i = rcache_allocate_nontemp();
  if (i < 0) {
    // if none is available, store in drctmp
    emith_ctx_write(hr, offsetof(SH2, drc_tmp));
    rcache_free_tmp(hr);
    return -1;
  }

  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 0; // not storing any guest register
  cache_regs[i].flags &= HRF_PINNED;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  rcache_lock_vreg(i);
  emith_move_r_r(cache_regs[i].hreg, hr);
  rcache_free_tmp(hr);
  return i;
}

static int rcache_restore_tmp(int x)
{
  int hr;

  // find REG with tmp store: cached but with no gregs
  if (x >= 0) {
    if (cache_regs[x].type != HR_CACHED || cache_regs[x].gregs) {
      printf("invalid tmp storage %d\n", x);
      exit(1);
    }
    // found, transform to a TEMP
    cache_regs[x].type = HR_TEMP;
    return cache_regs[x].hreg;
  }

  // if not available, create a TEMP store and fetch from drctmp
  hr = rcache_get_tmp();
  emith_ctx_read(hr, offsetof(SH2, drc_tmp));

  return hr;
}
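
// typical save/restore pairing (sketch): the handle returned by
// rcache_save_tmp must be passed back unchanged, -1 meaning "in drctmp":
//   int tmp = rcache_get_tmp();
//   // ... compute into tmp ...
//   int slot = rcache_save_tmp(tmp);  // tmp is released here
//   // ... emit other code ...
//   tmp = rcache_restore_tmp(slot);   // relocked as a TEMP again
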
static void rcache_free(int hr)
{
  int x = reg_map_host[hr];
  rcache_unlock_vreg(x);
}

static void rcache_unlock(int x)
{
  if (x >= 0)
    cache_regs[x].locked = 0;
}

static void rcache_unlock_all(void)
{
  int i;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    cache_regs[i].locked = 0;
}

static void rcache_unpin_all(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & GRF_PINNED) {
      guest_regs[i].flags &= ~GRF_PINNED;
      cache_regs[guest_regs[i].sreg].flags &= ~HRF_PINNED;
      guest_regs[i].sreg = -1;
      rcache_regs_pinned &= ~(1 << i);
    }
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after unpin");
#endif
}

static void rcache_save_pinned(void)
{
  int i;

  // save pinned regs to context
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if ((guest_regs[i].flags & GRF_PINNED) && guest_regs[i].vreg >= 0)
      emith_ctx_write(cache_regs[guest_regs[i].vreg].hreg, i * 4);
}

static inline void rcache_set_usage_now(u32 mask)
{
  rcache_regs_now = mask;
}

static inline void rcache_set_usage_soon(u32 mask)
{
  rcache_regs_soon = mask;
}

static inline void rcache_set_usage_late(u32 mask)
{
  rcache_regs_late = mask;
}

static inline void rcache_set_usage_discard(u32 mask)
{
  rcache_regs_discard = mask;
}

static inline int rcache_is_cached(sh2_reg_e r)
{
  // is r in cache or needed soon?
  u32 rsc = rcache_regs_soon | rcache_regs_clean;
  return (guest_regs[r].vreg >= 0 || (rsc & (1 << r)));
}

static inline int rcache_is_hreg_used(int hr)
{
  int x = reg_map_host[hr];
  // is hr in use?
  return cache_regs[x].type != HR_FREE &&
        (cache_regs[x].type != HR_TEMP || cache_regs[x].locked);
}

static inline u32 rcache_used_hregs_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if ((cache_regs[i].htype & HRT_TEMP) && cache_regs[i].type != HR_FREE &&
        (cache_regs[i].type != HR_TEMP || cache_regs[i].locked))
      mask |= 1 << cache_regs[i].hreg;

  return mask;
}

static inline u32 rcache_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_DIRTY)
      mask |= 1 << i;
  mask |= gconst_dirty_mask();

  return mask;
}

static inline u32 rcache_cached_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED)
      mask |= cache_regs[i].gregs;

  return mask;
}

static void rcache_clean_tmp(void)
{
  int i;

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED && (cache_regs[i].htype & HRT_TEMP)) {
      rcache_unlock(i);
      rcache_remap_vreg(i);
    }
  rcache_regs_clean = 0;
}

static void rcache_clean_masked(u32 mask)
{
  int i, r, hr;
  u32 m;

  rcache_regs_clean |= mask;
  mask = rcache_regs_clean;

  // clean constants where all aliases are covered by the mask, exempt statics
  // to avoid flushing them to context if sreg isn't available
  m = mask & ~(rcache_regs_static | rcache_regs_pinned);
  for (i = 0; i < ARRAY_SIZE(gconsts); i++)
    if ((gconsts[i].gregs & m) && !(gconsts[i].gregs & ~mask)) {
      FOR_ALL_BITS_SET_DO(gconsts[i].gregs, r,
        if (guest_regs[r].flags & GRF_CDIRTY) {
          hr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
          rcache_clean_vreg(reg_map_host[hr]);
          break;
        });
    }

  // clean vregs where all aliases are covered by the mask
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED &&
        (cache_regs[i].gregs & mask) && !(cache_regs[i].gregs & ~mask))
      rcache_clean_vreg(i);
}

static void rcache_clean(void)
{
  int i;
  gconst_clean();

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--)
    if (cache_regs[i].type == HR_CACHED)
      rcache_clean_vreg(i);

  // relocate statics to their sregs (necessary before conditional jumps)
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if ((guest_regs[i].flags & (GRF_STATIC|GRF_PINNED)) &&
        guest_regs[i].vreg != guest_regs[i].sreg) {
      rcache_lock_vreg(guest_regs[i].vreg);
      rcache_evict_vreg(guest_regs[i].sreg);
      rcache_unlock_vreg(guest_regs[i].vreg);

      if (guest_regs[i].vreg < 0)
        emith_ctx_read(cache_regs[guest_regs[i].sreg].hreg, i*4);
      else {
        emith_move_r_r(cache_regs[guest_regs[i].sreg].hreg,
                        cache_regs[guest_regs[i].vreg].hreg);
        rcache_copy_x16(cache_regs[guest_regs[i].sreg].hreg,
                        cache_regs[guest_regs[i].vreg].hreg);
        rcache_remove_vreg_alias(guest_regs[i].vreg, i);
      }
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }
  rcache_regs_clean = 0;
}

static void rcache_invalidate_tmp(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype & HRT_TEMP) {
      rcache_unlock(i);
      if (cache_regs[i].type == HR_CACHED)
        rcache_evict_vreg(i);
      else
        rcache_free_vreg(i);
    }
  }
}

static void rcache_invalidate(void)
{
  int i;

  gconst_invalidate();
  rcache_unlock_all();

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    rcache_free_vreg(i);

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i].flags &= GRF_STATIC;
    if (!(guest_regs[i].flags & GRF_STATIC))
      guest_regs[i].vreg = -1;
    else {
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_counter = 0;
  rcache_regs_now = rcache_regs_soon = rcache_regs_late = 0;
  rcache_regs_discard = rcache_regs_clean = 0;
}

static void rcache_flush(void)
{
  rcache_clean();
  rcache_invalidate();
}

static void rcache_create(void)
{
  int x = 0, i;

  // create cache_regs as host register representation
  // RET_REG/params should be first TEMPs to avoid allocation conflicts in calls
  cache_regs[x++] = (cache_reg_t) {.hreg = RET_REG, .htype = HRT_TEMP};
  for (i = 0; i < ARRAY_SIZE(hregs_param); i++)
    if (hregs_param[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_param[i],.htype = HRT_TEMP};

  for (i = 0; i < ARRAY_SIZE(hregs_temp); i++)
    if (hregs_temp[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_temp[i], .htype = HRT_TEMP};

  for (i = ARRAY_SIZE(hregs_saved)-1; i >= 0; i--)
    if (hregs_saved[i] != CONTEXT_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_saved[i], .htype = HRT_REG};

  if (x != ARRAY_SIZE(cache_regs)) {
    printf("rcache_create failed (conflicting register count)\n");
    exit(1);
  }

  // mapping from host_register to cache regs index
  memset(reg_map_host, -1, sizeof(reg_map_host));
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype)
      reg_map_host[cache_regs[i].hreg] = i;
    if (cache_regs[i].htype == HRT_REG)
      rcache_vregs_reg |= (1 << i);
  }

  // create static host register mapping for SH2 regs
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i] = (guest_reg_t){.sreg = -1};
  }
  for (i = 0; i < ARRAY_SIZE(regs_static); i += 2) {
    for (x = ARRAY_SIZE(cache_regs)-1; x >= 0; x--)
      if (cache_regs[x].hreg == regs_static[i+1]) break;
    if (x >= 0) {
      guest_regs[regs_static[i]] = (guest_reg_t){.flags = GRF_STATIC,.sreg = x};
      rcache_regs_static |= (1 << regs_static[i]);
      rcache_vregs_reg &= ~(1 << x);
    }
  }

  printf("DRC registers created, %ld host regs (%d REG, %d STATIC, 1 CTX)\n",
    CACHE_REGS+1L, count_bits(rcache_vregs_reg),count_bits(rcache_regs_static));
}

static void rcache_init(void)
{
  // create DRC data structures
  rcache_create();
  rcache_invalidate();
#if DRC_DEBUG & 64
  RCACHE_CHECK("after init");
#endif
}

// ---------------------------------------------------------------

// NB may return either REG or TEMP
static int emit_get_rbase_and_offs(SH2 *sh2, sh2_reg_e r, int rmode, u32 *offs)
{
  uptr omask = emith_rw_offs_max(); // offset mask
  u32 mask = 0;
  u32 a;
  int poffs;
  int hr, hr2;
  uptr la;

  // is r constant and points to a memory region?
  if (! gconst_get(r, &a))
    return -1;
  poffs = dr_ctx_get_mem_ptr(sh2, a, &mask);
  if (poffs == -1)
    return -1;

  if (mask < 0x20000) {
    // data array, BIOS, DRAM, can't safely access directly since host addr may
    // change (BIOS/da code may run on either core, DRAM may be switched)
    hr = rcache_get_tmp();
    a = (a + *offs) & mask;
    if (poffs == offsetof(SH2, p_da)) {
      // access sh2->data_array directly
      a += offsetof(SH2, data_array);
      emith_add_r_r_ptr_imm(hr, CONTEXT_REG, a & ~omask);
    } else {
      emith_ctx_read_ptr(hr, poffs);
      if (a & ~omask)
        emith_add_r_r_ptr_imm(hr, hr, a & ~omask);
    }
    *offs = a & omask;
    return hr;
  }

  // ROM, SDRAM. Host address should be mmapped to be equal to SH2 address.
  la = (uptr)*(void **)((char *)sh2 + poffs);

  // if r is in rcache or needed soon anyway, and offs is relative to region,
  // and address translation fits in add_ptr_imm (s32), then use rcached const
  if (la == (s32)la && !(*offs & ~mask) && rcache_is_cached(r)) {
    u32 odd = a & 1; // need to fix odd address for correct byte addressing
    la -= (s32)((a & ~mask) - *offs - odd); // diff between reg and memory
    hr = hr2 = rcache_get_reg(r, rmode, NULL);
    if ((s32)a < 0) emith_uext_ptr(hr2);
    if ((la & ~omask) - odd) {
      hr = rcache_get_tmp();
      emith_add_r_r_ptr_imm(hr, hr2, (la & ~omask) - odd);
      rcache_free(hr2);
    }
    *offs = (la & omask);
  } else {
    // known fixed host address
    la += (a + *offs) & mask;
    hr = rcache_get_tmp();
    emith_move_r_ptr_imm(hr, la & ~omask);
    *offs = la & omask;
  }
  return hr;
}

// read const data from const ROM address
static int emit_get_rom_data(SH2 *sh2, sh2_reg_e r, u32 offs, int size, u32 *val)
{
  u32 a, mask;

  *val = 0;
  if (gconst_get(r, &a)) {
    a += offs;
    // check if rom is memory mapped (not bank switched), and address is in rom
    if (dr_is_rom(a) && p32x_sh2_get_mem_ptr(a, &mask, sh2) == sh2->p_rom) {
      switch (size & MF_SIZEMASK) {
      case 0: *val = (s8)p32x_sh2_read8(a, sh2s);   break; // 8
      case 1: *val = (s16)p32x_sh2_read16(a, sh2s); break; // 16
      case 2: *val = p32x_sh2_read32(a, sh2s);      break; // 32
      }
      return 1;
    }
  }
  return 0;
}

static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
{
#if PROPAGATE_CONSTANTS
  gconst_new(dst, imm);
#else
  int hr = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  emith_move_r_imm(hr, imm);
#endif
}

static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
{
  if (gconst_check(src) || rcache_is_cached(src))
    rcache_alias_vreg(dst, src);
  else {
    int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
    emith_ctx_read(hr_d, src * 4);
  }
}

static void emit_add_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_add_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val + imm);
  } else
    gconst_new(r, val + imm);
}

static void emit_sub_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_sub_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val - imm);
  } else
    gconst_new(r, val - imm);
}

static void emit_sync_t_to_sr(void)
{
  // avoid reloading SR from context if there's nothing to do
  if (emith_get_t_cond() >= 0) {
    int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    emith_sync_t(sr);
  }
}

// rd = @(arg0)
static int emit_memhandler_read(int size)
{
  int hr;

  emit_sync_t_to_sr();
  rcache_clean_tmp();
#ifndef DRC_SR_REG
  // must writeback cycles for poll detection stuff
  if (guest_regs[SHR_SR].vreg != -1)
    rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
#endif
  rcache_invalidate_tmp();

  if (size & MF_POLLING)
    switch (size & MF_SIZEMASK) {
    case 0: emith_call(sh2_drc_read8_poll); break;  // 8
    case 1: emith_call(sh2_drc_read16_poll); break; // 16
    case 2: emith_call(sh2_drc_read32_poll); break; // 32
    }
  else
    switch (size & MF_SIZEMASK) {
    case 0: emith_call(sh2_drc_read8); break;  // 8
    case 1: emith_call(sh2_drc_read16); break; // 16
    case 2: emith_call(sh2_drc_read32); break; // 32
    }

  hr = rcache_get_tmp_ret();
  rcache_set_x16(hr, (size & MF_SIZEMASK) < 2, 0);
  return hr;
}

// @(arg0) = arg1
static void emit_memhandler_write(int size)
{
  emit_sync_t_to_sr();
  rcache_clean_tmp();
#ifndef DRC_SR_REG
  if (guest_regs[SHR_SR].vreg != -1)
    rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
#endif
  rcache_invalidate_tmp();

  switch (size & MF_SIZEMASK) {
  case 0: emith_call(sh2_drc_write8); break;  // 8
  case 1: emith_call(sh2_drc_write16); break; // 16
  case 2: emith_call(sh2_drc_write32); break; // 32
  }
}

// rd = @(Rs,#offs); rd < 0 -> return a temp
static int emit_memhandler_read_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
{
  int hr, hr2;
  u32 val;

#if PROPAGATE_CONSTANTS
  if (emit_get_rom_data(sh2, rs, offs, size, &val)) {
    if (rd == SHR_TMP) {
      hr2 = rcache_get_tmp();
      emith_move_r_imm(hr2, val);
    } else {
      emit_move_r_imm32(rd, val);
      hr2 = rcache_get_reg(rd, RC_GR_RMW, NULL);
    }
    rcache_set_x16(hr2, val == (s16)val, val == (u16)val);
    if (size & MF_POSTINCR)
      emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
    return hr2;
  }

  val = size & MF_POSTINCR;
  hr = emit_get_rbase_and_offs(sh2, rs, val ? RC_GR_RMW : RC_GR_READ, &offs);
  if (hr != -1) {
    if (rd == SHR_TMP)
      hr2 = rcache_get_tmp();
    else
      hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
    switch (size & MF_SIZEMASK) {
    case 0: emith_read8s_r_r_offs(hr2, hr, offs ^ 1); break; // 8
    case 1: emith_read16s_r_r_offs(hr2, hr, offs); break;    // 16
    case 2: emith_read_r_r_offs(hr2, hr, offs); emith_ror(hr2, hr2, 16); break;
    }
    rcache_free(hr);
    if (size & MF_POSTINCR)
      emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
    return hr2;
  }
#endif

  if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
    hr = rcache_get_tmp_arg(0);
    emith_move_r_imm(hr, val + offs);
    if (size & MF_POSTINCR)
      gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  } else if (size & MF_POSTINCR) {
    hr = rcache_get_tmp_arg(0);
    hr2 = rcache_get_reg(rs, RC_GR_RMW, NULL);
    emith_add_r_r_imm(hr, hr2, offs);
    emith_add_r_imm(hr2, 1 << (size & MF_SIZEMASK));
    if (gconst_get(rs, &val))
      gconst_set(rs, val + (1 << (size & MF_SIZEMASK)));
  } else {
    hr = rcache_get_reg_arg(0, rs, &hr2);
    if (offs || hr != hr2)
      emith_add_r_r_imm(hr, hr2, offs);
  }

  hr = emit_memhandler_read(size);
  if (rd == SHR_TMP)
    hr2 = hr;
  else
    hr2 = rcache_map_reg(rd, hr);
  if (hr != hr2) {
    emith_move_r_r(hr2, hr);
    rcache_free_tmp(hr);
  }
  return hr2;
}

// @(Rs,#offs) = rd; rd < 0 -> write arg1
static void emit_memhandler_write_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
{
  int hr, hr2;
  u32 val;

  if (rd == SHR_TMP) {
    host_arg2reg(hr2, 1); // already locked and prepared by caller
  } else if ((size & MF_PREDECR) && rd == rs) { // must avoid caching rd in arg1
    hr2 = rcache_get_reg_arg(1, rd, &hr);
    if (hr != hr2) {
      emith_move_r_r(hr2, hr);
      rcache_free(hr2);
    }
  } else
    hr2 = rcache_get_reg_arg(1, rd, NULL);
  if (rd != SHR_TMP)
    rcache_unlock(guest_regs[rd].vreg); // unlock in case rd is in arg0

  if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
    hr = rcache_get_tmp_arg(0);
    if (size & MF_PREDECR) {
      val -= 1 << (size & MF_SIZEMASK);
      gconst_new(rs, val);
    }
    emith_move_r_imm(hr, val + offs);
  } else if (offs || (size & MF_PREDECR)) {
    if (size & MF_PREDECR)
      emit_sub_r_imm(rs, 1 << (size & MF_SIZEMASK));
    rcache_unlock(guest_regs[rs].vreg); // unlock in case rs is in arg0
    hr = rcache_get_reg_arg(0, rs, &hr2);
    if (offs || hr != hr2)
      emith_add_r_r_imm(hr, hr2, offs);
  } else
    hr = rcache_get_reg_arg(0, rs, NULL);

  emit_memhandler_write(size);
}
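
// usage note: the opcode handlers below map SH2 addressing modes directly
// onto these helpers; e.g. "MOV.L Rm,@-Rn" is
//   emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, 2 | MF_PREDECR);
// and the RTE handler pops PC/SR with
//   emit_memhandler_read_rr(sh2, SHR_PC, SHR_SP, 0, 2 | MF_POSTINCR);
// size encodes the access width in its low bits (0=8, 1=16, 2=32) plus the
// MF_* modifier flags.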

// rd = @(Rx,Ry); rd < 0 -> return a temp
static int emit_indirect_indexed_read(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
{
  int hr, hr2;
  int tx, ty;
#if PROPAGATE_CONSTANTS
  u32 offs;

  // if offs is larger than 0x01000000, it's most probably the base address part
  if (gconst_get(ry, &offs) && offs < 0x01000000)
    return emit_memhandler_read_rr(sh2, rd, rx, offs, size);
  if (gconst_get(rx, &offs) && offs < 0x01000000)
    return emit_memhandler_read_rr(sh2, rd, ry, offs, size);
#endif
  hr = rcache_get_reg_arg(0, rx, &tx);
  ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  emith_add_r_r_r(hr, tx, ty);
  hr = emit_memhandler_read(size);

  if (rd == SHR_TMP)
    hr2 = hr;
  else
    hr2 = rcache_map_reg(rd, hr);
  if (hr != hr2) {
    emith_move_r_r(hr2, hr);
    rcache_free_tmp(hr);
  }
  return hr2;
}

// @(Rx,Ry) = rd; rd < 0 -> write arg1
static void emit_indirect_indexed_write(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
{
  int hr, tx, ty;
#if PROPAGATE_CONSTANTS
  u32 offs;

  // if offs is larger than 0x01000000, it's most probably the base address part
  if (gconst_get(ry, &offs) && offs < 0x01000000)
    return emit_memhandler_write_rr(sh2, rd, rx, offs, size);
  if (gconst_get(rx, &offs) && offs < 0x01000000)
    return emit_memhandler_write_rr(sh2, rd, ry, offs, size);
#endif
  if (rd != SHR_TMP)
    rcache_get_reg_arg(1, rd, NULL);
  hr = rcache_get_reg_arg(0, rx, &tx);
  ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  emith_add_r_r_r(hr, tx, ty);
  emit_memhandler_write(size);
}

// @Rn+,@Rm+
static void emit_indirect_read_double(SH2 *sh2, int *rnr, int *rmr, sh2_reg_e rn, sh2_reg_e rm, int size)
{
  int tmp;

  // unlock rn, rm here to avoid REG shortage in MAC operation
  tmp = emit_memhandler_read_rr(sh2, SHR_TMP, rn, 0, size | MF_POSTINCR);
  rcache_unlock(guest_regs[rn].vreg);
  tmp = rcache_save_tmp(tmp);
  *rmr = emit_memhandler_read_rr(sh2, SHR_TMP, rm, 0, size | MF_POSTINCR);
  rcache_unlock(guest_regs[rm].vreg);
  *rnr = rcache_restore_tmp(tmp);
}

static void emit_do_static_regs(int is_write, int tmpr)
{
  int i, r, count;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_STATIC|GRF_PINNED))
      r = cache_regs[guest_regs[i].vreg].hreg;
    else
      continue;

    for (count = 1; i < ARRAY_SIZE(guest_regs) - 1; i++, r++) {
      if ((guest_regs[i + 1].flags & (GRF_STATIC|GRF_PINNED)) &&
          cache_regs[guest_regs[i + 1].vreg].hreg == r + 1)
        count++;
      else
        break;
    }

    if (count > 1) {
      // i, r point to last item
      if (is_write)
        emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
      else
        emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
    } else {
      if (is_write)
        emith_ctx_write(r, i * 4);
      else
        emith_ctx_read(r, i * 4);
    }
  }
}
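
// illustrative example: with guest regs r4-r6 statically mapped to three
// consecutive host regs, the run detection above coalesces them into one
// emith_ctx_write_multiple()/emith_ctx_read_multiple() call covering all
// three context slots (which a backend may implement as a single
// load/store-multiple insn), instead of three separate context accesses.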

#if DIV_OPTIMIZER
// divide operation replacement functions, called by compiled code. Only the
// 32:16 cases and the 64:32 cases described in the SH2 prog man are replaced.
static uint32_t REGPARM(2) sh2_drc_divu32(uint32_t dv, uint32_t ds)
{
  if (ds && ds >= dv) {
    // good case: no divide by 0, and no result overflow
    uint32_t quot = dv / (ds>>16), rem = dv - (quot * (ds>>16));
    if (~quot&1) rem -= ds>>16;
    return (uint16_t)quot | ((2*rem + (quot>>31)) << 16);
  } else {
    // bad case: use the sh2 algo to get the right result
    int q = 0, t = 0, s = 16;
    while (s--) {
      uint32_t v = dv>>31;
      dv = (dv<<1) | t;
      t = v;
      v = dv;
      if (q) dv += ds, q = dv < v;
      else   dv -= ds, q = !(dv < v);
      q ^= t, t = !q;
    }
    return (dv<<1) | t;
  }
}
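
// note on the "good case" above: DIV1 performs one non-restoring division
// step, so a full 32:16 divide leaves the quotient bits in the low half of
// Rn and partial-remainder state in the upper half. The fixups here
// (rem -= ds>>16 when the last quotient bit is 0, i.e. the remainder was
// left unrestored, plus the 2*rem packing) recreate that register image
// from the plain C division result, so code following the replaced DIV1
// sequence sees the same Rn value it would have gotten on real hardware.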

static uint32_t REGPARM(3) sh2_drc_divu64(uint32_t dh, uint32_t *dl, uint32_t ds)
{
  if (ds > 1 && ds >= dh) {
    // good case: no divide by 0, and no result overflow
    uint64_t dv = *dl | ((uint64_t)dh << 32);
    uint32_t quot = dv / ds, rem = dv - (quot * ds);
    if (~quot&1) rem -= ds;
    *dl = quot;
    return rem;
  } else {
    // bad case: use the sh2 algo to get the right result
    uint64_t dv = *dl | ((uint64_t)dh << 32);
    int q = 0, t = 0, s = 32;
    while (s--) {
      uint64_t v = dv>>63;
      dv = (dv<<1) | t;
      t = v;
      v = dv;
      if (q) dv += ((uint64_t)ds << 32), q = dv < v;
      else   dv -= ((uint64_t)ds << 32), q = !(dv < v);
      q ^= t, t = !q;
    }
    *dl = (dv<<1) | t;
    return (dv>>32);
  }
}

static uint32_t REGPARM(2) sh2_drc_divs32(int32_t dv, int32_t ds)
{
  uint32_t adv = abs(dv), ads = abs(ds)>>16;

  if (ads > 1 && ads > adv>>16 && (int32_t)ads > 0 && !(uint16_t)ds) {
    // good case: no divide by 0, and no result overflow
    uint32_t quot = adv / ads, rem = adv - (quot * ads);
    int m1 = (rem ? dv^ds : ds) < 0;
    if (rem && dv < 0) rem = (quot&1 ? -rem : +ads-rem);
    else               rem = (quot&1 ? +rem : -ads+rem);
    quot = ((dv^ds)<0 ? -quot : +quot) - m1;
    return (uint16_t)quot | ((2*rem + (quot>>31)) << 16);
  } else {
    // bad case: use the sh2 algo to get the right result
    int m = (uint32_t)ds>>31, q = (uint32_t)dv>>31, t = m^q, s = 16;
    while (s--) {
      uint32_t v = (uint32_t)dv>>31;
      dv = (dv<<1) | t;
      t = v;
      v = dv;
      if (m^q) dv += ds, q = (uint32_t)dv < v;
      else     dv -= ds, q = !((uint32_t)dv < v);
      q ^= m^t, t = !(m^q);
    }
    return (dv<<1) | t;
  }
}

static uint32_t REGPARM(3) sh2_drc_divs64(int32_t dh, uint32_t *dl, int32_t ds)
{
  int64_t _dv = *dl | ((int64_t)dh << 32);
  uint64_t adv = (_dv < 0 ? -_dv : _dv); // llabs isn't in older toolchains
  uint32_t ads = abs(ds);

  if (ads > 1 && ads > adv>>32 && (int64_t)adv > 0) {
    // good case: no divide by 0, and no result overflow
    uint32_t quot = adv / ads, rem = adv - ((uint64_t)quot * ads);
    int m1 = (rem ? dh^ds : ds) < 0;
    if (rem && dh < 0) rem = (quot&1 ? -rem : +ads-rem);
    else               rem = (quot&1 ? +rem : -ads+rem);
    quot = ((dh^ds)<0 ? -quot : +quot) - m1;
    *dl = quot;
    return rem;
  } else {
    // bad case: use the sh2 algo to get the right result
    uint64_t dv = *dl | ((uint64_t)dh << 32);
    int m = (uint32_t)ds>>31, q = (uint64_t)dv>>63, t = m^q, s = 32;
    while (s--) {
      int64_t v = (uint64_t)dv>>63;
      dv = (dv<<1) | t;
      t = v;
      v = dv;
      if (m^q) dv += ((uint64_t)ds << 32), q = dv < v;
      else     dv -= ((uint64_t)ds << 32), q = !(dv < v);
      q ^= m^t, t = !(m^q);
    }
    *dl = (dv<<1) | t;
    return (dv>>32);
  }
}
#endif

// block local link stuff
struct linkage {
  u32 pc;
  void *ptr;
  struct block_link *bl;
  u32 mask;
};

static inline int find_in_linkage(const struct linkage *array, int size, u32 pc)
{
  int i; // int, not size_t: size is int and the index is returned as int
  for (i = 0; i < size; i++)
    if (pc == array[i].pc)
      return i;

  return -1;
}

static int find_in_sorted_linkage(const struct linkage *array, int size, u32 pc)
{
  // binary search in sorted array
  int left = 0, right = size-1;

  while (left <= right)
  {
    int middle = (left + right) / 2;
    if (array[middle].pc == pc)
      return middle;
    else if (array[middle].pc < pc)
      left = middle + 1;
    else
      right = middle - 1;
  }
  return -1;
}
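
// note: the sort precondition holds because branch_targets[] is filled in
// ascending pc order by the target collection pass in sh2_translate() below,
// and is only ever searched afterwards, never reordered.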

static void emit_branch_linkage_code(SH2 *sh2, struct block_desc *block, int tcache_id,
    const struct linkage *targets, int target_count,
    const struct linkage *links, int link_count)
{
  struct block_link *bl;
  int u, v, tmp;

  emith_flush();
  for (u = 0; u < link_count; u++) {
    emith_pool_check();
    // look up local branch targets
    if (links[u].mask & 0x2) {
      v = find_in_sorted_linkage(targets, target_count, links[u].pc);
      if (v < 0 || ! targets[v].ptr) {
        // forward branch not yet resolved, prepare external linking
        emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
        bl = dr_prepare_ext_branch(block->entryp, links[u].pc, sh2->is_slave, tcache_id);
        if (bl)
          bl->type = BL_LDJMP;
        tmp = rcache_get_tmp_arg(0);
        emith_move_r_imm(tmp, links[u].pc);
        rcache_free_tmp(tmp);
        emith_jump_patchable(sh2_drc_dispatcher);
      } else if (emith_jump_patch_inrange(links[u].ptr, targets[v].ptr)) {
        // inrange local branch
        emith_jump_patch(links[u].ptr, targets[v].ptr, NULL);
      } else {
        // far local branch
        emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
        emith_jump(targets[v].ptr);
      }
    } else {
      // external or exit, emit blx area entry
      void *target = (links[u].mask & 0x1 ? sh2_drc_exit : sh2_drc_dispatcher);
      if (links[u].bl)
        links[u].bl->blx = tcache_ptr;
      emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
      tmp = rcache_get_tmp_arg(0);
      emith_move_r_imm(tmp, links[u].pc & ~1);
      rcache_free_tmp(tmp);
      emith_jump(target);
    }
  }
}
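
// note on links[].mask: bit 1 marks a local branch to be resolved against
// targets[] if possible; entries without it get a blx area stub instead,
// where bit 0 selects sh2_drc_exit over sh2_drc_dispatcher as stub target.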

#define DELAY_SAVE_T(sr) { \
  int t_ = rcache_get_tmp(); \
  emith_bic_r_imm(sr, T_save); \
  emith_and_r_r_imm(t_, sr, 1); \
  emith_or_r_r_lsl(sr, t_, T_SHIFT); \
  rcache_free_tmp(t_); \
}
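
// (DELAY_SAVE_T copies the live T, bit 0 of the cached SR value, into the
// shadow T_save position; it is used when a delay slot insn writes a T that
// the preceding branch still needs, cf. the delay_dep_fw handling below)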

#define FLUSH_CYCLES(sr) \
  if (cycles > 0) { \
    emith_sub_r_imm(sr, cycles << 12); \
    cycles = 0; \
  }

static void *dr_get_pc_base(u32 pc, SH2 *sh2);

static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
{
  // branch targets in current block
  static struct linkage branch_targets[MAX_LOCAL_TARGETS];
  int branch_target_count = 0;
  // unresolved local or external targets with block link/exit area if needed
  static struct linkage blx_targets[MAX_LOCAL_BRANCHES];
  int blx_target_count = 0;

  static u8 op_flags[BLOCK_INSN_LIMIT];

  enum flg_states { FLG_UNKNOWN, FLG_UNUSED, FLG_0, FLG_1 };
  struct drcf {
    int delay_reg:8;
    u32 loop_type:8;
    u32 polling:8;
    u32 pinning:1;
    u32 test_irq:1;
    u32 pending_branch_direct:1;
    u32 pending_branch_indirect:1;
    u32 Tflag:2, Mflag:2;
  } drcf = { 0, };
#if LOOP_OPTIMIZER
  // loops with pinned registers for optimization
  // pinned regs are like statics and don't need saving/restoring inside a loop
  static struct linkage pinned_loops[MAX_LOCAL_TARGETS/16];
  int pinned_loop_count = 0;
#endif

  // PC of current, first, last SH2 insn
  u32 pc, base_pc, end_pc;
  u32 base_literals, end_literals;
  u8 *block_entry_ptr;
  struct block_desc *block;
  struct block_entry *entry;
  struct block_link *bl;
  u16 *dr_pc_base;
  struct op_data *opd;
  int blkid_main = 0;
  int skip_op = 0;
  int tmp, tmp2;
  int cycles;
  int i, v;
  u32 u, m1, m2, m3, m4;
  int op;
  u16 crc;

  base_pc = sh2->pc;

  // get base/validate PC
  dr_pc_base = dr_get_pc_base(base_pc, sh2);
  if (dr_pc_base == (void *)-1) {
    printf("invalid PC, aborting: %08lx\n", (long)base_pc);
    // FIXME: be less destructive
    exit(1);
  }

  // initial passes to disassemble and analyze the block
  crc = scan_block(base_pc, sh2->is_slave, op_flags, &end_pc, &base_literals, &end_literals);
  end_literals = dr_check_nolit(base_literals, end_literals, tcache_id);
  if (base_literals == end_literals) // map empty lit section to end of code
    base_literals = end_literals = end_pc;

  // if there is already a translated but inactive block, reuse it
  block = dr_find_inactive_block(tcache_id, crc, base_pc, end_pc - base_pc,
    base_literals, end_literals - base_literals);
  if (block) {
    dbg(2, "== %csh2 reuse block %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
      base_pc, end_pc, base_literals, end_literals, block->entryp->tcache_ptr);
    dr_activate_block(block, tcache_id, sh2->is_slave);
    emith_update_cache();
    return block->entryp[0].tcache_ptr;
  }

  // collect branch_targets that don't land on delay slots
  m1 = m2 = m3 = m4 = v = op = 0;
  for (pc = base_pc, i = 0; pc < end_pc; i++, pc += 2) {
    if (op_flags[i] & OF_DELAY_OP)
      op_flags[i] &= ~OF_BTARGET;
    if (op_flags[i] & OF_BTARGET) {
      if (branch_target_count < ARRAY_SIZE(branch_targets))
        branch_targets[branch_target_count++] = (struct linkage) { .pc = pc };
      else {
        printf("warning: linkage overflow\n");
        end_pc = pc;
        break;
      }
    }
    if (ops[i].op == OP_LDC && (ops[i].dest & BITMASK1(SHR_SR)) && pc+2 < end_pc)
      op_flags[i+1] |= OF_BTARGET; // RTE entrypoint in case of SR.IMASK change
    // unify T and SR since rcache doesn't know about "virtual" guest regs
    if (ops[i].source & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
    if (ops[i].dest   & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
    if (ops[i].dest   & BITMASK1(SHR_T)) ops[i].dest   |= BITMASK1(SHR_SR);
#if LOOP_DETECTION
    // loop types detected:
    // 1. target: ... BRA target -> idle loop
    // 2. target: ... delay insn ... BF target -> delay loop
    // 3. target: ... poll insn ... BF/BT target -> poll loop
    // 4. target: ... poll insn ... BF/BT exit ... BRA target, exit: -> poll
    // conditions:
    // a. no further branch targets between target and back jump.
    // b. no unconditional branch insn inside the loop.
    // c. exactly one poll or delay insn is allowed inside a delay/poll loop
    // (scan_block marks loops only if they meet conditions a through c)
    // d. idle loops do not modify anything but PC,SR and contain no branches
    // e. delay/poll loops do not modify anything but the concerned reg,PC,SR
    // f. loading constants into registers inside the loop is allowed
    // g. a delay/poll loop must have a conditional branch somewhere
    // h. an idle loop must not have a conditional branch
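    // as a concrete illustration (hypothetical guest code), a type 3 poll
    // loop might look like:
    //   target: mov.l @r4,r0   ; poll insn, reads a mailbox variable
    //           tst   r0,r0
    //           bt    target   ; conditional back jump, condition g
    // whereas "bra target" plus a nop with no other effects is a type 1
    // idle loop.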
    if (op_flags[i] & OF_BTARGET) {
      // possible loop entry point
      drcf.loop_type = op_flags[i] & OF_LOOP;
      drcf.pending_branch_direct = drcf.pending_branch_indirect = 0;
      op = OF_IDLE_LOOP; // loop type
      v = i;
      m1 = m2 = m3 = m4 = 0;
      if (!drcf.loop_type) // reset basic loop if it isn't recognized as loop
        op_flags[i] &= ~OF_BASIC_LOOP;
    }
    if (drcf.loop_type) {
      // calculate reg masks for loop pinning
      m4 |= ops[i].source & ~m3;
      m3 |= ops[i].dest;
      // detect loop type, and store poll/delay register
      if (op_flags[i] & OF_POLL_INSN) {
        op = OF_POLL_LOOP;
        m1 |= ops[i].dest; // loop poll/delay regs
      } else if (op_flags[i] & OF_DELAY_INSN) {
        op = OF_DELAY_LOOP;
        m1 |= ops[i].dest;
      } else if (ops[i].op != OP_LOAD_POOL && ops[i].op != OP_LOAD_CONST
              && (ops[i].op != OP_MOVE || op != OF_POLL_LOOP)) {
        // not (MOV @(PC) or MOV # or (MOV reg and poll)), condition f
        m2 |= ops[i].dest; // regs modified by other insns
      }
      // branch detector
      if (OP_ISBRAIMM(ops[i].op)) {
        if (ops[i].imm == base_pc + 2*v)
          drcf.pending_branch_direct = 1; // backward branch detected
        else
          op_flags[v] &= ~OF_BASIC_LOOP;  // no basic loop
      }
      if (OP_ISBRACND(ops[i].op))
        drcf.pending_branch_indirect = 1; // conditions g,h - cond.branch
      // poll/idle loops terminate with their backwards branch to the loop start
      if (drcf.pending_branch_direct && !(op_flags[i+1] & OF_DELAY_OP)) {
        m2 &= ~(m1 | BITMASK3(SHR_PC, SHR_SR, SHR_T)); // conditions d,e + g,h
        if (m2 || ((op == OF_IDLE_LOOP) == (drcf.pending_branch_indirect)))
          op = 0; // conditions not met
        op_flags[v] = (op_flags[v] & ~OF_LOOP) | op; // set loop type
        drcf.loop_type = 0;
#if LOOP_OPTIMIZER
        if (op_flags[v] & OF_BASIC_LOOP) {
          m3 &= ~rcache_regs_static & ~BITMASK5(SHR_PC, SHR_PR, SHR_SR, SHR_T, SHR_MEM);
          if (m3 && count_bits(m3) < count_bits(rcache_vregs_reg) &&
              pinned_loop_count < ARRAY_SIZE(pinned_loops)-1) {
            pinned_loops[pinned_loop_count++] =
              (struct linkage) { .pc = base_pc + 2*v, .mask = m3 };
          } else
            op_flags[v] &= ~OF_BASIC_LOOP;
        }
#endif
      }
    }
#endif
  }

  tcache_ptr = dr_prepare_cache(tcache_id, (end_pc - base_pc) / 2, branch_target_count);
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcache_id] = tcache_ptr;
#endif

  block = dr_add_block(branch_target_count, base_pc, end_pc - base_pc,
    base_literals, end_literals-base_literals, crc, sh2->is_slave, &blkid_main);
  if (block == NULL)
    return NULL;

  block_entry_ptr = tcache_ptr;
  dbg(2, "== %csh2 block #%d,%d %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
    tcache_id, blkid_main, base_pc, end_pc, base_literals, end_literals, block_entry_ptr);

  // clear stale state after compile errors
  rcache_invalidate();
  emith_invalidate_t();
  drcf = (struct drcf) { 0 };
#if LOOP_OPTIMIZER
  pinned_loops[pinned_loop_count].pc = -1;
  pinned_loop_count = 0;
#endif

  // -------------------------------------------------
  // 3rd pass: actual compilation
  pc = base_pc;
  cycles = 0;
  for (i = 0; pc < end_pc; i++)
  {
    u32 delay_dep_fw = 0, delay_dep_bk = 0;
    int tmp3, tmp4;
    int sr;

    if (op_flags[i] & OF_BTARGET)
    {
      if (pc != base_pc)
      {
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        FLUSH_CYCLES(sr);
        emith_sync_t(sr);
        drcf.Mflag = FLG_UNKNOWN;
        rcache_flush();
        emith_flush();
      }

      // make block entry
      v = block->entry_count;
      entry = &block->entryp[v];
      if (v < branch_target_count)
      {
        entry->pc = pc;
        entry->tcache_ptr = tcache_ptr;
        entry->links = entry->o_links = NULL;
#if (DRC_DEBUG & 2)
        entry->block = block;
#endif
        block->entry_count++;

        dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p",
          sh2->is_slave ? 's' : 'm', tcache_id, blkid_main,
          pc, tcache_ptr);
      }
      else {
        dbg(1, "too many entryp for block #%d,%d pc=%08x",
          tcache_id, blkid_main, pc);
        break;
      }

      v = find_in_sorted_linkage(branch_targets, branch_target_count, pc);
      if (v >= 0)
        branch_targets[v].ptr = tcache_ptr;
#if LOOP_DETECTION
      drcf.loop_type = op_flags[i] & OF_LOOP;
      drcf.delay_reg = -1;
      drcf.polling = (drcf.loop_type == OF_POLL_LOOP ? MF_POLLING : 0);
#endif

      rcache_clean();

#if (DRC_DEBUG & 0x10)
      tmp  = rcache_get_tmp_arg(0);
      emith_move_r_imm(tmp, pc);
      tmp  = emit_memhandler_read(1);
      tmp2 = rcache_get_tmp();
      tmp3 = rcache_get_tmp();
      emith_move_r_imm(tmp2, (s16)FETCH_OP(pc));
      emith_move_r_imm(tmp3, 0);
      emith_cmp_r_r(tmp, tmp2);
      EMITH_SJMP_START(DCOND_EQ);
      emith_read_r_r_offs_c(DCOND_NE, tmp3, tmp3, 0); // crash
      EMITH_SJMP_END(DCOND_EQ);
      rcache_free_tmp(tmp);
      rcache_free_tmp(tmp2);
      rcache_free_tmp(tmp3);
#endif

      // check cycles
      sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);

#if LOOP_OPTIMIZER
      if (op_flags[i] & OF_BASIC_LOOP) {
        if (pinned_loops[pinned_loop_count].pc == pc) {
          // pin needed regs on loop entry
          FOR_ALL_BITS_SET_DO(pinned_loops[pinned_loop_count].mask, v, rcache_pin_reg(v));
          emith_flush();
          // store current PC as loop target
          pinned_loops[pinned_loop_count].ptr = tcache_ptr;
          drcf.pinning = 1;
        } else
          op_flags[i] &= ~OF_BASIC_LOOP;
      }

      if (op_flags[i] & OF_BASIC_LOOP) {
        // if exiting a pinned loop pinned regs must be written back to ctx
        // since they are reloaded in the loop entry code
        emith_cmp_r_imm(sr, 0);
        EMITH_JMP_START(DCOND_GT);
        rcache_save_pinned();

        if (blx_target_count < ARRAY_SIZE(blx_targets)) {
          // exit via stub in blx table (saves some 1-3 insns in the main flow)
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
          emith_jump_patchable(tcache_ptr);
        } else {
          // blx table full, must inline exit code
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, pc);
          emith_jump(sh2_drc_exit);
          rcache_free_tmp(tmp);
        }
        EMITH_JMP_END(DCOND_GT);
      } else
#endif
      {
        if (blx_target_count < ARRAY_SIZE(blx_targets)) {
          // exit via stub in blx table (saves some 1-3 insns in the main flow)
          emith_cmp_r_imm(sr, 0);
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
          emith_jump_cond_patchable(DCOND_LE, tcache_ptr);
        } else {
          // blx table full, must inline exit code
          tmp = rcache_get_tmp_arg(0);
          emith_cmp_r_imm(sr, 0);
          EMITH_SJMP_START(DCOND_GT);
          emith_move_r_imm_c(DCOND_LE, tmp, pc);
          emith_jump_cond(DCOND_LE, sh2_drc_exit);
          EMITH_SJMP_END(DCOND_GT);
          rcache_free_tmp(tmp);
        }
      }

#if (DRC_DEBUG & 32)
      // block hit counter
      tmp  = rcache_get_tmp_arg(0);
      tmp2 = rcache_get_tmp_arg(1);
      emith_move_r_ptr_imm(tmp, (uptr)entry);
      emith_read_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
      emith_add_r_imm(tmp2, 1);
      emith_write_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
      rcache_free_tmp(tmp);
      rcache_free_tmp(tmp2);
#endif
#if (DRC_DEBUG & (8|256|512|1024))
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      emith_sync_t(sr);
      rcache_clean();
      tmp = rcache_used_hregs_mask();
      emith_save_caller_regs(tmp);
      emit_do_static_regs(1, 0);
      rcache_get_reg_arg(2, SHR_SR, NULL);
      tmp2 = rcache_get_tmp_arg(0);
      tmp3 = rcache_get_tmp_arg(1);
      tmp4 = rcache_get_tmp();
      emith_move_r_ptr_imm(tmp2, tcache_ptr);
      emith_move_r_r_ptr(tmp3, CONTEXT_REG);
      emith_move_r_imm(tmp4, pc);
      emith_ctx_write(tmp4, SHR_PC * 4);
      rcache_invalidate_tmp();
      emith_abicall(sh2_drc_log_entry);
      emith_restore_caller_regs(tmp);
#endif

      do_host_disasm(tcache_id);
      rcache_unlock_all();
    }

#ifdef DRC_CMP
    if (!(op_flags[i] & OF_DELAY_OP)) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      emit_move_r_imm32(SHR_PC, pc);
      rcache_clean();

      tmp = rcache_used_hregs_mask();
      emith_save_caller_regs(tmp);
      emit_do_static_regs(1, 0);
      emith_pass_arg_r(0, CONTEXT_REG);
      emith_abicall(do_sh2_cmp);
      emith_restore_caller_regs(tmp);
    }
#endif

    // emit blx area if limits are approached
    if (blx_target_count && (blx_target_count > ARRAY_SIZE(blx_targets)-4 ||
        !emith_jump_patch_inrange(blx_targets[0].ptr, tcache_ptr+0x100))) {
      u8 *jp;
      rcache_invalidate_tmp();
      jp = tcache_ptr;
      emith_jump_patchable(tcache_ptr);
      emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
        branch_target_count, blx_targets, blx_target_count);
      blx_target_count = 0;
      do_host_disasm(tcache_id);
      emith_jump_patch(jp, tcache_ptr, NULL);
    }

    emith_pool_check();

    opd = &ops[i];
    op = FETCH_OP(pc);
#if (DRC_DEBUG & 4)
    DasmSH2(sh2dasm_buff, pc, op);
    if (op_flags[i] & OF_BTARGET) {
      if ((op_flags[i] & OF_LOOP) == OF_DELAY_LOOP) tmp3 = '+';
      else if ((op_flags[i] & OF_LOOP) == OF_POLL_LOOP) tmp3 = '=';
      else if ((op_flags[i] & OF_LOOP) == OF_IDLE_LOOP) tmp3 = '~';
      else tmp3 = '*';
    } else if (drcf.loop_type) tmp3 = '.';
    else tmp3 = ' ';
    printf("%c%08x %04x %s\n", tmp3, pc, op, sh2dasm_buff);
#endif

    pc += 2;
#if (DRC_DEBUG & 2)
    insns_compiled++;
#endif
    if (skip_op > 0) {
      skip_op--;
      continue;
    }

    if (op_flags[i] & OF_DELAY_OP)
    {
      // handle delay slot dependencies
      delay_dep_fw = opd->dest & ops[i-1].source;
      delay_dep_bk = opd->source & ops[i-1].dest;
      if (delay_dep_fw & BITMASK1(SHR_T)) {
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        DELAY_SAVE_T(sr);
      }
      if (delay_dep_bk & BITMASK1(SHR_PC)) {
        if (opd->op != OP_LOAD_POOL && opd->op != OP_MOVA) {
          // can only be those 2 really..
          elprintf_sh2(sh2, EL_ANOMALY,
            "drc: illegal slot insn %04x @ %08x?", op, pc - 2);
        }
        // store PC for MOVA/MOV @PC address calculation
        if (opd->imm != 0)
          ; // case OP_BRANCH - addr already resolved in scan_block
        else {
          switch (ops[i-1].op) {
          case OP_BRANCH:
            emit_move_r_imm32(SHR_PC, ops[i-1].imm);
            break;
          case OP_BRANCH_CT:
          case OP_BRANCH_CF:
            sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
            tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
            emith_move_r_imm(tmp, pc);
            tmp2 = emith_tst_t(sr, (ops[i-1].op == OP_BRANCH_CT));
            tmp3 = emith_invert_cond(tmp2);
            EMITH_SJMP_START(tmp3);
            emith_move_r_imm_c(tmp2, tmp, ops[i-1].imm);
            EMITH_SJMP_END(tmp3);
            break;
          case OP_BRANCH_N: // BT/BF known not to be taken
            // XXX could modify opd->imm instead?
            emit_move_r_imm32(SHR_PC, pc);
            break;
          // case OP_BRANCH_R OP_BRANCH_RF - PC already loaded
          }
        }
      }
      //if (delay_dep_fw & ~BITMASK1(SHR_T))
      //  dbg(1, "unhandled delay_dep_fw: %x", delay_dep_fw & ~BITMASK1(SHR_T));
      if (delay_dep_bk & ~BITMASK2(SHR_PC, SHR_PR))
        dbg(1, "unhandled delay_dep_bk: %x", delay_dep_bk);
    }

    // inform cache about future register usage
    u32 late = 0;  // regs read by future ops
    u32 write = 0; // regs written to (to detect write before read)
    u32 soon = 0;  // regs read soon
    for (v = 1; v <= 9; v++) {
      // no sense in looking any further than the next rcache flush
      tmp = ((op_flags[i+v] & OF_BTARGET) || (op_flags[i+v-1] & OF_DELAY_OP) ||
             (OP_ISBRACND(opd[v-1].op) && !(op_flags[i+v] & OF_DELAY_OP)));
      // XXX looking behind cond branch to avoid evicting regs used later?
      if (pc + 2*v <= end_pc && !tmp) { // (pc already incremented above)
        late |= opd[v].source & ~write;
        // ignore source regs after they have been written to
        write |= opd[v].dest;
        // regs needed in the next few instructions
        if (v <= 4)
          soon = late;
      } else
        break;
    }
    rcache_set_usage_now(opd[0].source);  // current insn
    rcache_set_usage_soon(soon);          // insns 1-4
    rcache_set_usage_late(late & ~soon);  // insns 5-9
    rcache_set_usage_discard(write & ~(late|soon));
    if (v <= 9)
      // upcoming rcache_flush, start writing back unused dirty stuff
      rcache_clean_masked(rcache_dirty_mask() & ~(write|opd[0].dest));
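    // (illustrative note: a guest reg that is overwritten within the lookahead
    //  window without being read first ends up in the discard set via
    //  write & ~(late|soon), so rcache may drop its dirty value instead of
    //  writing it back; regs read within 4 insns count as "soon", within
    //  5-9 as "late")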

    switch (opd->op)
    {
    case OP_BRANCH_N:
      // never taken, just use up cycles
      goto end_op;
    case OP_BRANCH:
    case OP_BRANCH_CT:
    case OP_BRANCH_CF:
      if (opd->dest & BITMASK1(SHR_PR))
        emit_move_r_imm32(SHR_PR, pc + 2);
      drcf.pending_branch_direct = 1;
      goto end_op;

    case OP_BRANCH_R:
      if (opd->dest & BITMASK1(SHR_PR))
        emit_move_r_imm32(SHR_PR, pc + 2);
      emit_move_r_r(SHR_PC, opd->rm);
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_BRANCH_RF:
      tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
      tmp  = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
      emith_move_r_imm(tmp, pc + 2);
      if (opd->dest & BITMASK1(SHR_PR)) {
        tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE, NULL);
        emith_move_r_r(tmp3, tmp);
      }
      emith_add_r_r(tmp, tmp2);
      if (gconst_get(GET_Rn(), &u))
        gconst_set(SHR_PC, pc + 2 + u);
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_SLEEP: // SLEEP      0000000000011011
      printf("TODO sleep\n");
      goto end_op;

    case OP_RTE: // RTE        0000000000101011
      emith_invalidate_t();
      // pop PC
      tmp = emit_memhandler_read_rr(sh2, SHR_PC, SHR_SP, 0, 2 | MF_POSTINCR);
      rcache_free(tmp);
      // pop SR
      tmp = emit_memhandler_read_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_POSTINCR);
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      emith_write_sr(sr, tmp);
      rcache_free_tmp(tmp);
      drcf.test_irq = 1;
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_UNDEFINED:
      elprintf_sh2(sh2, EL_ANOMALY, "drc: unhandled op %04x @ %08x", op, pc-2);
      opd->imm = (op_flags[i] & OF_B_IN_DS) ? 6 : 4;
      // fallthrough
    case OP_TRAPA: // TRAPA #imm      11000011iiiiiiii
      // push SR
      tmp  = rcache_get_reg_arg(1, SHR_SR, &tmp2);
      emith_sync_t(tmp2);
      emith_clear_msb(tmp, tmp2, 22);
      emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
      // push PC
      if (opd->op == OP_TRAPA) {
        tmp = rcache_get_tmp_arg(1);
        emith_move_r_imm(tmp, pc);
      } else if (drcf.pending_branch_indirect) {
        tmp = rcache_get_reg_arg(1, SHR_PC, NULL);
      } else {
        tmp = rcache_get_tmp_arg(1);
        emith_move_r_imm(tmp, pc - 2);
      }
      emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
      // obtain new PC
      emit_memhandler_read_rr(sh2, SHR_PC, SHR_VBR, opd->imm * 4, 2);
      // indirect jump -> back to dispatcher
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_LOAD_POOL:
#if PROPAGATE_CONSTANTS
      if ((opd->imm && opd->imm >= base_pc && opd->imm < end_literals) ||
          dr_is_rom(opd->imm))
      {
        if (opd->size == 2)
          u = FETCH32(opd->imm);
        else
          u = (s16)FETCH_OP(opd->imm);
        // tweak for Blackthorne: avoid stack overwriting
        if (GET_Rn() == SHR_SP && u == 0x0603f800) u = 0x0603f880;
        gconst_new(GET_Rn(), u);
      }
      else
#endif
      {
        if (opd->imm != 0) {
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, opd->imm);
        } else {
          // have to calculate read addr from PC for delay slot
          tmp = rcache_get_reg_arg(0, SHR_PC, &tmp2);
          if (opd->size == 2) {
            emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
            emith_bic_r_imm(tmp, 3);
          }
          else
            emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 2);
        }
        tmp2 = emit_memhandler_read(opd->size);
        tmp3 = rcache_map_reg(GET_Rn(), tmp2);
        if (tmp3 != tmp2) {
          emith_move_r_r(tmp3, tmp2);
          rcache_free_tmp(tmp2);
        }
      }
      goto end_op;

    case OP_MOVA: // MOVA @(disp,PC),R0    11000111dddddddd
      if (opd->imm != 0)
        emit_move_r_imm32(SHR_R0, opd->imm);
      else {
        // have to calculate addr from PC for delay slot
        tmp2 = rcache_get_reg(SHR_PC, RC_GR_READ, NULL);
        tmp  = rcache_get_reg(SHR_R0, RC_GR_WRITE, NULL);
        emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
        emith_bic_r_imm(tmp, 3);
      }
      goto end_op;
    }

    switch ((op >> 12) & 0x0f)
    {
    /////////////////////////////////////////////
    case 0x00:
      switch (op & 0x0f)
      {
      case 0x02:
        switch (GET_Fx())
        {
        case 0: // STC SR,Rn  0000nnnn00000010
          tmp2 = SHR_SR;
          break;
        case 1: // STC GBR,Rn 0000nnnn00010010
          tmp2 = SHR_GBR;
          break;
        case 2: // STC VBR,Rn 0000nnnn00100010
          tmp2 = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp2 == SHR_SR) {
          sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
          emith_sync_t(sr);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
          emith_clear_msb(tmp, sr, 22); // reserved bits defined by ISA as 0
        } else
          emit_move_r_r(GET_Rn(), tmp2);
        goto end_op;
      case 0x04: // MOV.B Rm,@(R0,Rn)  0000nnnnmmmm0100
      case 0x05: // MOV.W Rm,@(R0,Rn)  0000nnnnmmmm0101
      case 0x06: // MOV.L Rm,@(R0,Rn)  0000nnnnmmmm0110
        emit_indirect_indexed_write(sh2, GET_Rm(), SHR_R0, GET_Rn(), op & 3);
        goto end_op;
      case 0x07: // MUL.L Rm,Rn        0000nnnnmmmm0111
        tmp  = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        emith_mul(tmp3, tmp2, tmp);
        goto end_op;
      case 0x08:
        switch (GET_Fx())
        {
        case 0: // CLRT   0000000000001000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (~rcache_regs_discard & BITMASK1(SHR_T))
#endif
            emith_set_t(sr, 0);
          break;
        case 1: // SETT   0000000000011000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (~rcache_regs_discard & BITMASK1(SHR_T))
#endif
            emith_set_t(sr, 1);
          break;
        case 2: // CLRMAC 0000000000101000
          emit_move_r_imm32(SHR_MACL, 0);
          emit_move_r_imm32(SHR_MACH, 0);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // NOP   0000000000001001
          break;
        case 1: // DIV0U 0000000000011001
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_invalidate_t();
          emith_bic_r_imm(sr, M|Q|T);
          drcf.Mflag = FLG_0;
#if DIV_OPTIMIZER
          if (div(opd).div1 == 16 && div(opd).ro == div(opd).rn) {
            // divide 32/16
            rcache_get_reg_arg(0, div(opd).rn, NULL);
            rcache_get_reg_arg(1, div(opd).rm, NULL);
            rcache_invalidate_tmp();
            emith_abicall(sh2_drc_divu32);
            tmp = rcache_get_tmp_ret();
            tmp2 = rcache_map_reg(div(opd).rn, tmp);
            if (tmp != tmp2)
              emith_move_r_r(tmp2, tmp);

            tmp3 = rcache_get_tmp();
            emith_and_r_r_imm(tmp3, tmp2, 1);     // Q = !Rn[0]
            emith_eor_r_r_imm(tmp3, tmp3, 1);
            emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);
            rcache_free_tmp(tmp3);
            emith_or_r_r_r_lsr(sr, sr, tmp2, 31); // T = Rn[31]
            skip_op = div(opd).div1 + div(opd).rotcl;
          }
          else if (div(opd).div1 == 32 && div(opd).ro != div(opd).rn) {
            // divide 64/32
            tmp4 = rcache_get_reg(div(opd).ro, RC_GR_READ, NULL);
            emith_ctx_write(tmp4, offsetof(SH2, drc_tmp));
            tmp = rcache_get_tmp_arg(1);
            emith_add_r_r_ptr_imm(tmp, CONTEXT_REG, offsetof(SH2, drc_tmp));
            rcache_get_reg_arg(0, div(opd).rn, NULL);
            rcache_get_reg_arg(2, div(opd).rm, NULL);
            rcache_invalidate_tmp();
            emith_abicall(sh2_drc_divu64);
            tmp = rcache_get_tmp_ret();
            tmp2 = rcache_map_reg(div(opd).rn, tmp);
            tmp4 = rcache_get_reg(div(opd).ro, RC_GR_WRITE, NULL);
            if (tmp != tmp2)
              emith_move_r_r(tmp2, tmp);
            emith_ctx_read(tmp4, offsetof(SH2, drc_tmp));

            tmp3 = rcache_get_tmp();
            emith_and_r_r_imm(tmp3, tmp4, 1);     // Q = !Ro[0]
            emith_eor_r_r_imm(tmp3, tmp3, 1);
            emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);
            rcache_free_tmp(tmp3);
            emith_or_r_r_r_lsr(sr, sr, tmp4, 31); // T = Ro[31]
            skip_op = div(opd).div1 + div(opd).rotcl;
          }
#endif
          break;
        case 2: // MOVT Rn 0000nnnn00101001
          sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
          emith_sync_t(sr);
          tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
          emith_clear_msb(tmp2, sr, 31);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // STS MACH,Rn 0000nnnn00001010
          tmp2 = SHR_MACH;
          break;
        case 1: // STS MACL,Rn 0000nnnn00011010
          tmp2 = SHR_MACL;
          break;
        case 2: // STS PR,Rn   0000nnnn00101010
          tmp2 = SHR_PR;
          break;
        default:
          goto default_;
        }
        emit_move_r_r(GET_Rn(), tmp2);
        goto end_op;
      case 0x0c: // MOV.B @(R0,Rm),Rn  0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn  0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn  0000nnnnmmmm1110
        emit_indirect_indexed_read(sh2, GET_Rn(), SHR_R0, GET_Rm(), (op & 3) | drcf.polling);
        goto end_op;
      case 0x0f: // MAC.L @Rm+,@Rn+    0000nnnnmmmm1111
        emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
        sr   = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
        emith_sh2_macl(tmp3, tmp4, tmp, tmp2, sr);
        rcache_free_tmp(tmp2);
        rcache_free_tmp(tmp);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x01: // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), (op & 0x0f) * 4, 2);
      goto end_op;

    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn  0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn  0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn  0010nnnnmmmm0010
        emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, op & 3);
        goto end_op;
      case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
        emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, (op & 3) | MF_PREDECR);
        goto end_op;
      case 0x07: // DIV0S Rm,Rn   0010nnnnmmmm0111
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_invalidate_t();
        emith_bic_r_imm(sr, M|Q|T);
        drcf.Mflag = FLG_UNKNOWN;
#if DIV_OPTIMIZER
        if (div(opd).div1 == 16 && div(opd).ro == div(opd).rn) {
          // divide 32/16
          rcache_get_reg_arg(0, div(opd).rn, NULL);
          tmp2 = rcache_get_reg_arg(1, div(opd).rm, NULL);
          tmp3 = rcache_get_tmp();
          emith_lsr(tmp3, tmp2, 31);
          emith_or_r_r_lsl(sr, tmp3, M_SHIFT);  // M = Rm[31]
          rcache_invalidate_tmp();
          emith_abicall(sh2_drc_divs32);
          tmp = rcache_get_tmp_ret();
          tmp2 = rcache_map_reg(div(opd).rn, tmp);
          if (tmp != tmp2)
            emith_move_r_r(tmp2, tmp);
          tmp3 = rcache_get_tmp();

          emith_eor_r_r_r_lsr(tmp3, tmp2, sr, M_SHIFT);
          emith_and_r_r_imm(tmp3, tmp3, 1);
          emith_eor_r_r_imm(tmp3, tmp3, 1);
          emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);  // Q = !Rn[0]^M
          rcache_free_tmp(tmp3);
          emith_or_r_r_r_lsr(sr, sr, tmp2, 31); // T = Rn[31]
          skip_op = div(opd).div1 + div(opd).rotcl;
        }
        else if (div(opd).div1 == 32 && div(opd).ro != div(opd).rn) {
          // divide 64/32
          tmp4 = rcache_get_reg(div(opd).ro, RC_GR_READ, NULL);
          emith_ctx_write(tmp4, offsetof(SH2, drc_tmp));
          rcache_get_reg_arg(0, div(opd).rn, NULL);
          tmp2 = rcache_get_reg_arg(2, div(opd).rm, NULL);
          tmp3 = rcache_get_tmp_arg(1);
          emith_lsr(tmp3, tmp2, 31);
          emith_or_r_r_lsl(sr, tmp3, M_SHIFT);  // M = Rm[31]
          emith_add_r_r_ptr_imm(tmp3, CONTEXT_REG, offsetof(SH2, drc_tmp));
          rcache_invalidate_tmp();
          emith_abicall(sh2_drc_divs64);
          tmp = rcache_get_tmp_ret();
          tmp2 = rcache_map_reg(div(opd).rn, tmp);
          tmp4 = rcache_get_reg(div(opd).ro, RC_GR_WRITE, NULL);
          if (tmp != tmp2)
            emith_move_r_r(tmp2, tmp);
          emith_ctx_read(tmp4, offsetof(SH2, drc_tmp));

          tmp3 = rcache_get_tmp();
          emith_eor_r_r_r_lsr(tmp3, tmp4, sr, M_SHIFT);
          emith_and_r_r_imm(tmp3, tmp3, 1);
          emith_eor_r_r_imm(tmp3, tmp3, 1);
          emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);  // Q = !Ro[0]^M
          rcache_free_tmp(tmp3);
          emith_or_r_r_r_lsr(sr, sr, tmp4, 31); // T = Ro[31]
          skip_op = div(opd).div1 + div(opd).rotcl;
        } else
#endif
        {
          tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp  = rcache_get_tmp();
          emith_lsr(tmp, tmp2, 31); // Q = Nn
          emith_or_r_r_lsl(sr, tmp, Q_SHIFT);
          emith_lsr(tmp, tmp3, 31); // M = Nm
          emith_or_r_r_lsl(sr, tmp, M_SHIFT);
          emith_eor_r_r_lsr(tmp, tmp2, 31);
          emith_or_r_r(sr, tmp);    // T = Q^M
          rcache_free(tmp);
        }
        goto end_op;
      case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
        sr   = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_r(tmp2, tmp3);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
        if (GET_Rm() != GET_Rn()) {
          tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp  = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
          emith_and_r_r_r(tmp, tmp3, tmp2);
        }
        goto end_op;
      case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp  = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        emith_eor_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0b: // OR Rm,Rn  0010nnnnmmmm1011
        if (GET_Rm() != GET_Rn()) {
          tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp  = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
          emith_or_r_r_r(tmp, tmp3, tmp2);
        }
        goto end_op;
      case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
        tmp  = rcache_get_tmp();
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_eor_r_r_r(tmp, tmp2, tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, 0x000000ff);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0x0000ff00);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0x00ff0000);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0xff000000);
        EMITH_SJMP_END(DCOND_EQ);
        EMITH_SJMP_END(DCOND_EQ);
        EMITH_SJMP_END(DCOND_EQ);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d: // XTRCT Rm,Rn   0010nnnnmmmm1101
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp  = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        emith_lsr(tmp, tmp3, 16);
        emith_or_r_r_lsl(tmp, tmp2, 16);
        goto end_op;
      case 0x0e: // MULU.W Rm,Rn  0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn  0010nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp  = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = tmp3;
        if (op & 1) {
          if (! rcache_is_s16(tmp2)) {
            emith_sext(tmp, tmp2, 16);
            tmp2 = tmp;
          }
          if (! rcache_is_s16(tmp3)) {
            tmp4 = rcache_get_tmp();
            emith_sext(tmp4, tmp3, 16);
          }
        } else {
          if (! rcache_is_u16(tmp2)) {
            emith_clear_msb(tmp, tmp2, 16);
            tmp2 = tmp;
          }
          if (! rcache_is_u16(tmp3)) {
            tmp4 = rcache_get_tmp();
            emith_clear_msb(tmp4, tmp3, 16);
          }
        }
        emith_mul(tmp, tmp2, tmp4);
        if (tmp4 != tmp3)
          rcache_free_tmp(tmp4);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
        sr   = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        switch (op & 0x07)
        {
        case 0x00: // CMP/EQ
          tmp = DCOND_EQ;
          break;
        case 0x02: // CMP/HS
          tmp = DCOND_HS;
          break;
        case 0x03: // CMP/GE
          tmp = DCOND_GE;
          break;
        case 0x06: // CMP/HI
          tmp = DCOND_HI;
          break;
        case 0x07: // CMP/GT
          tmp = DCOND_GT;
          break;
        }
        emith_clr_t_cond(sr);
        emith_cmp_r_r(tmp2, tmp3);
        emith_set_t_cond(sr, tmp);
        goto end_op;
      case 0x04: // DIV1 Rm,Rn   0011nnnnmmmm0100
        // Q1 = carry(Rn = (Rn << 1) | T)
        // if Q ^ M
        //   Q2 = carry(Rn += Rm)
        // else
        //   Q2 = carry(Rn -= Rm)
        // Q = M ^ Q1 ^ Q2
        // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
        sr   = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        tmp = rcache_get_tmp();
        if (drcf.Mflag != FLG_0) {
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT); // Q ^= M
        }
        rcache_free_tmp(tmp);
        // shift Rn, add T, add or sub Rm, set T = !(Q1 ^ Q2)
        // in: (Q ^ M) passed in Q
        emith_sh2_div1_step(tmp2, tmp3, sr);
        tmp = rcache_get_tmp();
        emith_or_r_imm(sr, Q); // Q = !T
        emith_and_r_r_imm(tmp, sr, T);
        emith_eor_r_r_lsl(sr, tmp, Q_SHIFT);
        if (drcf.Mflag != FLG_0) { // Q = M ^ !T = M ^ Q1 ^ Q2
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT);
        }
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
        tmp  = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_u64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      case 0x08: // SUB Rm,Rn     0011nnnnmmmm1000
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
      case 0x0c: // ADD Rm,Rn     0011nnnnmmmm1100
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp  = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 4) {
          emith_add_r_r_r(tmp, tmp3, tmp2);
        } else
          emith_sub_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0a: // SUBC Rm,Rn    0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn    0011nnnnmmmm1110
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp  = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr   = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4) {
            emith_t_to_carry(sr, 0);
            emith_adc_r_r_r(tmp, tmp3, tmp2);
          } else {
            emith_t_to_carry(sr, 1);
            emith_sbc_r_r_r(tmp, tmp3, tmp2);
          }
        } else
#endif
        {
          EMITH_HINT_COND(DCOND_CS);
          if (op & 4) { // adc
            emith_tpop_carry(sr, 0);
            emith_adcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 0);
          } else {
            emith_tpop_carry(sr, 1);
            emith_sbcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 1);
          }
        }
        goto end_op;
      case 0x0b: // SUBV Rm,Rn    0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn    0011nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp  = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr   = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4)
            emith_add_r_r_r(tmp, tmp3, tmp2);
          else
            emith_sub_r_r_r(tmp, tmp3, tmp2);
        } else
#endif
        {
          emith_clr_t_cond(sr);
          EMITH_HINT_COND(DCOND_VS);
          if (op & 4)
            emith_addf_r_r_r(tmp, tmp3, tmp2);
          else
            emith_subf_r_r_r(tmp, tmp3, tmp2);
          emith_set_t_cond(sr, DCOND_VS);
        }
        goto end_op;
      case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
        tmp  = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_s64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      }
      goto default_;
  3862. /////////////////////////////////////////////
  3863. case 0x04:
  3864. switch (op & 0x0f)
  3865. {
  3866. case 0x00:
  3867. switch (GET_Fx())
  3868. {
  3869. case 0: // SHLL Rn 0100nnnn00000000
  3870. case 2: // SHAL Rn 0100nnnn00100000
  3871. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3872. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3873. #if T_OPTIMIZER
  3874. if (rcache_regs_discard & BITMASK1(SHR_T))
  3875. emith_lsl(tmp, tmp2, 1);
  3876. else
  3877. #endif
  3878. {
  3879. emith_invalidate_t();
  3880. emith_lslf(tmp, tmp2, 1);
  3881. emith_carry_to_t(sr, 0);
  3882. }
  3883. goto end_op;
  3884. case 1: // DT Rn 0100nnnn00010000
  3885. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3886. #if LOOP_DETECTION
  3887. if (drcf.loop_type == OF_DELAY_LOOP) {
  3888. if (drcf.delay_reg == -1)
  3889. drcf.delay_reg = GET_Rn();
  3890. else
  3891. drcf.polling = drcf.loop_type = 0;
  3892. }
  3893. #endif
  3894. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3895. emith_clr_t_cond(sr);
  3896. EMITH_HINT_COND(DCOND_EQ);
  3897. emith_subf_r_r_imm(tmp, tmp2, 1);
  3898. emith_set_t_cond(sr, DCOND_EQ);
  3899. goto end_op;
  3900. }
  3901. goto default_;
  3902. case 0x01:
  3903. switch (GET_Fx())
  3904. {
  3905. case 0: // SHLR Rn 0100nnnn00000001
  3906. case 2: // SHAR Rn 0100nnnn00100001
  3907. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3908. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3909. #if T_OPTIMIZER
  3910. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  3911. if (op & 0x20)
  3912. emith_asr(tmp,tmp2,1);
  3913. else
  3914. emith_lsr(tmp,tmp2,1);
  3915. } else
  3916. #endif
  3917. {
  3918. emith_invalidate_t();
  3919. if (op & 0x20) {
  3920. emith_asrf(tmp, tmp2, 1);
  3921. } else
  3922. emith_lsrf(tmp, tmp2, 1);
  3923. emith_carry_to_t(sr, 0);
  3924. }
  3925. goto end_op;
  3926. case 1: // CMP/PZ Rn 0100nnnn00010001
  3927. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3928. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3929. emith_clr_t_cond(sr);
  3930. emith_cmp_r_imm(tmp, 0);
  3931. emith_set_t_cond(sr, DCOND_GE);
  3932. goto end_op;
  3933. }
  3934. goto default_;
  3935. case 0x02:
  3936. case 0x03:
  3937. switch (op & 0x3f)
  3938. {
  3939. case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
  3940. tmp = SHR_MACH;
  3941. break;
  3942. case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
  3943. tmp = SHR_MACL;
  3944. break;
  3945. case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
  3946. tmp = SHR_PR;
  3947. break;
  3948. case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
  3949. tmp = SHR_SR;
  3950. break;
  3951. case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
  3952. tmp = SHR_GBR;
  3953. break;
  3954. case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
  3955. tmp = SHR_VBR;
  3956. break;
  3957. default:
  3958. goto default_;
  3959. }
  3960. if (tmp == SHR_SR) {
  3961. tmp3 = rcache_get_reg_arg(1, tmp, &tmp4);
  3962. emith_sync_t(tmp4);
  3963. emith_clear_msb(tmp3, tmp4, 22); // reserved bits defined by ISA as 0
  3964. } else
  3965. tmp3 = rcache_get_reg_arg(1, tmp, NULL);
  3966. emit_memhandler_write_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_PREDECR);
  3967. goto end_op;
  3968. case 0x04:
  3969. case 0x05:
  3970. switch (op & 0x3f)
  3971. {
  3972. case 0x04: // ROTL Rn 0100nnnn00000100
  3973. case 0x05: // ROTR Rn 0100nnnn00000101
  3974. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3975. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3976. #if T_OPTIMIZER
  3977. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  3978. if (op & 1)
  3979. emith_ror(tmp, tmp2, 1);
  3980. else
  3981. emith_rol(tmp, tmp2, 1);
  3982. } else
  3983. #endif
  3984. {
  3985. emith_invalidate_t();
  3986. if (op & 1)
  3987. emith_rorf(tmp, tmp2, 1);
  3988. else
  3989. emith_rolf(tmp, tmp2, 1);
  3990. emith_carry_to_t(sr, 0);
  3991. }
  3992. goto end_op;
  3993. case 0x24: // ROTCL Rn 0100nnnn00100100
  3994. case 0x25: // ROTCR Rn 0100nnnn00100101
  3995. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
  3996. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3997. emith_sync_t(sr);
  3998. #if T_OPTIMIZER
  3999. if (rcache_regs_discard & BITMASK1(SHR_T)) {
  4000. emith_t_to_carry(sr, 0);
  4001. if (op & 1)
  4002. emith_rorc(tmp);
  4003. else
  4004. emith_rolc(tmp);
  4005. } else
  4006. #endif
  4007. {
  4008. emith_tpop_carry(sr, 0);
  4009. if (op & 1)
  4010. emith_rorcf(tmp);
  4011. else
  4012. emith_rolcf(tmp);
  4013. emith_tpush_carry(sr, 0);
  4014. }
  4015. goto end_op;
  4016. case 0x15: // CMP/PL Rn 0100nnnn00010101
  4017. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  4018. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4019. emith_clr_t_cond(sr);
  4020. emith_cmp_r_imm(tmp, 0);
  4021. emith_set_t_cond(sr, DCOND_GT);
  4022. goto end_op;
  4023. }
  4024. goto default_;
  4025. case 0x06:
  4026. case 0x07:
  4027. switch (op & 0x3f)
  4028. {
  4029. case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
  4030. tmp = SHR_MACH;
  4031. break;
  4032. case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
  4033. tmp = SHR_MACL;
  4034. break;
  4035. case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
  4036. tmp = SHR_PR;
  4037. break;
  4038. case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
  4039. tmp = SHR_SR;
  4040. break;
  4041. case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
  4042. tmp = SHR_GBR;
  4043. break;
  4044. case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
  4045. tmp = SHR_VBR;
  4046. break;
  4047. default:
  4048. goto default_;
  4049. }
  4050. if (tmp == SHR_SR) {
  4051. emith_invalidate_t();
  4052. tmp2 = emit_memhandler_read_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_POSTINCR);
  4053. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  4054. emith_write_sr(sr, tmp2);
  4055. rcache_free_tmp(tmp2);
  4056. drcf.test_irq = 1;
  4057. } else
  4058. emit_memhandler_read_rr(sh2, tmp, GET_Rn(), 0, 2 | MF_POSTINCR);
  4059. goto end_op;
  4060. case 0x08:
  4061. case 0x09:
  4062. switch (GET_Fx())
  4063. {
  4064. case 0: // SHLL2 Rn 0100nnnn00001000
  4065. // SHLR2 Rn 0100nnnn00001001
  4066. tmp = 2;
  4067. break;
  4068. case 1: // SHLL8 Rn 0100nnnn00011000
  4069. // SHLR8 Rn 0100nnnn00011001
  4070. tmp = 8;
  4071. break;
  4072. case 2: // SHLL16 Rn 0100nnnn00101000
  4073. // SHLR16 Rn 0100nnnn00101001
  4074. tmp = 16;
  4075. break;
  4076. default:
  4077. goto default_;
  4078. }
  4079. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  4080. if (op & 1) {
  4081. emith_lsr(tmp2, tmp3, tmp);
  4082. } else
  4083. emith_lsl(tmp2, tmp3, tmp);
  4084. goto end_op;
  4085. case 0x0a:
  4086. switch (GET_Fx())
  4087. {
  4088. case 0: // LDS Rm,MACH 0100mmmm00001010
  4089. tmp2 = SHR_MACH;
  4090. break;
  4091. case 1: // LDS Rm,MACL 0100mmmm00011010
  4092. tmp2 = SHR_MACL;
  4093. break;
  4094. case 2: // LDS Rm,PR 0100mmmm00101010
  4095. tmp2 = SHR_PR;
  4096. break;
  4097. default:
  4098. goto default_;
  4099. }
  4100. emit_move_r_r(tmp2, GET_Rn());
  4101. goto end_op;
  4102. case 0x0b:
  4103. switch (GET_Fx())
  4104. {
  4105. case 1: // TAS.B @Rn 0100nnnn00011011
  4106. // XXX: is TAS working on 32X?
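          // note: on real hardware TAS.B is a locked read-modify-write bus
          // cycle; here it is emulated as a separate read and write, with
          // the MSB set between the T test and the write-back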
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          tmp = emit_memhandler_read(0);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_EQ);
          emith_or_r_imm(tmp, 0x80);
          tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
          emith_move_r_r(tmp2, tmp);
          rcache_free_tmp(tmp);
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          emit_memhandler_write(0);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR 0100mmmm00001110
          tmp2 = SHR_SR;
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp2 = SHR_GBR;
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp2 = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp2 == SHR_SR) {
          emith_invalidate_t();
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          emith_write_sr(sr, tmp);
          drcf.test_irq = 1;
        } else
          emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0f: // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
        emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
        sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
        emith_sh2_macw(tmp3, tmp4, tmp, tmp2, sr);
        rcache_free_tmp(tmp2);
        rcache_free_tmp(tmp);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x05: // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2 | drcf.polling);
      goto end_op;

    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
      case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
        tmp = ((op & 7) >= 4 && GET_Rn() != GET_Rm()) ? MF_POSTINCR : drcf.polling;
        emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), 0, (op & 3) | tmp);
        goto end_op;
      case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
        emit_move_r_r(GET_Rn(), GET_Rm());
        goto end_op;
      default: // 0x07 ... 0x0f
        tmp = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
        switch (op & 0x0f)
        {
        case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
          emith_mvn_r_r(tmp2, tmp);
          break;
        case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
          tmp3 = tmp2;
          if (tmp == tmp2)
            tmp3 = rcache_get_tmp();
          tmp4 = rcache_get_tmp();
          emith_lsr(tmp3, tmp, 16);
          emith_or_r_r_lsl(tmp3, tmp, 24);
          emith_and_r_r_imm(tmp4, tmp, 0xff00);
          emith_or_r_r_lsl(tmp3, tmp4, 8);
          emith_rol(tmp2, tmp3, 16);
          rcache_free_tmp(tmp4);
          if (tmp == tmp2)
            rcache_free_tmp(tmp3);
          break;
        case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
          emith_rol(tmp2, tmp, 16);
          break;
        case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            emith_t_to_carry(sr, 1);
            emith_negc_r_r(tmp2, tmp);
          } else
#endif
          {
            EMITH_HINT_COND(DCOND_CS);
            emith_tpop_carry(sr, 1);
            emith_negcf_r_r(tmp2, tmp);
            emith_tpush_carry(sr, 1);
          }
          break;
        case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
          emith_neg_r_r(tmp2, tmp);
          break;
        case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
          emith_clear_msb(tmp2, tmp, 24);
          rcache_set_x16(tmp2, 1, 1);
          break;
        case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
          emith_clear_msb(tmp2, tmp, 16);
          rcache_set_x16(tmp2, 0, 1);
          break;
        case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
          emith_sext(tmp2, tmp, 8);
          rcache_set_x16(tmp2, 1, 0);
          break;
        case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
          emith_sext(tmp2, tmp, 16);
          rcache_set_x16(tmp2, 1, 0);
          break;
        }
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x07: // ADD #imm,Rn 0111nnnniiiiiiii
      if (op & 0x80) // adding negative
        emit_sub_r_imm(GET_Rn(), (u8)-op);
      else
        emit_add_r_imm(GET_Rn(), (u8)op);
      goto end_op;
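      // e.g. ADD #-1,R3 is 0x73ff: op & 0x80 is set and (u8)-op == 1,
      // so it is emitted as a host subtract of 1 instead of adding 0xff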

    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
      case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
        tmp = (op & 0x100) >> 8;
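        // disp is scaled by the access size (tmp: 0 = byte, 1 = word),
        // e.g. MOV.W R0,@(3,Rn) stores to Rn + (3 << 1) = Rn + 6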
        emit_memhandler_write_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
      case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
        tmp2 = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_cmp_r_imm(tmp2, (s8)(op & 0xff));
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // TST #imm,R0 11001000iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x0900: // AND #imm,R0 11001001iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
        emith_and_r_r_imm(tmp, tmp2, (op & 0xff));
        goto end_op;
      case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_eor_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0b00: // OR #imm,R0 11001011iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_or_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0 | drcf.polling);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_and_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_eor_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_or_r_r_imm(tmp2, tmp, (op & 0xff));
      end_rmw_op:
        rcache_free_tmp(tmp);
        emit_indirect_indexed_write(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x0e: // MOV #imm,Rn 1110nnnniiiiiiii
      emit_move_r_imm32(GET_Rn(), (s8)op);
      goto end_op;

    default:
    default_:
      if (!(op_flags[i] & OF_B_IN_DS)) {
        elprintf_sh2(sh2, EL_ANOMALY,
          "drc: illegal op %04x @ %08x", op, pc - 2);
        exit(1);
      }
    }

  end_op:
    rcache_unlock_all();
    rcache_set_usage_now(0);
#if DRC_DEBUG & 64
    RCACHE_CHECK("after insn");
#endif

    cycles += opd->cycles;

    if (op_flags[i+1] & OF_DELAY_OP) {
      do_host_disasm(tcache_id);
      continue;
    }

    // test irq?
    if (drcf.test_irq && !drcf.pending_branch_direct) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      if (!drcf.pending_branch_indirect)
        emit_move_r_imm32(SHR_PC, pc);
      rcache_flush();
      emith_call(sh2_drc_test_irq);
      drcf.test_irq = 0;
    }

    // branch handling
    if (drcf.pending_branch_direct)
    {
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      u32 target_pc = opd_b->imm;
      int cond = -1;
      int ctaken = 0;
      void *target = NULL;

      if (OP_ISBRACND(opd_b->op))
        ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
      cycles += ctaken; // assume branch taken
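      // ctaken is the extra cost of a taken branch over the fall-through
      // path; it is charged here and refunded below on the not-taken path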
#if LOOP_OPTIMIZER
      if ((drcf.loop_type == OF_IDLE_LOOP ||
          (drcf.loop_type == OF_DELAY_LOOP && drcf.delay_reg >= 0)))
      {
        // idle or delay loop
        emit_sync_t_to_sr();
        emith_sh2_delay_loop(cycles, drcf.delay_reg);
        rcache_unlock_all(); // may lock delay_reg
        drcf.polling = drcf.loop_type = drcf.pinning = 0;
      }
#endif

#if CALL_STACK
      void *rtsadd = NULL, *rtsret = NULL;
      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // BSR - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif

      // XXX move below cond test if not changing host cond (MIPS delay slot)?
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      rcache_clean();

      if (OP_ISBRACND(opd_b->op)) {
        // BT[S], BF[S] - emit condition test
        cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
        if (delay_dep_fw & BITMASK1(SHR_T)) {
          emith_sync_t(sr);
          emith_tst_r_imm(sr, T_save);
        } else {
          cond = emith_tst_t(sr, (opd_b->op == OP_BRANCH_CT));
          if (emith_get_t_cond() >= 0) {
            if (opd_b->op == OP_BRANCH_CT)
              emith_or_r_imm_c(cond, sr, T);
            else
              emith_bic_r_imm_c(cond, sr, T);
          }
        }
      } else
        emith_sync_t(sr);
      // no modification of host status/flags between here and branching!

      v = find_in_sorted_linkage(branch_targets, branch_target_count, target_pc);
      if (v >= 0)
      {
        // local branch
        if (branch_targets[v].ptr) {
          // local backward jump, link here now since host PC is already known
          target = branch_targets[v].ptr;
#if LOOP_OPTIMIZER
          if (pinned_loops[pinned_loop_count].pc == target_pc) {
            // backward jump at end of optimized loop
            rcache_unpin_all();
            target = pinned_loops[pinned_loop_count].ptr;
            pinned_loop_count++;
          }
#endif
          if (cond != -1) {
            if (emith_jump_patch_inrange(tcache_ptr, target)) {
              emith_jump_cond(cond, target);
            } else {
              // not reachable directly, must use far branch
              EMITH_JMP_START(emith_invert_cond(cond));
              emith_jump(target);
              EMITH_JMP_END(emith_invert_cond(cond));
            }
          } else {
            emith_jump(target);
            rcache_invalidate();
          }
        } else if (blx_target_count < MAX_LOCAL_BRANCHES) {
          // local forward jump
          target = tcache_ptr;
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = target_pc, .ptr = target, .mask = 0x2 };
          if (cond != -1)
            emith_jump_cond_patchable(cond, target);
          else {
            emith_jump_patchable(target);
            rcache_invalidate();
          }
        } else
          // no space for resolving forward branch, handle it as external
          dbg(1, "warning: too many unresolved branches");
      }

      if (target == NULL)
      {
        // can't resolve branch locally, make a block exit
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (cond != -1) {
#if 1
          if (bl && blx_target_count < ARRAY_SIZE(blx_targets)) {
            // conditional jumps get a blx stub for the far jump
            bl->type = BL_JCCBLX;
            target = tcache_ptr;
            blx_targets[blx_target_count++] =
              (struct linkage) { .pc = target_pc, .ptr = target, .bl = bl };
            emith_jump_cond_patchable(cond, target);
          } else {
            // not linkable, or blx table full; inline jump @dispatcher
            EMITH_JMP_START(emith_invert_cond(cond));
            if (bl) {
              bl->jump = tcache_ptr;
              emith_flush(); // flush to inhibit insn swapping
              bl->type = BL_LDJMP;
            }
            tmp = rcache_get_tmp_arg(0);
            emith_move_r_imm(tmp, target_pc);
            rcache_free_tmp(tmp);
            target = sh2_drc_dispatcher;

            emith_jump_patchable(target);
            EMITH_JMP_END(emith_invert_cond(cond));
          }
#else
          // jump @dispatcher - ARM 32bit version with conditional execution
          EMITH_SJMP_START(emith_invert_cond(cond));
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm_c(cond, tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;

          if (bl) {
            bl->jump = tcache_ptr;
            bl->type = BL_JMP;
          }
          emith_jump_cond_patchable(cond, target);
          EMITH_SJMP_END(emith_invert_cond(cond));
#endif
        } else {
          // unconditional, has the far jump inlined
          if (bl) {
            emith_flush(); // flush to inhibit insn swapping
            bl->type = BL_LDJMP;
          }

          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;

          emith_jump_patchable(target);
          rcache_invalidate();
        }
      }

#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif

      // branch not taken, correct cycle count
      if (ctaken)
        cycles -= ctaken;
      // set T bit to reflect branch not taken for OP_BRANCH_CT/CF
      if (emith_get_t_cond() >= 0) // T is synced for all other cases
        emith_set_t(sr, opd_b->op == OP_BRANCH_CF);

      drcf.pending_branch_direct = 0;
      if (target_pc >= base_pc && target_pc < pc)
        drcf.polling = drcf.loop_type = 0;
    }
    else if (drcf.pending_branch_indirect) {
      u32 target_pc;

      tmp = rcache_get_reg_arg(0, SHR_PC, NULL);

#if CALL_STACK
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      void *rtsadd = NULL, *rtsret = NULL;

      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // JSR, BSRF - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif

      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      rcache_clean();

#if CALL_STACK
      if (opd_b->rm == SHR_PR) {
        // RTS - restore rts data, else jump to dispatcher
        emith_jump(sh2_drc_dispatcher_return);
      } else
#endif
      if (gconst_get(SHR_PC, &target_pc)) {
        // JMP, JSR, BRAF, BSRF const - treat like unconditional direct branch
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (bl) // pc already loaded somewhere else, can patch jump only
          bl->type = BL_JMP;
        emith_jump_patchable(sh2_drc_dispatcher);
      } else {
        // JMP, JSR, BRAF, BSRF not const
        emith_jump(sh2_drc_dispatcher);
      }
      rcache_invalidate();

#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif

      drcf.pending_branch_indirect = 0;
      drcf.polling = drcf.loop_type = 0;
    }
    rcache_unlock_all();

    do_host_disasm(tcache_id);
  }

  // check the last op
  if (op_flags[i-1] & OF_DELAY_OP)
    opd = &ops[i-2];
  else
    opd = &ops[i-1];

  if (!OP_ISBRAUC(opd->op))
  {
    tmp = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    FLUSH_CYCLES(tmp);
    emith_sync_t(tmp);
    rcache_clean();

    bl = dr_prepare_ext_branch(block->entryp, pc, sh2->is_slave, tcache_id);
    if (bl) {
      emith_flush(); // flush to inhibit insn swapping
      bl->type = BL_LDJMP;
    }
    tmp = rcache_get_tmp_arg(0);
    emith_move_r_imm(tmp, pc);
    emith_jump_patchable(sh2_drc_dispatcher);
    rcache_invalidate();
  } else
    rcache_flush();

  // link unresolved branches, emitting blx area entries as needed
  emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
      branch_target_count, blx_targets, blx_target_count);

  emith_flush();
  do_host_disasm(tcache_id);

  emith_pool_commit(0);

  // fill blx backup; do this last to backup final patched code
  for (i = 0; i < block->entry_count; i++)
    for (bl = block->entryp[i].o_links; bl; bl = bl->o_next)
      memcpy(bl->jdisp, bl->blx ? bl->blx : bl->jump, emith_jump_at_size());

  ring_alloc(&tcache_ring[tcache_id], tcache_ptr - block_entry_ptr);
  host_instructions_updated(block_entry_ptr, tcache_ptr, 1);

  dr_activate_block(block, tcache_id, sh2->is_slave);
  emith_update_cache();

  do_host_disasm(tcache_id);

  dbg(2, " block #%d,%d -> %p tcache %d/%d, insns %d -> %d %.3f",
    tcache_id, blkid_main, tcache_ptr,
    tcache_ring[tcache_id].used, tcache_ring[tcache_id].size,
    insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);

  if ((sh2->pc & 0xc6000000) == 0x02000000) { // ROM
    dbg(2, " hash collisions %d/%d", hash_collisions, block_ring[tcache_id].used);
    Pico32x.emu_flags |= P32XF_DRC_ROM_C;
  }
/*
  printf("~~~\n");
  tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
  do_host_disasm(tcache_id);
  printf("~~~\n");
*/

#if (DRC_DEBUG)
  fflush(stdout);
#endif

  return block_entry_ptr;
}

static void sh2_generate_utils(void)
{
  int arg0, arg1, arg2, arg3, sr, tmp, tmp2;
#if DRC_DEBUG
  int hic = host_insn_count; // don't count utils for insn statistics
#endif

  host_arg2reg(arg0, 0);
  host_arg2reg(arg1, 1);
  host_arg2reg(arg2, 2);
  host_arg2reg(arg3, 3);
  emith_move_r_r(arg0, arg0); // nop
  emith_flush();

  // sh2_drc_write8(u32 a, u32 d)
  sh2_drc_write8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write8_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write16(u32 a, u32 d)
  sh2_drc_write16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write16_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write32(u32 a, u32 d)
  sh2_drc_write32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write32_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // d = sh2_drc_read8(u32 a)
  sh2_drc_read8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_eor_r_imm_ptr_c(DCOND_CC, arg0, 1);
  emith_read8s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read16(u32 a)
  sh2_drc_read16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read16s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read32(u32 a)
  sh2_drc_read32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ror_c(DCOND_CC, RET_REG, RET_REG, 16);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read8_poll(u32 a)
  sh2_drc_read8_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_eor_r_imm_ptr(arg1, 1);
  emith_read8s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory8);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read16_poll(u32 a)
  sh2_drc_read16_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read16s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory16);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read32_poll(u32 a)
  sh2_drc_read32_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read_r_r_r(arg1, arg2, arg1);
  emith_ror(arg1, arg1, 16);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory32);
  emith_pop_and_ret(arg1);
  emith_flush();

  // sh2_drc_exit(u32 pc)
  sh2_drc_exit = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
  emit_do_static_regs(1, arg2);
  emith_sh2_drc_exit();
  emith_flush();

  // sh2_drc_dispatcher(u32 pc)
  sh2_drc_dispatcher = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
#if BRANCH_CACHE
  // check if PC is in branch target cache
  emith_and_r_r_imm(arg1, arg0, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_read_r_r_offs(arg2, arg1, offsetof(SH2, branch_cache));
  emith_cmp_r_r(arg2, arg0);
  EMITH_SJMP_START(DCOND_NE);
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bchit);
  emith_read_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_EQ, arg3, 1);
  emith_write_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
#endif
  emith_read_r_r_offs_ptr_c(DCOND_EQ, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  emith_jump_reg_c(DCOND_EQ, RET_REG);
  EMITH_SJMP_END(DCOND_NE);
#endif
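  // the branch target cache is a direct-mapped array of {pc, host ptr}
  // pairs in the SH2 context: the masked pc selects an 8-byte entry
  // (16 bytes with 64-bit pointers, hence the extra shift), and a hit
  // jumps straight to the cached block entry, skipping dr_lookup_block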
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_add_r_r_ptr_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  emith_abicall(dr_lookup_block);
  // store PC and block entry ptr (in arg0) in branch target cache
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
#if BRANCH_CACHE
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_NE, arg3, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
#endif
  emith_ctx_read_c(DCOND_NE, arg2, SHR_PC * 4);
  emith_and_r_r_imm(arg1, arg2, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_write_r_r_offs_c(DCOND_NE, arg2, arg1, offsetof(SH2, branch_cache));
  emith_write_r_r_offs_ptr_c(DCOND_NE, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
#endif
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // lookup failed, call sh2_translate()
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  emith_abicall(sh2_translate);
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // XXX: can't translate, fail
  emith_abicall(dr_failure);
  emith_flush();

#if CALL_STACK
  // pc = sh2_drc_dispatcher_call(u32 pc)
  sh2_drc_dispatcher_call = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg3, CONTEXT_REG, arg2, 0);
  rcache_get_reg_arg(2, SHR_PR, NULL);
  emith_add_r_ret(arg1);
  emith_write_r_r_offs_ptr(arg1, arg3, offsetof(SH2, rts_cache)+sizeof(void *));
  emith_write_r_r_offs(arg2, arg3, offsetof(SH2, rts_cache));
  rcache_flush();
  emith_ret();
  emith_flush();
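  // rts_cache is a small ring of {guest return pc, host return address}
  // pairs: BSR/JSR push PR together with the (offset-patched) host address
  // past the call site, and sh2_drc_dispatcher_return below pops an entry
  // and jumps to it directly when the RTS target matches the prediction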
  // sh2_drc_dispatcher_return(u32 pc)
  sh2_drc_dispatcher_return = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg2, 0);
  emith_read_r_r_offs(arg3, arg1, offsetof(SH2, rts_cache));
  emith_cmp_r_r(arg0, arg3);
#if (DRC_DEBUG & 128)
  EMITH_SJMP_START(DCOND_EQ);
  emith_move_r_ptr_imm(arg3, (uptr)&rcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_add_r_imm_c(DCOND_NE, arg1, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
  EMITH_SJMP_END(DCOND_EQ);
#else
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
#endif
  emith_read_r_r_offs_ptr(arg0, arg1, offsetof(SH2, rts_cache) + sizeof(void *));
  emith_sub_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg3, (uptr)&rchit);
  emith_read_r_r_offs(arg1, arg3, 0);
  emith_add_r_imm(arg1, 1);
  emith_write_r_r_offs(arg1, arg3, 0);
#endif
  emith_jump_reg(arg0);
  emith_flush();
#endif

  // sh2_drc_test_irq(void)
  // assumes it's called from main function (may jump to dispatcher)
  sh2_drc_test_irq = (void *)tcache_ptr;
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_lsr(arg0, sr, I_SHIFT);
  emith_and_r_imm(arg0, 0x0f);
  emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  EMITH_SJMP_START(DCOND_GT);
  emith_ret_c(DCOND_LE); // nope, return
  EMITH_SJMP_END(DCOND_GT);

  // adjust SP
  tmp = rcache_get_reg(SHR_SP, RC_GR_RMW, NULL);
  emith_sub_r_imm(tmp, 4*2);
  rcache_clean();

  // push SR
  tmp = rcache_get_reg_arg(0, SHR_SP, &tmp2);
  emith_add_r_r_imm(tmp, tmp2, 4);
  tmp = rcache_get_reg_arg(1, SHR_SR, NULL);
  emith_clear_msb(tmp, tmp, 22);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_abicall(p32x_sh2_write32); // XXX: use sh2_drc_write32?

  // push PC
  rcache_get_reg_arg(0, SHR_SP, NULL);
  rcache_get_reg_arg(1, SHR_PC, NULL);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_abicall(p32x_sh2_write32);

  // update I, cycles, do callback
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  emith_bic_r_imm(sr, I);
  emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  rcache_flush();
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);

  // obtain new PC
  tmp = rcache_get_reg_arg(1, SHR_VBR, &tmp2);
  emith_add_r_r_r_lsl(arg0, tmp2, RET_REG, 2);
  emith_call(sh2_drc_read32);
  if (arg0 != RET_REG)
    emith_move_r_r(arg0, RET_REG);
  emith_call_cleanup();
  rcache_invalidate();
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

  // sh2_drc_entry(SH2 *sh2)
  sh2_drc_entry = (void *)tcache_ptr;
  emith_sh2_drc_entry();
  emith_move_r_r_ptr(CONTEXT_REG, arg0); // move ctx, arg0
  emit_do_static_regs(0, arg2);
  emith_call(sh2_drc_test_irq);
  emith_ctx_read(arg0, SHR_PC * 4);
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

#ifdef DRC_SR_REG
  // sh2_drc_save_sr(SH2 *sh2)
  sh2_drc_save_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_write_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_invalidate();
  emith_ret();
  emith_flush();

  // sh2_drc_restore_sr(SH2 *sh2)
  sh2_drc_restore_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_WRITE, NULL);
  emith_read_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_flush();
  emith_ret();
  emith_flush();
#endif

#ifdef PDB_NET
  // debug
#define MAKE_READ_WRAPPER(func) { \
  void *tmp = (void *)tcache_ptr; \
  emith_push_ret(); \
  emith_call(func); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_addf_r_r(arg2, arg0); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_adc_r_imm(arg2, 0x01000000); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_pop_and_ret(); \
  emith_flush(); \
  func = tmp; \
}
#define MAKE_WRITE_WRAPPER(func) { \
  void *tmp = (void *)tcache_ptr; \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_addf_r_r(arg2, arg1); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_adc_r_imm(arg2, 0x01000000); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_move_r_r_ptr(arg2, CONTEXT_REG); \
  emith_jump(func); \
  emith_flush(); \
  func = tmp; \
}

  MAKE_READ_WRAPPER(sh2_drc_read8);
  MAKE_READ_WRAPPER(sh2_drc_read16);
  MAKE_READ_WRAPPER(sh2_drc_read32);
  MAKE_WRITE_WRAPPER(sh2_drc_write8);
  MAKE_WRITE_WRAPPER(sh2_drc_write16);
  MAKE_WRITE_WRAPPER(sh2_drc_write32);
  MAKE_READ_WRAPPER(sh2_drc_read8_poll);
  MAKE_READ_WRAPPER(sh2_drc_read16_poll);
  MAKE_READ_WRAPPER(sh2_drc_read32_poll);
#endif

  emith_pool_commit(0);
  rcache_invalidate();
#if (DRC_DEBUG & 4)
  host_dasm_new_symbol(sh2_drc_entry);
  host_dasm_new_symbol(sh2_drc_dispatcher);
#if CALL_STACK
  host_dasm_new_symbol(sh2_drc_dispatcher_call);
  host_dasm_new_symbol(sh2_drc_dispatcher_return);
#endif
  host_dasm_new_symbol(sh2_drc_exit);
  host_dasm_new_symbol(sh2_drc_test_irq);
  host_dasm_new_symbol(sh2_drc_write8);
  host_dasm_new_symbol(sh2_drc_write16);
  host_dasm_new_symbol(sh2_drc_write32);
  host_dasm_new_symbol(sh2_drc_read8);
  host_dasm_new_symbol(sh2_drc_read16);
  host_dasm_new_symbol(sh2_drc_read32);
  host_dasm_new_symbol(sh2_drc_read8_poll);
  host_dasm_new_symbol(sh2_drc_read16_poll);
  host_dasm_new_symbol(sh2_drc_read32_poll);
#ifdef DRC_SR_REG
  host_dasm_new_symbol(sh2_drc_save_sr);
  host_dasm_new_symbol(sh2_drc_restore_sr);
#endif
#endif
#if DRC_DEBUG
  host_insn_count = hic;
#endif
}

static void sh2_smc_rm_blocks(u32 a, int len, int tcache_id, u32 shift)
{
  struct block_list **blist, *entry, *next;
  u32 mask = RAM_SIZE(tcache_id) - 1;
  u32 wtmask = ~0x20000000; // writethrough area mask
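  // SH2 address bit 29 (0x20000000) selects the cache-through mirror of
  // the same memory, so it is masked off to fold both views together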
  u32 start_addr, end_addr;
  u32 start_lit, end_lit;
  struct block_desc *block;
#if (DRC_DEBUG & 2)
  int removed = 0;
#endif

  // ignore cache-through
  a &= wtmask;

  blist = &inval_lookup[tcache_id][(a & mask) / INVAL_PAGE_SIZE];
  entry = *blist;
  // go through the block list for this range
  while (entry != NULL) {
    next = entry->next;
    block = entry->block;
    start_addr = block->addr & wtmask;
    end_addr = start_addr + block->size;
    start_lit = block->addr_lit & wtmask;
    end_lit = start_lit + block->size_lit;
    // disable/delete block if it covers the modified address
    if ((start_addr < a+len && a < end_addr) ||
        (start_lit < a+len && a < end_lit))
    {
      dbg(2, "smc remove @%08x", a);
      end_addr = (start_lit < a+len && block->size_lit ? a : 0);
      dr_rm_block_entry(block, tcache_id, end_addr, 0);
#if (DRC_DEBUG & 2)
      removed = 1;
#endif
    }
    entry = next;
  }
#if (DRC_DEBUG & 2)
  if (!removed)
    dbg(2, "rm_blocks called @%08x, no work?", a);
#endif
#if BRANCH_CACHE
  if (tcache_id)
    memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  else {
    memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
  }
#endif
#if CALL_STACK
  if (tcache_id) {
    memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    sh2s[tcache_id-1].rts_cache_idx = 0;
  } else {
    memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  }
#endif
}

void sh2_drc_wcheck_ram(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 0, SH2_DRCBLK_RAM_SHIFT);
}

void sh2_drc_wcheck_da(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 1 + sh2->is_slave, SH2_DRCBLK_DA_SHIFT);
}

int sh2_execute_drc(SH2 *sh2c, int cycles)
{
  int ret_cycles;

  // cycles are kept in SHR_SR unused bits (upper 20)
  // bit11 contains T saved for delay slot
  // others are usual SH2 flags
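  // e.g. a call with cycles=1000 and T=1 enters with
  // sr = (1000 << 12) | 1 = 0x003e8001; the generated code counts the
  // budget down in the upper bits, so ret_cycles below should be <= 0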
  5083. sh2c->sr &= 0x3f3;
  5084. sh2c->sr |= cycles << 12;
  5085. sh2c->state |= SH2_IN_DRC;
  5086. sh2_drc_entry(sh2c);
  5087. sh2c->state &= ~SH2_IN_DRC;
  5088. // TODO: irq cycles
  5089. ret_cycles = (int32_t)sh2c->sr >> 12;
  5090. if (ret_cycles > 0)
  5091. dbg(1, "warning: drc returned with cycles: %d, pc %08x", ret_cycles, sh2c->pc);
  5092. sh2c->sr &= 0x3f3;
  5093. return ret_cycles;
  5094. }
  5095. static void block_stats(void)
  5096. {
  5097. #if (DRC_DEBUG & 2)
  5098. int c, b, i;
  5099. long total = 0;
  5100. printf("block stats:\n");
  5101. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5102. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5103. if (block_tables[b][i].addr != 0)
  5104. total += block_tables[b][i].refcount;
  5105. }
  5106. printf("total: %ld\n",total);
  5107. for (c = 0; c < 20; c++) {
  5108. struct block_desc *blk, *maxb = NULL;
  5109. int max = 0;
  5110. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5111. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5112. if ((blk = &block_tables[b][i])->addr != 0 && blk->refcount > max) {
  5113. max = blk->refcount;
  5114. maxb = blk;
  5115. }
  5116. }
  5117. if (maxb == NULL)
  5118. break;
  5119. printf("%08x %p %9d %2.3f%%\n", maxb->addr, maxb->tcache_ptr, maxb->refcount,
  5120. (double)maxb->refcount / total * 100.0);
  5121. maxb->refcount = 0;
  5122. }
  5123. for (b = 0; b < ARRAY_SIZE(block_tables); b++)
  5124. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5125. block_tables[b][i].refcount = 0;
  5126. #endif
  5127. }
  5128. void entry_stats(void)
  5129. {
  5130. #if (DRC_DEBUG & 32)
  5131. int c, b, i, j;
  5132. long total = 0;
  5133. printf("block entry stats:\n");
  5134. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5135. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5136. for (j = 0; j < block_tables[b][i].entry_count; j++)
  5137. total += block_tables[b][i].entryp[j].entry_count;
  5138. }
  5139. printf("total: %ld\n",total);
  5140. for (c = 0; c < 20; c++) {
  5141. struct block_desc *blk;
  5142. struct block_entry *maxb = NULL;
  5143. int max = 0;
  5144. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5145. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size) {
  5146. blk = &block_tables[b][i];
  5147. for (j = 0; j < blk->entry_count; j++)
  5148. if (blk->entryp[j].entry_count > max) {
  5149. max = blk->entryp[j].entry_count;
  5150. maxb = &blk->entryp[j];
  5151. }
  5152. }
  5153. }
  5154. if (maxb == NULL)
  5155. break;
  5156. printf("%08x %p %9d %2.3f%%\n", maxb->pc, maxb->tcache_ptr, maxb->entry_count,
  5157. (double)100 * maxb->entry_count / total);
  5158. maxb->entry_count = 0;
  5159. }
  5160. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5161. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5162. for (j = 0; j < block_tables[b][i].entry_count; j++)
  5163. block_tables[b][i].entryp[j].entry_count = 0;
  5164. }
  5165. #endif
  5166. }
  5167. static void backtrace(void)
  5168. {
  5169. #if (DRC_DEBUG & 1024)
  5170. int i;
  5171. printf("backtrace master:\n");
  5172. for (i = 0; i < ARRAY_SIZE(csh2[0]); i++)
  5173. SH2_DUMP(&csh2[0][i], "bt msh2");
  5174. printf("backtrace slave:\n");
  5175. for (i = 0; i < ARRAY_SIZE(csh2[1]); i++)
  5176. SH2_DUMP(&csh2[1][i], "bt ssh2");
  5177. #endif
  5178. }
  5179. static void state_dump(void)
  5180. {
  5181. #if (DRC_DEBUG & 2048)
  5182. int i;
  5183. SH2_DUMP(&sh2s[0], "master");
  5184. printf("VBR msh2: %x\n", sh2s[0].vbr);
  5185. for (i = 0; i < 0x60; i++) {
  5186. printf("%08x ",p32x_sh2_read32(sh2s[0].vbr + i*4, &sh2s[0]));
  5187. if ((i+1) % 8 == 0) printf("\n");
  5188. }
  5189. printf("stack msh2: %x\n", sh2s[0].r[15]);
  5190. for (i = -0x30; i < 0x30; i++) {
  5191. printf("%08x ",p32x_sh2_read32(sh2s[0].r[15] + i*4, &sh2s[0]));
  5192. if ((i+1) % 8 == 0) printf("\n");
  5193. }
  5194. SH2_DUMP(&sh2s[1], "slave");
  5195. printf("VBR ssh2: %x\n", sh2s[1].vbr);
  5196. for (i = 0; i < 0x60; i++) {
  5197. printf("%08x ",p32x_sh2_read32(sh2s[1].vbr + i*4, &sh2s[1]));
  5198. if ((i+1) % 8 == 0) printf("\n");
  5199. }
  5200. printf("stack ssh2: %x\n", sh2s[1].r[15]);
  5201. for (i = -0x30; i < 0x30; i++) {
  5202. printf("%08x ",p32x_sh2_read32(sh2s[1].r[15] + i*4, &sh2s[1]));
  5203. if ((i+1) % 8 == 0) printf("\n");
  5204. }
  5205. #endif
  5206. }
  5207. static void bcache_stats(void)
  5208. {
  5209. #if (DRC_DEBUG & 128)
  5210. int i;
  5211. #if CALL_STACK
  5212. for (i = 1; i < ARRAY_SIZE(sh2s->rts_cache); i++)
  5213. if (sh2s[0].rts_cache[i].pc == -1 && sh2s[1].rts_cache[i].pc == -1) break;
  5214. printf("return cache hits:%d misses:%d depth: %d index: %d/%d\n", rchit, rcmiss, i,sh2s[0].rts_cache_idx,sh2s[1].rts_cache_idx);
  5215. for (i = 0; i < ARRAY_SIZE(sh2s[0].rts_cache); i++) {
  5216. printf("%08x ",sh2s[0].rts_cache[i].pc);
  5217. if ((i+1) % 8 == 0) printf("\n");
  5218. }
  5219. for (i = 0; i < ARRAY_SIZE(sh2s[1].rts_cache); i++) {
  5220. printf("%08x ",sh2s[1].rts_cache[i].pc);
  5221. if ((i+1) % 8 == 0) printf("\n");
  5222. }
  5223. #endif
  5224. #if BRANCH_CACHE
  5225. printf("branch cache hits:%d misses:%d\n", bchit, bcmiss);
  5226. printf("branch cache master:\n");
  5227. for (i = 0; i < ARRAY_SIZE(sh2s[0].branch_cache); i++) {
  5228. printf("%08x ",sh2s[0].branch_cache[i].pc);
  5229. if ((i+1) % 8 == 0) printf("\n");
  5230. }
  5231. printf("branch cache slave:\n");
  5232. for (i = 0; i < ARRAY_SIZE(sh2s[1].branch_cache); i++) {
  5233. printf("%08x ",sh2s[1].branch_cache[i].pc);
  5234. if ((i+1) % 8 == 0) printf("\n");
  5235. }
  5236. #endif
  5237. #endif
  5238. }
  5239. void sh2_drc_flush_all(void)
  5240. {
  5241. backtrace();
  5242. state_dump();
  5243. block_stats();
  5244. entry_stats();
  5245. bcache_stats();
  5246. dr_flush_tcache(0);
  5247. dr_flush_tcache(1);
  5248. dr_flush_tcache(2);
  5249. Pico32x.emu_flags &= ~P32XF_DRC_ROM_C;
  5250. }
  5251. void sh2_drc_mem_setup(SH2 *sh2)
  5252. {
  5253. // fill the DRC-only convenience pointers
  5254. sh2->p_drcblk_da = Pico32xMem->drcblk_da[!!sh2->is_slave];
  5255. sh2->p_drcblk_ram = Pico32xMem->drcblk_ram;
  5256. }
  5257. int sh2_drc_init(SH2 *sh2)
  5258. {
  5259. int i;
  5260. if (block_tables[0] == NULL)
  5261. {
  5262. for (i = 0; i < TCACHE_BUFFERS; i++) {
  5263. block_tables[i] = calloc(BLOCK_MAX_COUNT(i), sizeof(*block_tables[0]));
  5264. if (block_tables[i] == NULL)
  5265. goto fail;
  5266. entry_tables[i] = calloc(ENTRY_MAX_COUNT(i), sizeof(*entry_tables[0]));
  5267. if (entry_tables[i] == NULL)
  5268. goto fail;
  5269. block_link_pool[i] = calloc(BLOCK_LINK_MAX_COUNT(i),
  5270. sizeof(*block_link_pool[0]));
  5271. if (block_link_pool[i] == NULL)
  5272. goto fail;
  5273. inval_lookup[i] = calloc(RAM_SIZE(i) / INVAL_PAGE_SIZE,
  5274. sizeof(inval_lookup[0]));
  5275. if (inval_lookup[i] == NULL)
  5276. goto fail;
  5277. hash_tables[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*hash_tables[0]));
  5278. if (hash_tables[i] == NULL)
  5279. goto fail;
  5280. unresolved_links[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*unresolved_links[0]));
  5281. if (unresolved_links[i] == NULL)
  5282. goto fail;
  5283. //atexit(sh2_drc_finish);
  5284. RING_INIT(&block_ring[i], block_tables[i], BLOCK_MAX_COUNT(i));
  5285. RING_INIT(&entry_ring[i], entry_tables[i], ENTRY_MAX_COUNT(i));
  5286. }
  5287. block_list_pool = calloc(BLOCK_LIST_MAX_COUNT, sizeof(*block_list_pool));
  5288. if (block_list_pool == NULL)
  5289. goto fail;
  5290. block_list_pool_count = 0;
  5291. blist_free = NULL;
  5292. memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
  5293. memset(blink_free, 0, sizeof(blink_free));
  5294. drc_cmn_init();
  5295. rcache_init();
  5296. tcache_ptr = tcache;
  5297. sh2_generate_utils();
  5298. host_instructions_updated(tcache, tcache_ptr, 1);
  5299. emith_update_cache();
  5300. i = tcache_ptr - tcache;
  5301. RING_INIT(&tcache_ring[0], tcache_ptr, tcache_sizes[0] - i);
  5302. for (i = 1; i < ARRAY_SIZE(tcache_ring); i++) {
  5303. RING_INIT(&tcache_ring[i], tcache_ring[i-1].base + tcache_ring[i-1].size,
  5304. tcache_sizes[i]);
  5305. }
  5306. #if (DRC_DEBUG & 4)
  5307. for (i = 0; i < ARRAY_SIZE(block_tables); i++)
  5308. tcache_dsm_ptrs[i] = tcache_ring[i].base;
  5309. // disasm the utils
  5310. tcache_dsm_ptrs[0] = tcache;
  5311. do_host_disasm(0);
  5312. fflush(stdout);
  5313. #endif
  5314. #if (DRC_DEBUG & 1)
  5315. hash_collisions = 0;
  5316. #endif
  5317. }
  5318. memset(sh2->branch_cache, -1, sizeof(sh2->branch_cache));
  5319. memset(sh2->rts_cache, -1, sizeof(sh2->rts_cache));
  5320. sh2->rts_cache_idx = 0;
  5321. return 0;
  5322. fail:
  5323. sh2_drc_finish(sh2);
  5324. return -1;
  5325. }

void sh2_drc_finish(SH2 *sh2)
{
  int i;

  if (block_tables[0] == NULL)
    return;

#if (DRC_DEBUG & (256|512))
  if (trace[0]) fclose(trace[0]);
  if (trace[1]) fclose(trace[1]);
  trace[0] = trace[1] = NULL;
#endif

#if (DRC_DEBUG & 4)
  for (i = 0; i < TCACHE_BUFFERS; i++) {
    printf("~~~ tcache %d\n", i);
#if 0
    if (tcache_ring[i].first < tcache_ring[i].next) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    } else if (tcache_ring[i].used) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].base + tcache_ring[i].size;
      do_host_disasm(i);
      tcache_dsm_ptrs[i] = tcache_ring[i].base;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    }
#endif
    printf("max links: %d\n", block_link_pool_counts[i]);
  }
  printf("max block list: %d\n", block_list_pool_count);
#endif

  sh2_drc_flush_all();

  for (i = 0; i < TCACHE_BUFFERS; i++) {
    if (block_tables[i] != NULL)
      free(block_tables[i]);
    block_tables[i] = NULL;
    if (entry_tables[i] != NULL)
      free(entry_tables[i]);
    entry_tables[i] = NULL;
    if (block_link_pool[i] != NULL)
      free(block_link_pool[i]);
    block_link_pool[i] = NULL;
    blink_free[i] = NULL;
    if (inval_lookup[i] != NULL)
      free(inval_lookup[i]);
    inval_lookup[i] = NULL;
    if (hash_tables[i] != NULL) {
      free(hash_tables[i]);
      hash_tables[i] = NULL;
    }
  }

  if (block_list_pool != NULL)
    free(block_list_pool);
  block_list_pool = NULL;
  blist_free = NULL;

  drc_cmn_cleanup();
}

#endif /* DRC_SH2 */
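
// dr_get_pc_base() returns a host pointer biased so that indexing it with the
// full guest pc (presumably via the FETCH_OP() accessors) lands on the right
// backing memory even for mirrored regions: p32x_sh2_get_mem_ptr() hands back
// the region base and its mirror mask, and subtracting (pc & ~mask) folds the
// mirror offset into the base. Hypothetical example, for illustration only:
// with mask 0x3ffff and pc 0x06040122, (pc & ~mask) is 0x06040000, so
// base + pc ends up at region offset 0x122, the same byte that pc 0x06000122
// would reach.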
static void *dr_get_pc_base(u32 pc, SH2 *sh2)
{
  void *ret;
  u32 mask = 0;

  ret = p32x_sh2_get_mem_ptr(pc, &mask, sh2);
  if (ret == (void *)-1)
    return ret;

  return (char *)ret - (pc & ~mask);
}

u16 scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc_out,
               u32 *base_literals_out, u32 *end_literals_out)
{
  u16 *dr_pc_base;
  u32 pc, op, tmp;
  u32 end_pc, end_literals = 0;
  u32 lowest_literal = 0;
  u32 lowest_mova = 0;
  struct op_data *opd;
  int next_is_delay = 0;
  int end_block = 0;
  int is_divop;
  int i, i_end, i_div = -1;
  u32 crc = 0;
  // 2nd pass stuff
  int last_btarget; // loop detector
  enum { T_UNKNOWN, T_CLEAR, T_SET } t; // T propagation state

  memset(op_flags, 0, sizeof(*op_flags) * BLOCK_INSN_LIMIT);
  op_flags[0] |= OF_BTARGET; // block start is always a target

  dr_pc_base = dr_get_pc_base(base_pc, &sh2s[!!is_slave]);
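
  // Roadmap for the two passes below: pass 1 decodes each opcode into ops[]
  // (source/dest register bitmasks, cycle counts, branch targets) until a
  // block-ending condition is found; pass 2 walks ops[] to propagate the T
  // flag into conditional branches, trim unreachable tails, verify divide
  // sequences, size the literal pool, mark inner loops, and sum up the crc
  // of the scanned area.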

  // 1st pass: disassemble
  for (i = 0, pc = base_pc; ; i++, pc += 2) {
    // we need an ops[] entry after the last one initialized,
    // so do it before end_block checks
    opd = &ops[i];
    opd->op = OP_UNHANDLED;
    opd->rm = -1;
    opd->source = opd->dest = 0;
    opd->cycles = 1;
    opd->imm = 0;

    if (next_is_delay) {
      op_flags[i] |= OF_DELAY_OP;
      next_is_delay = 0;
    }
    else if (end_block || i >= BLOCK_INSN_LIMIT - 2)
      break;
    else if ((lowest_mova && lowest_mova <= pc) ||
              (lowest_literal && lowest_literal <= pc))
      break; // text area collides with data area
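    // (lowest_mova/lowest_literal track the lowest data address seen so far
    // in PC-relative loads and MOVA; once pc runs into that area, what
    // follows is literal pool data rather than code, so scanning stops.)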

    is_divop = 0;
    op = FETCH_OP(pc);
    switch ((op & 0xf000) >> 12)
    {
    /////////////////////////////////////////////
    case 0x00:
      switch (op & 0x0f)
      {
      case 0x02:
        switch (GET_Fx())
        {
        case 0: // STC SR,Rn  0000nnnn00000010
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // STC GBR,Rn 0000nnnn00010010
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // STC VBR,Rn 0000nnnn00100010
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = tmp;
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x03:
        CHECK_UNHANDLED_BITS(0xd0, undefined);
        // BRAF Rm    0000mmmm00100011
        // BSRF Rm    0000mmmm00000011
        opd->op = OP_BRANCH_RF;
        opd->rm = GET_Rn();
        opd->source = BITMASK2(SHR_PC, opd->rm);
        opd->dest = BITMASK1(SHR_PC);
        if (!(op & 0x20))
          opd->dest |= BITMASK1(SHR_PR);
        opd->cycles = 2;
        next_is_delay = 1;
        if (!(opd->dest & BITMASK1(SHR_PR)))
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        else
          op_flags[i+1+next_is_delay] |= OF_BTARGET;
        break;
      case 0x04: // MOV.B Rm,@(R0,Rn)   0000nnnnmmmm0100
      case 0x05: // MOV.W Rm,@(R0,Rn)   0000nnnnmmmm0101
      case 0x06: // MOV.L Rm,@(R0,Rn)   0000nnnnmmmm0110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x07:
        // MUL.L Rm,Rn         0000nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        opd->cycles = 2;
        break;
      case 0x08:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // CLRT        0000000000001000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 0;
          break;
        case 1: // SETT        0000000000011000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 1;
          break;
        case 2: // CLRMAC      0000000000101000
          opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // NOP         0000000000001001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          break;
        case 1: // DIV0U       0000000000011001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          opd->op = OP_DIV0;
          opd->source = BITMASK1(SHR_SR);
          opd->dest = BITMASK2(SHR_SR, SHR_T);
          div(opd) = (struct div){ .rn=SHR_MEM, .rm=SHR_MEM, .ro=SHR_MEM };
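          // (SHR_MEM here is a placeholder meaning "register not yet known";
          // the DIV1/ROTCL handlers below fill in the real register pair, as
          // their checks against SHR_MEM suggest.)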
          i_div = i;
          is_divop = 1;
          break;
        case 2: // MOVT Rn     0000nnnn00101001
          opd->source = BITMASK1(SHR_T);
          opd->dest = BITMASK1(GET_Rn());
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // STS MACH,Rn 0000nnnn00001010
          tmp = SHR_MACH;
          break;
        case 1: // STS MACL,Rn 0000nnnn00011010
          tmp = SHR_MACL;
          break;
        case 2: // STS PR,Rn   0000nnnn00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(tmp);
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0b:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // RTS         0000000000001011
          opd->op = OP_BRANCH_R;
          opd->rm = SHR_PR;
          opd->source = BITMASK1(opd->rm);
          opd->dest = BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        case 1: // SLEEP       0000000000011011
          opd->op = OP_SLEEP;
          end_block = 1;
          break;
        case 2: // RTE         0000000000101011
          opd->op = OP_RTE;
          opd->source = BITMASK1(SHR_SP);
          opd->dest = BITMASK4(SHR_SP, SHR_SR, SHR_T, SHR_PC);
          opd->cycles = 4;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0c: // MOV.B @(R0,Rm),Rn   0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn   0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn   0000nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(GET_Rn());
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0f: // MAC.L @Rm+,@Rn+     0000nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x01:
      // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), GET_Rn());
      opd->dest = BITMASK1(SHR_MEM);
      opd->imm = (op & 0x0f) * 4;
      break;

    /////////////////////////////////////////////
    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn        0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn        0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn        0010nnnnmmmm0010
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x04: // MOV.B Rm,@-Rn       0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn       0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn       0010nnnnmmmm0110
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x07: // DIV0S Rm,Rn         0010nnnnmmmm0111
        opd->op = OP_DIV0;
        opd->source = BITMASK3(SHR_SR, GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_SR, SHR_T);
        div(opd) = (struct div){ .rn=GET_Rn(), .rm=GET_Rm(), .ro=SHR_MEM };
        i_div = i;
        is_divop = 1;
        break;
      case 0x08: // TST Rm,Rn           0010nnnnmmmm1000
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x09: // AND Rm,Rn           0010nnnnmmmm1001
      case 0x0a: // XOR Rm,Rn           0010nnnnmmmm1010
      case 0x0b: // OR  Rm,Rn           0010nnnnmmmm1011
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0c: // CMP/STR Rm,Rn       0010nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x0d: // XTRCT Rm,Rn         0010nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0e: // MULU.W Rm,Rn        0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn        0010nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn        0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn        0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn        0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn        0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn        0011nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
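      // A hardware divide is emitted as DIV0S/DIV0U followed by a run of
      // DIV1 steps (one quotient bit each), typically interleaved with
      // ROTCL. The bookkeeping below records the register pair in the div()
      // state of the opening DIV0 so that pass 2 can decide whether the
      // sequence can be handled as a single native divide.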
      case 0x04: // DIV1 Rm,Rn          0011nnnnmmmm0100
        opd->source = BITMASK4(GET_Rm(), GET_Rn(), SHR_SR, SHR_T);
        opd->dest = BITMASK3(GET_Rn(), SHR_SR, SHR_T);
        if (i_div >= 0) {
          // divide operation: all DIV1 operations must use the same reg pair
          if (div(&ops[i_div]).rn == SHR_MEM)
            div(&ops[i_div]).rn=GET_Rn(), div(&ops[i_div]).rm=GET_Rm();
          if (div(&ops[i_div]).rn == GET_Rn() && div(&ops[i_div]).rm == GET_Rm()) {
            div(&ops[i_div]).div1 += 1;
            div(&ops[i_div]).state = 0;
            is_divop = 1;
          } else {
            ops[i_div].imm = 0;
            i_div = -1;
          }
        }
        break;
      case 0x05: // DMULU.L Rm,Rn       0011nnnnmmmm0101
      case 0x0d: // DMULS.L Rm,Rn       0011nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
        opd->cycles = 2;
        break;
      case 0x08: // SUB Rm,Rn           0011nnnnmmmm1000
      case 0x0c: // ADD Rm,Rn           0011nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a: // SUBC Rm,Rn          0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn          0011nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x0b: // SUBV Rm,Rn          0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn          0011nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn    0100nnnn00000000
        case 2: // SHAL Rn    0100nnnn00100000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // DT Rn      0100nnnn00010000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          op_flags[i] |= OF_DELAY_INSN;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn    0100nnnn00000001
        case 2: // SHAR Rn    0100nnnn00100001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // CMP/PZ Rn  0100nnnn00010001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn    0100nnnn00000010
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x12: // STS.L MACL,@-Rn    0100nnnn00010010
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x22: // STS.L PR,@-Rn      0100nnnn00100010
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x03: // STC.L SR,@-Rn      0100nnnn00000011
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->cycles = 2;
          break;
        case 0x13: // STC.L GBR,@-Rn     0100nnnn00010011
          tmp = BITMASK1(SHR_GBR);
          opd->cycles = 2;
          break;
        case 0x23: // STC.L VBR,@-Rn     0100nnnn00100011
          tmp = BITMASK1(SHR_VBR);
          opd->cycles = 2;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn()) | tmp;
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn            0100nnnn00000100
        case 0x05: // ROTR Rn            0100nnnn00000101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x24: // ROTCL Rn           0100nnnn00100100
          if (i_div >= 0) {
            // divide operation: all ROTCL operations must use the same register
            if (div(&ops[i_div]).ro == SHR_MEM)
              div(&ops[i_div]).ro = GET_Rn();
            if (div(&ops[i_div]).ro == GET_Rn() && !div(&ops[i_div]).state) {
              div(&ops[i_div]).rotcl += 1;
              div(&ops[i_div]).state = 1;
              is_divop = 1;
            } else {
              ops[i_div].imm = 0;
              i_div = -1;
            }
          }
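          // fallthrough: ROTCL intentionally continues into ROTCR's
          // source/dest setup below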
        case 0x25: // ROTCR Rn           0100nnnn00100101
          opd->source = BITMASK2(GET_Rn(), SHR_T);
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x15: // CMP/PL Rn          0100nnnn00010101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH    0100mmmm00000110
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x16: // LDS.L @Rm+,MACL    0100mmmm00010110
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x26: // LDS.L @Rm+,PR      0100mmmm00100110
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x07: // LDC.L @Rm+,SR      0100mmmm00000111
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x17: // LDC.L @Rm+,GBR     0100mmmm00010111
          tmp = BITMASK1(SHR_GBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x27: // LDC.L @Rm+,VBR     0100mmmm00100111
          tmp = BITMASK1(SHR_VBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), SHR_MEM);
        opd->dest = BITMASK1(GET_Rn()) | tmp;
        break;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0:
          // SHLL2 Rn         0100nnnn00001000
          // SHLR2 Rn         0100nnnn00001001
          break;
        case 1:
          // SHLL8 Rn         0100nnnn00011000
          // SHLR8 Rn         0100nnnn00011001
          break;
        case 2:
          // SHLL16 Rn        0100nnnn00101000
          // SHLR16 Rn        0100nnnn00101001
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp = SHR_MACL;
          break;
        case 2: // LDS Rm,PR   0100mmmm00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0b:
        switch (GET_Fx())
        {
        case 0: // JSR @Rm     0100mmmm00001011
          opd->dest = BITMASK1(SHR_PR);
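          // fallthrough: JSR reuses the JMP handling below, keeping the
          // PR write set up above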
        case 2: // JMP @Rm     0100mmmm00101011
          opd->op = OP_BRANCH_R;
          opd->rm = GET_Rn();
          opd->source = BITMASK1(opd->rm);
          opd->dest |= BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          if (!(opd->dest & BITMASK1(SHR_PR)))
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          else
            op_flags[i+1+next_is_delay] |= OF_BTARGET;
          break;
        case 1: // TAS.B @Rn   0100nnnn00011011
          opd->source = BITMASK2(GET_Rn(), SHR_MEM);
          opd->dest = BITMASK2(SHR_T, SHR_MEM);
          opd->cycles = 4;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR   0100mmmm00001110
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // LDC Rm,GBR  0100mmmm00011110
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // LDC Rm,VBR  0100mmmm00101110
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_LDC;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = tmp;
        break;
      case 0x0f:
        // MAC.W @Rm+,@Rn+     0100nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x05:
      // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (op & 0x0f) * 4;
      op_flags[i] |= OF_POLL_INSN;
      break;

    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x04: // MOV.B @Rm+,Rn       0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn       0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn       0110nnnnmmmm0110
        opd->dest = BITMASK2(GET_Rm(), GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        break;
      case 0x00: // MOV.B @Rm,Rn        0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn        0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn        0110nnnnmmmm0010
        opd->dest = BITMASK1(GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0a: // NEGC Rm,Rn          0110nnnnmmmm1010
        opd->source = BITMASK2(GET_Rm(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x03: // MOV Rm,Rn           0110nnnnmmmm0011
        opd->op = OP_MOVE;
        goto arith_rmrn;
      case 0x07: // NOT Rm,Rn           0110nnnnmmmm0111
      case 0x08: // SWAP.B Rm,Rn        0110nnnnmmmm1000
      case 0x09: // SWAP.W Rm,Rn        0110nnnnmmmm1001
      case 0x0b: // NEG Rm,Rn           0110nnnnmmmm1011
      case 0x0c: // EXTU.B Rm,Rn        0110nnnnmmmm1100
      case 0x0d: // EXTU.W Rm,Rn        0110nnnnmmmm1101
      case 0x0e: // EXTS.B Rm,Rn        0110nnnnmmmm1110
      case 0x0f: // EXTS.W Rm,Rn        0110nnnnmmmm1111
      arith_rmrn:
        opd->source = BITMASK1(GET_Rm());
        opd->dest = BITMASK1(GET_Rn());
        break;
      }
      break;

    /////////////////////////////////////////////
    case 0x07:
      // ADD #imm,Rn  0111nnnniiiiiiii
      opd->source = opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn)  10000000nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f);
        break;
      case 0x0100: // MOV.W R0,@(disp,Rn)  10000001nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f) * 2;
        break;
      case 0x0400: // MOV.B @(disp,Rm),R0  10000100mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0500: // MOV.W @(disp,Rm),R0  10000101mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f) * 2;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0800: // CMP/EQ #imm,R0       10001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = (s8)op;
        break;
      case 0x0d00: // BT/S label 10001101dddddddd
      case 0x0f00: // BF/S label 10001111dddddddd
        next_is_delay = 1;
        // fallthrough
      case 0x0900: // BT label   10001001dddddddd
      case 0x0b00: // BF label   10001011dddddddd
        opd->op = (op & 0x0200) ? OP_BRANCH_CF : OP_BRANCH_CT;
        opd->source = BITMASK2(SHR_PC, SHR_T);
        opd->dest = BITMASK1(SHR_PC);
        opd->imm = ((signed int)(op << 24) >> 23);
        opd->imm += pc + 4;
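        // (op << 24) >> 23 sign-extends the 8-bit displacement and doubles it
        // in one step, since displacements count 16-bit words. E.g. op 0x8bfe
        // (BF with disp -2) gives imm = -4, so the target is pc + 4 - 4 = pc.
        // BRA/BSR below use the same trick with a 12-bit field (<< 20 >> 19).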
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x09:
      // MOV.W @(disp,PC),Rn  1001nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = tmp + 2 + (op & 0xff) * 2;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 1;
      break;

    /////////////////////////////////////////////
    case 0x0b:
      // BSR label 1011dddddddddddd
      opd->dest = BITMASK1(SHR_PR);
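      // fallthrough: BSR continues into the BRA handling below, keeping the
      // PR write set up above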
    case 0x0a:
      // BRA label 1010dddddddddddd
      opd->op = OP_BRANCH;
      opd->source = BITMASK1(SHR_PC);
      opd->dest |= BITMASK1(SHR_PC);
      opd->imm = ((signed int)(op << 20) >> 19);
      opd->imm += pc + 4;
      opd->cycles = 2;
      next_is_delay = 1;
      if (!(opd->dest & BITMASK1(SHR_PR))) {
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2) {
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
          if (opd->imm <= pc)
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        } else
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
      } else
        op_flags[i+1+next_is_delay] |= OF_BTARGET;
      break;

    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR)   11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR)   11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR)   11000010dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        break;
      case 0x0400: // MOV.B @(disp,GBR),R0   11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0   11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0   11000110dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0300: // TRAPA #imm             11000011iiiiiiii
        opd->op = OP_TRAPA;
        opd->source = BITMASK4(SHR_SP, SHR_PC, SHR_SR, SHR_T);
        opd->dest = BITMASK2(SHR_SP, SHR_PC);
        opd->imm = (op & 0xff);
        opd->cycles = 8;
        op_flags[i+1] |= OF_BTARGET;
        break;
      case 0x0700: // MOVA @(disp,PC),R0     11000111dddddddd
        opd->op = OP_MOVA;
        tmp = pc + 2;
        if (op_flags[i] & OF_DELAY_OP) {
          if (ops[i-1].op == OP_BRANCH)
            tmp = ops[i-1].imm;
          else if (ops[i-1].op != OP_BRANCH_N)
            tmp = 0;
        }
        opd->dest = BITMASK1(SHR_R0);
        if (tmp) {
          opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
          if (opd->imm >= base_pc) {
            if (lowest_mova == 0 || opd->imm < lowest_mova)
              lowest_mova = opd->imm;
          }
        }
        break;
      case 0x0800: // TST #imm,R0            11001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        break;
      case 0x0900: // AND #imm,R0            11001001iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0a00: // XOR #imm,R0            11001010iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0b00: // OR #imm,R0             11001011iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0c00: // TST.B #imm,@(R0,GBR)   11001100iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        op_flags[i] |= OF_POLL_INSN;
        opd->cycles = 3;
        break;
      case 0x0d00: // AND.B #imm,@(R0,GBR)   11001101iiiiiiii
      case 0x0e00: // XOR.B #imm,@(R0,GBR)   11001110iiiiiiii
      case 0x0f00: // OR.B #imm,@(R0,GBR)    11001111iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = op & 0xff;
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x0d:
      // MOV.L @(disp,PC),Rn  1101nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 2;
      break;

    /////////////////////////////////////////////
    case 0x0e:
      // MOV #imm,Rn  1110nnnniiiiiiii
      opd->op = OP_LOAD_CONST;
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    default:
    undefined:
      opd->op = OP_UNDEFINED;
      // an unhandled instruction is probably not code if it's not the 1st insn
      if (!(op_flags[i] & OF_DELAY_OP) && pc != base_pc)
        goto end;
      break;
    }

    if (op_flags[i] & OF_DELAY_OP) {
      switch (opd->op) {
      case OP_BRANCH:
      case OP_BRANCH_N:
      case OP_BRANCH_CT:
      case OP_BRANCH_CF:
      case OP_BRANCH_R:
      case OP_BRANCH_RF:
        elprintf(EL_ANOMALY, "%csh2 drc: branch in DS @ %08x",
          is_slave ? 's' : 'm', pc);
        opd->op = OP_UNDEFINED;
        op_flags[i] |= OF_B_IN_DS;
        next_is_delay = 0;
        break;
      }
    } else if (!is_divop && i_div >= 0)
      i_div = -1; // divide parser stop
  }

end:
  i_end = i;
  end_pc = pc;

  // 2nd pass: some analysis
  lowest_literal = end_literals = lowest_mova = 0;
  t = T_UNKNOWN; // T flag state
  last_btarget = 0;
  op = 0; // delay/poll insns counter
  is_divop = 0; // divide op insns counter
  i_div = -1; // index of current divide op
  for (i = 0, pc = base_pc; i < i_end; i++, pc += 2) {
    opd = &ops[i];
    crc += FETCH_OP(pc);

    // propagate T (TODO: DIV0U)
    if (op_flags[i] & OF_BTARGET)
      t = T_UNKNOWN;

    if ((opd->op == OP_BRANCH_CT && t == T_SET) ||
        (opd->op == OP_BRANCH_CF && t == T_CLEAR)) {
      opd->op = OP_BRANCH;
      opd->cycles = (op_flags[i + 1] & OF_DELAY_OP) ? 2 : 3;
    } else if ((opd->op == OP_BRANCH_CT && t == T_CLEAR) ||
               (opd->op == OP_BRANCH_CF && t == T_SET))
      opd->op = OP_BRANCH_N;
    else if (OP_ISBRACND(opd->op))
      t = (opd->op == OP_BRANCH_CF ? T_SET : T_CLEAR);
    else if (opd->op == OP_SETCLRT)
      t = (opd->imm ? T_SET : T_CLEAR);
    else if (opd->dest & BITMASK1(SHR_T))
      t = T_UNKNOWN;

    // "overscan" detection: unreachable code after unconditional branch
    // this can happen if the insn after a forward branch isn't a local target
    if (OP_ISBRAUC(opd->op)) {
      if (op_flags[i + 1] & OF_DELAY_OP) {
        if (i_end > i + 2 && !(op_flags[i + 2] & OF_BTARGET))
          i_end = i + 2;
      } else {
        if (i_end > i + 1 && !(op_flags[i + 1] & OF_BTARGET))
          i_end = i + 1;
      }
    }

    // divide operation verification:
    // 1. there must not be a branch target inside
    // 2. nothing is in a delay slot (could only be DIV0)
    // 3. DIV0/n*(ROTCL+DIV1)/ROTCL:
    //    div.div1 > 0 && div.rotcl == div.div1+1 && div.rn != div.ro
    // 4. DIV0/n*DIV1/ROTCL:
    //    div.div1 > 0 && div.rotcl == 1 && div.ro == div.rn
    if (i_div >= 0) {
      if (op_flags[i] & OF_BTARGET) { // condition 1
        ops[i_div].imm = 0;
        i_div = -1;
      } else if (--is_divop == 0)
        i_div = -1;
    } else if (opd->op == OP_DIV0) {
      struct div *div = &div(opd);
      is_divop = div->div1 + div->rotcl;
      if (op_flags[i] & OF_DELAY_OP) // condition 2
        opd->imm = 0;
      else if (! div->div1 || ! ((div->ro == div->rn && div->rotcl == 1) ||
               (div->ro != div->rn && div->rotcl == div->div1+1)))
        opd->imm = 0; // condition 3+4
      else if (is_divop)
        i_div = i;
    }

    // literal pool size detection
    if (opd->op == OP_MOVA && opd->imm >= base_pc)
      if (lowest_mova == 0 || opd->imm < lowest_mova)
        lowest_mova = opd->imm;
    if (opd->op == OP_LOAD_POOL) {
      if (opd->imm >= base_pc && opd->imm < end_pc + MAX_LITERAL_OFFSET) {
        if (end_literals < opd->imm + opd->size * 2)
          end_literals = opd->imm + opd->size * 2;
        if (lowest_literal == 0 || lowest_literal > opd->imm)
          lowest_literal = opd->imm;

        if (opd->size == 2) {
          // tweak for NFL: treat a 32bit literal as an address and check if it
          // points to the literal space. In that case handle it like MOVA.
          tmp = FETCH32(opd->imm) & ~0x20000000; // MUST ignore wt bit here
          if (tmp >= end_pc && tmp < end_pc + MAX_LITERAL_OFFSET)
            if (lowest_mova == 0 || tmp < lowest_mova)
              lowest_mova = tmp;
        }
      }
    }

#if LOOP_DETECTION
    // inner loop detection
    // 1. a loop always starts with a branch target (for the backwards jump)
    // 2. it doesn't contain more than one polling and/or delaying insn
    // 3. it doesn't contain unconditional jumps
    // 4. no overlapping of loops
    if (op_flags[i] & OF_BTARGET) {
      last_btarget = i; // possible loop starting point
      op = 0;
    }
    // XXX let's hope nobody is putting a delay or poll insn in a delay slot :-/
    if (OP_ISBRAIMM(opd->op)) {
      // BSR, BRA, BT, BF with immediate target
      int i_tmp = (opd->imm - base_pc) / 2; // branch target, index in ops
      if (i_tmp == last_btarget) // candidate for basic loop optimizer
        op_flags[i_tmp] |= OF_BASIC_LOOP;
      if (i_tmp == last_btarget && op <= 1) {
        op_flags[i_tmp] |= OF_LOOP; // conditions met -> mark loop
        last_btarget = i+1; // condition 4
      } else if (opd->op == OP_BRANCH)
        last_btarget = i+1; // condition 3
    }
    else if (OP_ISBRAIND(opd->op))
      // BRAF, BSRF, JMP, JSR, register indirect. treat it as off-limits jump
      last_btarget = i+1; // condition 3
    else if (op_flags[i] & (OF_POLL_INSN|OF_DELAY_INSN))
      op++; // condition 2
#endif
  }

  end_pc = pc;

  // end_literals is used to decide to inline a literal or not
  // XXX: need better detection if this actually is used in write
  if (lowest_literal >= base_pc) {
    if (lowest_literal < end_pc) {
      dbg(1, "warning: lowest_literal=%08x < end_pc=%08x", lowest_literal, end_pc);
      // TODO: does this always mean end_pc covers data?
    }
  }
  if (lowest_mova >= base_pc) {
    if (lowest_mova < end_literals) {
      dbg(1, "warning: mova=%08x < end_literals=%08x", lowest_mova, end_literals);
      end_literals = lowest_mova;
    }
    if (lowest_mova < end_pc) {
      dbg(1, "warning: mova=%08x < end_pc=%08x", lowest_mova, end_pc);
      end_literals = end_pc;
    }
  }
  if (lowest_literal >= end_literals)
    lowest_literal = end_literals;

  if (lowest_literal && end_literals)
    for (pc = lowest_literal; pc < end_literals; pc += 2)
      crc += FETCH_OP(pc);

  *end_pc_out = end_pc;
  if (base_literals_out != NULL)
    *base_literals_out = (lowest_literal ? lowest_literal : end_pc);
  if (end_literals_out != NULL)
    *end_literals_out = (end_literals ? end_literals : end_pc);

  // crc overflow handling: fold the 32-bit sum into 16 bits twice, so that a
  // carry produced by the first fold is absorbed by the second
  crc = (crc & 0xffff) + (crc >> 16);
  crc = (crc & 0xffff) + (crc >> 16);
  return crc;
}

// vim:shiftwidth=2:ts=2:expandtab