
/*
 * SH2 recompiler
 * (C) notaz, 2009,2010,2013
 * (C) kub, 2018,2019
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 *
 * notes:
 * - tcache, block descriptor, block entry buffer overflows result in oldest
 *   blocks being deleted until enough space is available
 * - link and list element buffer overflows result in failure and exit
 * - jumps between blocks are tracked for SMC handling (in block_entry->links),
 *   except jumps from global to CPU-local tcaches
 *
 * implemented:
 * - static register allocation
 * - remaining register caching and tracking in temporaries
 * - block-local branch linking
 * - block linking
 * - some constant propagation
 * - call stack caching for host block entry address
 * - delay, poll, and idle loop detection and handling
 * - some T/M flag optimizations where the value is known or isn't used
 *
 * TODO:
 * - better constant propagation
 * - bug fixing
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "../../pico/pico_int.h"
#include "../../pico/arm_features.h"
#include "sh2.h"
#include "compiler.h"
#include "../drc/cmn.h"
#include "../debug.h"
// features
#define PROPAGATE_CONSTANTS     1
#define LINK_BRANCHES           1
#define BRANCH_CACHE            1
#define CALL_STACK              1
#define ALIAS_REGISTERS         1
#define REMAP_REGISTER          1
#define LOOP_DETECTION          1
#define LOOP_OPTIMIZER          1
#define T_OPTIMIZER             1

#define MAX_LITERAL_OFFSET      0x200   // max. MOVA, MOV @(PC) offset
#define MAX_LOCAL_TARGETS       (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES      (BLOCK_INSN_LIMIT / 2)
// debug stuff
// 01 - warnings/errors
// 02 - block info/smc
// 04 - asm
// 08 - runtime block entry log
// 10 - smc self-check
// 20 - runtime block entry counter
// 40 - rcache checking
// 80 - branch cache statistics
// 100 - write trace
// 200 - compare trace
// 400 - block entry backtrace on exit
// 800 - state dump on exit
// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0//x847
#endif

#if DRC_DEBUG
#define dbg(l,...) { \
  if ((l) & DRC_DEBUG) \
    elprintf(EL_STATUS, ##__VA_ARGS__); \
}
#include "mame/sh2dasm.h"
#include <platform/libpicofe/linux/host_dasm.h>

static int insns_compiled, hash_collisions, host_insn_count;
#define COUNT_OP \
  host_insn_count++
#else // !DRC_DEBUG
#define COUNT_OP
#define dbg(...)
#endif
///
#define FETCH_OP(pc) \
  dr_pc_base[(pc) / 2]

#define FETCH32(a) \
  ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])

#define CHECK_UNHANDLED_BITS(mask, label) { \
  if ((op & (mask)) != 0) \
    goto label; \
}

#define GET_Fx() \
  ((op >> 4) & 0x0f)
#define GET_Rm GET_Fx
#define GET_Rn() \
  ((op >> 8) & 0x0f)
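// worked example (illustrative, not from the original source): the SH2 opcode
// ADD Rm,Rn is encoded as 0011nnnnmmmm1100, so for op = 0x312c (ADD R2,R1)
// GET_Rn() yields (0x312c >> 8) & 0xf == 1 and GET_Rm() yields
// (0x312c >> 4) & 0xf == 2.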
#define SHR_T   30  // separate T for not-used detection
#define SHR_MEM 31
#define SHR_TMP -1

#define T       0x00000001
#define S       0x00000002
#define I       0x000000f0
#define Q       0x00000100
#define M       0x00000200
#define T_save  0x00000800

#define I_SHIFT 4
#define Q_SHIFT 8
#define M_SHIFT 9
#define T_SHIFT 11
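// usage sketch (illustrative): the SR flag bits above are manipulated with
// plain mask arithmetic, e.g. DIV0U clears M, Q and T in one operation:
//   sh2->sr &= ~(M|Q|T);
// and the current T value is read back as (sh2->sr & T).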
static struct op_data {
  u8 op;
  u8 cycles;
  u8 size;     // 0, 1, 2 - byte, word, long
  s8 rm;       // branch or load/store data reg
  u32 source;  // bitmask of src regs
  u32 dest;    // bitmask of dest regs
  u32 imm;     // immediate/io address/branch target
               // (for literal - address, not value)
} ops[BLOCK_INSN_LIMIT];

enum op_types {
  OP_UNHANDLED = 0,
  OP_BRANCH,
  OP_BRANCH_N,  // conditional known not to be taken
  OP_BRANCH_CT, // conditional, branch if T set
  OP_BRANCH_CF, // conditional, branch if T clear
  OP_BRANCH_R,  // indirect
  OP_BRANCH_RF, // indirect far (PC + Rm)
  OP_SETCLRT,   // T flag set/clear
  OP_MOVE,      // register move
  OP_LOAD_CONST,// load const to register
  OP_LOAD_POOL, // literal pool load, imm is address
  OP_MOVA,      // MOVA instruction
  OP_SLEEP,     // SLEEP instruction
  OP_RTE,       // RTE instruction
  OP_TRAPA,     // TRAPA instruction
  OP_LDC,       // LDC instruction
  OP_UNDEFINED,
};

// XXX consider trap insns: OP_TRAPA, OP_UNDEFINED?
#define OP_ISBRANCH(op) ((BITRANGE(OP_BRANCH, OP_BRANCH_RF) | BITMASK1(OP_RTE)) \
                          & BITMASK1(op))
#define OP_ISBRAUC(op)  (BITMASK4(OP_BRANCH, OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                          & BITMASK1(op))
#define OP_ISBRACND(op) (BITMASK2(OP_BRANCH_CT, OP_BRANCH_CF) \
                          & BITMASK1(op))
#define OP_ISBRAIMM(op) (BITMASK3(OP_BRANCH, OP_BRANCH_CT, OP_BRANCH_CF) \
                          & BITMASK1(op))
#define OP_ISBRAIND(op) (BITMASK3(OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                          & BITMASK1(op))
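// worked example (illustrative): these macros test set membership via
// bitmasks. OP_ISBRACND(OP_BRANCH_CT) expands to
//   BITMASK2(OP_BRANCH_CT, OP_BRANCH_CF) & BITMASK1(OP_BRANCH_CT)
// which is nonzero, while OP_ISBRACND(OP_MOVE) evaluates to 0.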
#ifdef DRC_SH2

#if (DRC_DEBUG & 4)
static u8 *tcache_dsm_ptrs[3];
static char sh2dasm_buff[64];
#define do_host_disasm(tcid) \
  host_dasm(tcache_dsm_ptrs[tcid], emith_insn_ptr() - tcache_dsm_ptrs[tcid]); \
  tcache_dsm_ptrs[tcid] = emith_insn_ptr()
#else
#define do_host_disasm(x)
#endif

#define SH2_DUMP(sh2, reason) { \
  char ms = (sh2)->is_slave ? 's' : 'm'; \
  printf("%csh2 %s %08x\n", ms, reason, (sh2)->pc); \
  printf("%csh2 r0-7  %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[0], (sh2)->r[1], (sh2)->r[2], (sh2)->r[3], \
    (sh2)->r[4], (sh2)->r[5], (sh2)->r[6], (sh2)->r[7]); \
  printf("%csh2 r8-15 %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[8], (sh2)->r[9], (sh2)->r[10], (sh2)->r[11], \
    (sh2)->r[12], (sh2)->r[13], (sh2)->r[14], (sh2)->r[15]); \
  printf("%csh2 pc-ml %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->pc, (sh2)->ppc, (sh2)->pr, (sh2)->sr&0x3ff, \
    (sh2)->gbr, (sh2)->vbr, (sh2)->mach, (sh2)->macl); \
  printf("%csh2 tmp-p %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->drc_tmp, (sh2)->irq_cycles, \
    (sh2)->pdb_io_csum[0], (sh2)->pdb_io_csum[1], (sh2)->state, \
    (sh2)->poll_addr, (sh2)->poll_cycles, (sh2)->poll_cnt); \
}

#if (DRC_DEBUG & (8|256|512|1024)) || defined(PDB)
#if (DRC_DEBUG & (256|512|1024))
static SH2 csh2[2][8];
static FILE *trace[2];
#endif
static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
{
  if (block != NULL) {
    dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
      sh2->pc, block, (signed int)sr >> 12);
#if defined PDB
    pdb_step(sh2, sh2->pc);
#elif (DRC_DEBUG & 256)
    {
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "wb");
        trace[1] = fopen("pico.trace1", "wb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        fwrite(sh2, offsetof(SH2, read8_map), 1, trace[idx]);
        fwrite(&sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx]);
        memcpy(&csh2[idx][0], sh2, offsetof(SH2, poll_cnt)+4);
        csh2[idx][0].is_slave = idx;
      }
    }
#elif (DRC_DEBUG & 512)
    {
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "rb");
        trace[1] = fopen("pico.trace1", "rb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        if (!fread(&fsh2, offsetof(SH2, read8_map), 1, trace[idx]) ||
            !fread(&fsh2.pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx])) {
          printf("trace eof at %08lx\n", ftell(trace[idx]));
          exit(1);
        }
        fsh2.sr = (fsh2.sr & 0xfff) | (sh2->sr & ~0xfff);
        fsh2.is_slave = idx;
        if (memcmp(&fsh2, sh2, offsetof(SH2, read8_map)) ||
            0)//memcmp(&fsh2.pdb_io_csum, &sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum)))
        {
          printf("difference at %08lx!\n", ftell(trace[idx]));
          SH2_DUMP(&fsh2, "file");
          SH2_DUMP(sh2, "current");
          SH2_DUMP(&csh2[idx][0], "previous");
          exit(1);
        }
        csh2[idx][0] = fsh2;
      }
    }
#elif (DRC_DEBUG & 1024)
    {
      int x = sh2->is_slave, i;
      for (i = 0; i < ARRAY_SIZE(csh2[x])-1; i++)
        memcpy(&csh2[x][i], &csh2[x][i+1], offsetof(SH2, poll_cnt)+4);
      memcpy(&csh2[x][ARRAY_SIZE(csh2[x])-1], sh2, offsetof(SH2, poll_cnt)+4);
      csh2[x][0].is_slave = x;
    }
#endif
  }
  return block;
}
#endif
// we have 3 translation cache buffers, split from one drc/cmn buffer.
// BIOS shares tcache with data array because it's only used for init
// and can be discarded early
#define TCACHE_BUFFERS 3

struct ring_buffer {
  u8 *base;          // ring buffer memory
  unsigned item_sz;  // size of one buffer item
  unsigned size;     // number of items in ring
  int first, next;   // read and write pointers
  int used;          // number of used items in ring
};
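// note (illustrative): 'first' is the oldest (read) item index and 'next'
// the write index into 'base'; allocations are contiguous, so when an
// allocation wraps, the unusable items at the end of the buffer memory are
// also counted in 'used'.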
enum { BL_JMP=1, BL_LDJMP, BL_JCCBLX };
struct block_link {
  short tcache_id;
  short type;                 // BL_JMP et al
  u32 target_pc;
  void *jump;                 // insn address
  void *blx;                  // block link/exit area if any
  u8 jdisp[12];               // jump backup buffer
  struct block_link *next;    // either in block_entry->links or unresolved
  struct block_link *o_next;  // ...in block_entry->o_links
  struct block_link *prev;
  struct block_link *o_prev;
  struct block_entry *target; // target block this is linked in (be->links)
};

struct block_entry {
  u32 pc;
  u8 *tcache_ptr;             // translated block for above PC
  struct block_entry *next;   // chain in hash_table with same pc hash
  struct block_entry *prev;
  struct block_link *links;   // incoming links to this entry
  struct block_link *o_links; // outgoing links from this entry
#if (DRC_DEBUG & 2)
  struct block_desc *block;
#endif
#if (DRC_DEBUG & 32)
  int entry_count;
#endif
};

struct block_desc {
  u32 addr;                   // block start SH2 PC address
  u32 addr_lit;               // block start SH2 literal pool addr
  int size;                   // ..of recompiled insns
  int size_lit;               // ..of (insns+)literal pool
  u8 *tcache_ptr;             // start address of block in cache
  u16 crc;                    // crc of insns and literals
  u16 active;                 // actively used or deactivated?
  struct block_list *list;
#if (DRC_DEBUG & 2)
  int refcount;
#endif
  int entry_count;
  struct block_entry *entryp;
};

struct block_list {
  struct block_desc *block;   // block reference
  struct block_list *next;    // pointers for doubly linked list
  struct block_list *prev;
  struct block_list **head;   // list head (for removing from list)
  struct block_list *l_next;
};

static u8 *tcache_ptr;        // ptr for code emitters

// XXX: need to tune sizes
static struct ring_buffer tcache_ring[TCACHE_BUFFERS];
static const int tcache_sizes[TCACHE_BUFFERS] = {
  DRC_TCACHE_SIZE * 30 / 32, // ROM (rarely used), DRAM
  DRC_TCACHE_SIZE / 32,      // BIOS, data array in master sh2
  DRC_TCACHE_SIZE / 32,      // ... slave
};
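// sanity note (illustrative): the three shares sum to the whole buffer,
// 30/32 + 1/32 + 1/32 = 32/32 = DRC_TCACHE_SIZE.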
#define BLOCK_MAX_COUNT(tcid)       ((tcid) ? 256 : 32*256)
static struct ring_buffer block_ring[TCACHE_BUFFERS];
static struct block_desc *block_tables[TCACHE_BUFFERS];

#define ENTRY_MAX_COUNT(tcid)       ((tcid) ? 8*512 : 256*512)
static struct ring_buffer entry_ring[TCACHE_BUFFERS];
static struct block_entry *entry_tables[TCACHE_BUFFERS];

// we have block_link_pool to avoid using mallocs
#define BLOCK_LINK_MAX_COUNT(tcid)  ((tcid) ? 512 : 32*512)
static struct block_link *block_link_pool[TCACHE_BUFFERS];
static int block_link_pool_counts[TCACHE_BUFFERS];
static struct block_link **unresolved_links[TCACHE_BUFFERS];
static struct block_link *blink_free[TCACHE_BUFFERS];

// used for invalidation
#define RAM_SIZE(tcid)  ((tcid) ? 0x1000 : 0x40000)
#define INVAL_PAGE_SIZE 0x100
static struct block_list *inactive_blocks[TCACHE_BUFFERS];

// array of pointers to block_lists for RAM and 2 data arrays
// each array has len: sizeof(mem) / INVAL_PAGE_SIZE
static struct block_list **inval_lookup[TCACHE_BUFFERS];

#define HASH_TABLE_SIZE(tcid) ((tcid) ? 512 : 64*512)
static struct block_entry **hash_tables[TCACHE_BUFFERS];

#define HASH_FUNC(hash_tab, addr, mask) \
  (hash_tab)[((addr) >> 1) & (mask)]
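// note (illustrative): SH2 instructions are halfword-aligned, so bit 0 of a
// PC carries no information; shifting right by 1 before masking makes all
// address bits count for bucket selection, e.g. for tcid 0 the bucket is
//   hash_tables[0][(pc >> 1) & (HASH_TABLE_SIZE(0) - 1)]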
#define BLOCK_LIST_MAX_COUNT  (64*1024)
static struct block_list *block_list_pool;
static int block_list_pool_count;
static struct block_list *blist_free;

#if (DRC_DEBUG & 128)
#if BRANCH_CACHE
int bchit, bcmiss;
#endif
#if CALL_STACK
int rchit, rcmiss;
#endif
#endif
// host register tracking
enum cache_reg_htype {
  HRT_TEMP   = 1, // is for temps and args
  HRT_REG    = 2, // is for sh2 regs
  HRT_STATIC = 2, // is for static mappings (same as HRT_REG)
};

enum cache_reg_flags {
  HRF_DIRTY  = 1 << 0, // has "dirty" value to be written to ctx
  HRF_PINNED = 1 << 1, // has a pinned mapping
};

enum cache_reg_type {
  HR_FREE,
  HR_CACHED, // vreg has sh2_reg_e
  HR_TEMP,   // reg used for temp storage
};

typedef struct {
  u8 hreg;      // "host" reg
  u8 htype:2;   // TEMP or REG?
  u8 flags:2;   // DIRTY, PINNED?
  u8 type:2;    // CACHED or TEMP?
  u8 locked:2;  // LOCKED reference counter
  u16 stamp;    // kind of a timestamp
  u32 gregs;    // "guest" reg mask
} cache_reg_t;

// guest register tracking
enum guest_reg_flags {
  GRF_DIRTY  = 1 << 0, // reg has "dirty" value to be written to ctx
  GRF_CONST  = 1 << 1, // reg has a constant
  GRF_CDIRTY = 1 << 2, // constant not yet written to ctx
  GRF_STATIC = 1 << 3, // reg has static mapping to vreg
  GRF_PINNED = 1 << 4, // reg has pinned mapping to vreg
};

typedef struct {
  u8 flags;  // guest flags: is constant, is dirty?
  s8 sreg;   // cache reg for static mapping
  s8 vreg;   // cache_reg this is currently mapped to, -1 if not mapped
  s8 cnst;   // const index if this is constant
} guest_reg_t;
// possibly needed in code emitter
static int rcache_get_tmp(void);
static void rcache_free_tmp(int hr);

// Note: Register assignment goes by ABI convention. Caller-save registers are
// TEMPORARY, callee-save registers are PRESERVED. Unusable regs are omitted.
// There must be at least as many free (i.e. not context or statically mapped)
// PRESERVED/TEMPORARY registers as handlers use in the worst case (currently 4).
// There must be at least 3 PARAM, and PARAM+TEMPORARY must be at least 4.
// SR must, and R0 should, by all means be statically mapped.
// XXX the static definition of SR MUST match that in compiler.h

#ifdef __arm__
#include "../drc/emit_arm.c"
#elif defined(__aarch64__)
#include "../drc/emit_arm64.c"
#elif defined(__mips__)
#include "../drc/emit_mips.c"
#elif defined(__riscv__) || defined(__riscv)
#include "../drc/emit_riscv.c"
#elif defined(__i386__)
#include "../drc/emit_x86.c"
#elif defined(__x86_64__)
#include "../drc/emit_x86.c"
#else
#error unsupported arch
#endif
static const signed char hregs_param[] = PARAM_REGS;
static const signed char hregs_temp [] = TEMPORARY_REGS;
static const signed char hregs_saved[] = PRESERVED_REGS;
static const signed char regs_static[] = STATIC_SH2_REGS;

#define CACHE_REGS \
    (ARRAY_SIZE(hregs_param)+ARRAY_SIZE(hregs_temp)+ARRAY_SIZE(hregs_saved)-1)
static cache_reg_t cache_regs[CACHE_REGS];

static signed char reg_map_host[HOST_REGS];

static guest_reg_t guest_regs[SH2_REGS];

static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void REGPARM(1) (*sh2_drc_dispatcher)(u32 pc);
#if CALL_STACK
static u32  REGPARM(2) (*sh2_drc_dispatcher_call)(u32 pc);
static void REGPARM(1) (*sh2_drc_dispatcher_return)(u32 pc);
#endif
static void REGPARM(1) (*sh2_drc_exit)(u32 pc);
static void            (*sh2_drc_test_irq)(void);

static u32  REGPARM(1) (*sh2_drc_read8)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read8_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32_poll)(u32 a);
static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write32)(u32 a, u32 d);

#ifdef DRC_SR_REG
void REGPARM(1) (*sh2_drc_save_sr)(SH2 *sh2);
void REGPARM(1) (*sh2_drc_restore_sr)(SH2 *sh2);
#endif

// flags for memory access
#define MF_SIZEMASK 0x03        // size of access
#define MF_POSTINCR 0x10        // post increment (for read_rr)
#define MF_PREDECR  MF_POSTINCR // pre decrement (for write_rr)
#define MF_POLLING  0x20        // include polling check in read
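// usage sketch (assumption, not from the original source): the low bits
// select the access size (0/1/2 for byte/word/long), so a long read for
// MOV.L @Rm+,Rn would pass (2 | MF_POSTINCR), and a read inside a detected
// polling loop would add MF_POLLING to go through the *_poll handlers above.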
// address space stuff
static int dr_is_rom(u32 a)
{
  // tweak for WWF Raw which writes data to some high ROM addresses
  return (a & 0xc6000000) == 0x02000000 && (a & 0x3f0000) < 0x3e0000;
}
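// worked example (illustrative): a = 0x02000100 gives
// (a & 0xc6000000) == 0x02000000 and (a & 0x3f0000) == 0 < 0x3e0000, so it
// counts as ROM; a = 0x023e0000 falls in the writable top region and does not.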
static int dr_ctx_get_mem_ptr(SH2 *sh2, u32 a, u32 *mask)
{
  void *memptr;
  int poffs = -1;

  // check if region is mapped memory
  memptr = p32x_sh2_get_mem_ptr(a, mask, sh2);
  if (memptr == NULL)
    return poffs;

  if (memptr == sh2->p_bios)        // BIOS
    poffs = offsetof(SH2, p_bios);
  else if (memptr == sh2->p_da)     // data array
    poffs = offsetof(SH2, p_da);
  else if (memptr == sh2->p_sdram)  // SDRAM
    poffs = offsetof(SH2, p_sdram);
  else if (memptr == sh2->p_rom)    // ROM
    poffs = offsetof(SH2, p_rom);

  return poffs;
}
static int dr_get_tcache_id(u32 pc, int is_slave)
{
  u32 tcid = 0;

  if ((pc & 0xe0000000) == 0xc0000000)
    tcid = 1 + is_slave; // data array
  if ((pc & ~0xfff) == 0)
    tcid = 1 + is_slave; // BIOS

  return tcid;
}
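// worked example (illustrative): pc = 0xc0000100 on the slave sh2 selects
// tcache 2 (data array), pc = 0x00000200 on the master selects tcache 1
// (BIOS, i.e. the first 4KB), and any other pc uses the shared tcache 0.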
static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
{
  struct block_entry *be;

  *tcache_id = dr_get_tcache_id(pc, is_slave);

  be = HASH_FUNC(hash_tables[*tcache_id], pc, HASH_TABLE_SIZE(*tcache_id) - 1);
  if (be != NULL) // don't ask... gcc code generation hint
    for (; be != NULL; be = be->next)
      if (be->pc == pc)
        return be;

  return NULL;
}
// ---------------------------------------------------------------

// ring buffer management
#define RING_INIT(r,m,n) *(r) = (struct ring_buffer) { .base = (u8 *)m, \
                                  .item_sz = sizeof(*(m)), .size = n };

static void *ring_alloc(struct ring_buffer *rb, int count)
{
  // allocate space in ring buffer
  void *p;

  p = rb->base + rb->next * rb->item_sz;
  if (rb->next+count > rb->size) {
    rb->used += rb->size - rb->next;
    p = rb->base; // wrap if overflow at end
    rb->next = count;
  } else {
    rb->next += count;
    if (rb->next == rb->size) rb->next = 0;
  }
  rb->used += count;
  return p;
}

static void ring_wrap(struct ring_buffer *rb)
{
  // insufficient space at end of buffer memory, wrap around
  rb->used += rb->size - rb->next;
  rb->next = 0;
}

static void ring_free(struct ring_buffer *rb, int count)
{
  // free oldest space in ring buffer
  rb->first += count;
  if (rb->first >= rb->size) rb->first -= rb->size;
  rb->used -= count;
}

static void ring_free_p(struct ring_buffer *rb, void *p)
{
  // free ring buffer space up to given pointer
  rb->first = ((u8 *)p - rb->base) / rb->item_sz;
  rb->used = rb->next - rb->first;
  if (rb->used < 0) rb->used += rb->size;
}

static void *ring_reset(struct ring_buffer *rb)
{
  // reset to initial state
  rb->first = rb->next = rb->used = 0;
  return rb->base + rb->next * rb->item_sz;
}

static void *ring_first(struct ring_buffer *rb)
{
  return rb->base + rb->first * rb->item_sz;
}

static void *ring_next(struct ring_buffer *rb)
{
  return rb->base + rb->next * rb->item_sz;
}
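// usage sketch (illustrative, not from the original source): allocation is
// FIFO, so when a buffer fills up, callers evict the oldest items until
// enough space is free, then carve new space:
//   while (rb->used + count > rb->size)
//     ring_free(rb, 1);          // drop the oldest item
//   p = ring_alloc(rb, count);   // then allocate contiguously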
// block management
static void add_to_block_list(struct block_list **blist, struct block_desc *block)
{
  struct block_list *added;

  if (blist_free) {
    added = blist_free;
    blist_free = added->next;
  } else if (block_list_pool_count >= BLOCK_LIST_MAX_COUNT) {
    printf("block list overflow\n");
    exit(1);
  } else {
    added = block_list_pool + block_list_pool_count;
    block_list_pool_count++;
  }

  added->block = block;
  added->l_next = block->list;
  block->list = added;
  added->head = blist;

  added->prev = NULL;
  if (*blist)
    (*blist)->prev = added;
  added->next = *blist;
  *blist = added;
}

static void rm_from_block_lists(struct block_desc *block)
{
  struct block_list *entry;

  entry = block->list;
  while (entry != NULL) {
    if (entry->prev != NULL)
      entry->prev->next = entry->next;
    else
      *(entry->head) = entry->next;
    if (entry->next != NULL)
      entry->next->prev = entry->prev;

    entry->next = blist_free;
    blist_free = entry;

    entry = entry->l_next;
  }
  block->list = NULL;
}

static void discard_block_list(struct block_list **blist)
{
  struct block_list *next, *current = *blist;
  while (current != NULL) {
    next = current->next;
    current->next = blist_free;
    blist_free = current;
    current = next;
  }
  *blist = NULL;
}
static void add_to_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

  be->prev = NULL;
  if (*head)
    (*head)->prev = be;
  be->next = *head;
  *head = be;

#if (DRC_DEBUG & 2)
  if (be->next != NULL) {
    printf(" %08x@%p: entry hash collision with %08x@%p\n",
      be->pc, be->tcache_ptr, be->next->pc, be->next->tcache_ptr);
    hash_collisions++;
  }
#endif
}

static void rm_from_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

#if DRC_DEBUG & 1
  struct block_entry *current = be;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist @%p: be %p %08x missing?", head, be, be->pc);
#endif

  if (be->prev != NULL)
    be->prev->next = be->next;
  else
    *head = be->next;
  if (be->next != NULL)
    be->next->prev = be->prev;
}

static void add_to_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = *head;
  while (current != NULL && current != bl)
    current = current->next;
  if (current == bl)
    dbg(1, "add_to_hashlist_unresolved @%p: bl %p %p %08x already in?", head, bl, bl->target, bl->target_pc);
#endif

  bl->target = NULL; // marker for not resolved
  bl->prev = NULL;
  if (*head)
    (*head)->prev = bl;
  bl->next = *head;
  *head = bl;
}

static void rm_from_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = bl;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist_unresolved @%p: bl %p %p %08x missing?", head, bl, bl->target, bl->target_pc);
#endif

  if (bl->prev != NULL)
    bl->prev->next = bl->next;
  else
    *head = bl->next;
  if (bl->next != NULL)
    bl->next->prev = bl->prev;
}
#if LINK_BRANCHES
static void dr_block_link(struct block_entry *be, struct block_link *bl, int emit_jump)
{
  dbg(2, "- %slink from %p to pc %08x entry %p", emit_jump ? "" : "early ",
    bl->jump, bl->target_pc, be->tcache_ptr);

  if (emit_jump) {
    u8 *jump = bl->jump;
    int jsz = emith_jump_patch_size();
    if (bl->type == BL_JMP) { // patch: jump @entry
      // inlined: @jump far jump to target
      emith_jump_patch(jump, be->tcache_ptr, &jump);
    } else if (bl->type == BL_LDJMP) { // write: jump @entry
      // inlined: @jump far jump to target
      emith_jump_at(jump, be->tcache_ptr);
      jsz = emith_jump_at_size();
    } else if (bl->type == BL_JCCBLX) { // patch: jump cond -> jump @entry
      if (emith_jump_patch_inrange(bl->jump, be->tcache_ptr)) {
        // inlined: @jump near jumpcc to target
        emith_jump_patch(jump, be->tcache_ptr, &jump);
      } else { // dispatcher cond immediate
        // via blx: @jump near jumpcc to blx; @blx far jump
        emith_jump_patch(jump, bl->blx, &jump);
        emith_jump_at(bl->blx, be->tcache_ptr);
        if ((((uintptr_t)bl->blx & 0x1f) + emith_jump_at_size()-1) > 0x1f)
          host_instructions_updated(bl->blx, bl->blx + emith_jump_at_size()-1);
      }
    } else {
      printf("unknown BL type %d\n", bl->type);
      exit(1);
    }
    // only needs sync if patch is possibly crossing cacheline (assume 32 byte)
    if ((((uintptr_t)jump & 0x1f) + jsz-1) > 0x1f)
      host_instructions_updated(jump, jump + jsz-1);
  }

  // move bl to block_entry
  bl->target = be;
  bl->prev = NULL;
  if (be->links)
    be->links->prev = bl;
  bl->next = be->links;
  be->links = bl;
}

static void dr_block_unlink(struct block_link *bl, int emit_jump)
{
  dbg(2, "- unlink from %p to pc %08x", bl->jump, bl->target_pc);

  if (bl->target) {
    if (emit_jump) {
      u8 *jump = bl->jump;
      int jsz = emith_jump_patch_size();
      if (bl->type == BL_JMP) { // jump_patch @dispatcher
        // inlined: @jump far jump to dispatcher
        emith_jump_patch(jump, sh2_drc_dispatcher, &jump);
      } else if (bl->type == BL_LDJMP) { // restore: load pc, jump @dispatcher
        // inlined: @jump load target_pc, far jump to dispatcher
        memcpy(jump, bl->jdisp, emith_jump_at_size());
        jsz = emith_jump_at_size();
      } else if (bl->type == BL_JCCBLX) { // jump cond @blx; @blx: load pc, jump
        // via blx: @jump near jumpcc to blx; @blx load target_pc, far jump
        emith_jump_patch(bl->jump, bl->blx, &jump);
        memcpy(bl->blx, bl->jdisp, emith_jump_at_size());
        host_instructions_updated(bl->blx, bl->blx + emith_jump_at_size()-1);
      } else {
        printf("unknown BL type %d\n", bl->type);
        exit(1);
      }
      // update cpu caches since the previous jump target doesn't exist anymore
      host_instructions_updated(jump, jump + jsz-1);
    }

    if (bl->prev)
      bl->prev->next = bl->next;
    else
      bl->target->links = bl->next;
    if (bl->next)
      bl->next->prev = bl->prev;
    bl->target = NULL;
  }
}
#endif
static struct block_link *dr_prepare_ext_branch(struct block_entry *owner, u32 pc, int is_slave, int tcache_id)
{
#if LINK_BRANCHES
  struct block_link *bl = block_link_pool[tcache_id];
  int cnt = block_link_pool_counts[tcache_id];
  int target_tcache_id;

  // get the target block entry
  target_tcache_id = dr_get_tcache_id(pc, is_slave);
  if (target_tcache_id && target_tcache_id != tcache_id)
    return NULL;

  // get a block link
  if (blink_free[tcache_id] != NULL) {
    bl = blink_free[tcache_id];
    blink_free[tcache_id] = bl->next;
  } else if (cnt >= BLOCK_LINK_MAX_COUNT(tcache_id)) {
    dbg(1, "bl overflow for tcache %d", tcache_id);
    return NULL;
  } else {
    bl += cnt;
    block_link_pool_counts[tcache_id] = cnt+1;
  }

  // prepare link and add to outgoing list of owner
  bl->tcache_id = tcache_id;
  bl->target_pc = pc;
  bl->jump = tcache_ptr;
  bl->blx = NULL;
  bl->o_next = owner->o_links;
  owner->o_links = bl;

  add_to_hashlist_unresolved(bl, tcache_id);
  return bl;
#else
  return NULL;
#endif
}
static void dr_mark_memory(int mark, struct block_desc *block, int tcache_id, u32 nolit)
{
  u8 *drc_ram_blk = NULL, *lit_ram_blk = NULL;
  u32 addr, end, mask = 0, shift = 0, idx;

  // mark memory blocks as containing compiled code
  if ((block->addr & 0xc7fc0000) == 0x06000000
      || (block->addr & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      drc_ram_blk = Pico32xMem->drcblk_da[tcache_id-1];
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      drc_ram_blk = Pico32xMem->drcblk_ram;
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    // mark recompiled insns
    addr = block->addr & ~((1 << shift) - 1);
    end = block->addr + block->size;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark literal pool
    if (addr < (block->addr_lit & ~((1 << shift) - 1)))
      addr = block->addr_lit & ~((1 << shift) - 1);
    end = block->addr_lit + block->size_lit;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark for literals disabled
    if (nolit) {
      addr = nolit & ~((1 << shift) - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
        lit_ram_blk[idx++] = 1;
    }

    if (mark < 0)
      rm_from_block_lists(block);
    else {
      // add to invalidation lookup lists
      addr = block->addr & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr + block->size;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);

      if (addr < (block->addr_lit & ~(INVAL_PAGE_SIZE - 1)))
        addr = block->addr_lit & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
    }
  }
}
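
// scan the literal-disable map for [start,end); returns the first address
// whose literals were overwritten, or end if the range is still clean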
static u32 dr_check_nolit(u32 start, u32 end, int tcache_id)
{
  u8 *lit_ram_blk = NULL;
  u32 mask = 0, shift = 0, addr, idx;

  if ((start & 0xc7fc0000) == 0x06000000
      || (start & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    addr = start & ~((1 << shift) - 1);
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      if (lit_ram_blk[idx++])
        break;

    return (addr < start ? start : addr > end ? end : addr);
  }

  return end;
}
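
// disable or delete a block: unlink it from the hash table and make incoming
// branches unresolved again; when freeing, also revoke outgoing links and
// clear the descriptor so the ring slot can be reused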
static void dr_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit, int free)
{
  struct block_link *bl;
  u32 i;

  free = free || nolit; // block is invalid if literals are overwritten
  dbg(2," %sing block %08x-%08x,%08x-%08x, blkid %d,%d", free?"delet":"disabl",
    bd->addr, bd->addr + bd->size, bd->addr_lit, bd->addr_lit + bd->size_lit,
    tcache_id, bd - block_tables[tcache_id]);
  if (bd->addr == 0 || bd->entry_count == 0) {
    dbg(1, " killing dead block!? %08x", bd->addr);
    return;
  }

#if LINK_BRANCHES
  // remove from hash table, make incoming links unresolved
  if (bd->active) {
    for (i = 0; i < bd->entry_count; i++) {
      rm_from_hashlist(&bd->entryp[i], tcache_id);

      while ((bl = bd->entryp[i].links) != NULL) {
        dr_block_unlink(bl, 1);
        add_to_hashlist_unresolved(bl, tcache_id);
      }
    }

    dr_mark_memory(-1, bd, tcache_id, nolit);
    add_to_block_list(&inactive_blocks[tcache_id], bd);
  }
  bd->active = 0;
#endif

  if (free) {
#if LINK_BRANCHES
    // revoke outgoing links
    for (bl = bd->entryp[0].o_links; bl != NULL; bl = bl->o_next) {
      if (bl->target)
        dr_block_unlink(bl, 0);
      else
        rm_from_hashlist_unresolved(bl, tcache_id);
      bl->jump = NULL;
      bl->next = blink_free[bl->tcache_id];
      blink_free[bl->tcache_id] = bl;
    }
    bd->entryp[0].o_links = NULL;
#endif
    // invalidate block
    rm_from_block_lists(bd);
    bd->addr = bd->size = bd->addr_lit = bd->size_lit = 0;
    bd->entry_count = 0;
    bd->entryp = NULL;
  }
  emith_update_cache();
}

static struct block_desc *dr_find_inactive_block(int tcache_id, u16 crc,
  u32 addr, int size, u32 addr_lit, int size_lit)
{
  struct block_list **head = &inactive_blocks[tcache_id];
  struct block_list *current;

  for (current = *head; current != NULL; current = current->next) {
    struct block_desc *block = current->block;
    if (block->crc == crc && block->addr == addr && block->size == size &&
        block->addr_lit == addr_lit && block->size_lit == size_lit)
    {
      rm_from_block_lists(block);
      return block;
    }
  }
  return NULL;
}

static struct block_desc *dr_add_block(int entries, u32 addr, int size,
  u32 addr_lit, int size_lit, u16 crc, int is_slave, int *blk_id)
{
  struct block_entry *be;
  struct block_desc *bd;
  int tcache_id;

  // do a lookup to get tcache_id and override check
  be = dr_get_entry(addr, is_slave, &tcache_id);
  if (be != NULL)
    dbg(1, "block override for %08x", addr);

  if (block_ring[tcache_id].used + 1 > block_ring[tcache_id].size ||
      entry_ring[tcache_id].used + entries > entry_ring[tcache_id].size) {
    dbg(1, "bd overflow for tcache %d", tcache_id);
    return NULL;
  }

  *blk_id = block_ring[tcache_id].next;
  bd = ring_alloc(&block_ring[tcache_id], 1);
  bd->entryp = ring_alloc(&entry_ring[tcache_id], entries);

  bd->addr = addr;
  bd->size = size;
  bd->addr_lit = addr_lit;
  bd->size_lit = size_lit;
  bd->tcache_ptr = tcache_ptr;
  bd->crc = crc;
  bd->active = 0;
  bd->list = NULL;
  bd->entry_count = 0;
#if (DRC_DEBUG & 2)
  bd->refcount = 0;
#endif

  return bd;
}

static void dr_link_blocks(struct block_entry *be, int tcache_id)
{
#if LINK_BRANCHES
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  u32 pc = be->pc;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], pc, tcmask);
  struct block_link *bl = *head, *next;

  while (bl != NULL) {
    next = bl->next;
    if (bl->target_pc == pc && (!bl->tcache_id || bl->tcache_id == tcache_id)) {
      rm_from_hashlist_unresolved(bl, bl->tcache_id);
      dr_block_link(be, bl, 1);
    }
    bl = next;
  }
#endif
}

static void dr_link_outgoing(struct block_entry *be, int tcache_id, int is_slave)
{
#if LINK_BRANCHES
  struct block_link *bl;
  int target_tcache_id;

  for (bl = be->o_links; bl; bl = bl->o_next) {
    if (bl->target == NULL) {
      be = dr_get_entry(bl->target_pc, is_slave, &target_tcache_id);
      if (be != NULL && (!target_tcache_id || target_tcache_id == tcache_id)) {
        // remove bl from unresolved_links (must've been since target was NULL)
        rm_from_hashlist_unresolved(bl, bl->tcache_id);
        dr_block_link(be, bl, 1);
      }
    }
  }
#endif
}

static void dr_activate_block(struct block_desc *bd, int tcache_id, int is_slave)
{
  int i;

  // connect branches
  for (i = 0; i < bd->entry_count; i++) {
    struct block_entry *entry = &bd->entryp[i];
    add_to_hashlist(entry, tcache_id);
    // incoming branches
    dr_link_blocks(entry, tcache_id);
    if (!tcache_id)
      dr_link_blocks(entry, is_slave?2:1);
    // outgoing branches
    dr_link_outgoing(entry, tcache_id, is_slave);
  }

  // mark memory for overwrite detection
  dr_mark_memory(1, bd, tcache_id, 0);
  bd->active = 1;
}

static void REGPARM(3) *dr_lookup_block(u32 pc, SH2 *sh2, int *tcache_id)
{
  struct block_entry *be = NULL;
  void *block = NULL;

  be = dr_get_entry(pc, sh2->is_slave, tcache_id);
  if (be != NULL)
    block = be->tcache_ptr;

#if (DRC_DEBUG & 2)
  if (be != NULL)
    be->block->refcount++;
#endif
  return block;
}

static void dr_free_oldest_block(int tcache_id)
{
  struct block_desc *bf;

  bf = ring_first(&block_ring[tcache_id]);
  if (bf->addr && bf->entry_count)
    dr_rm_block_entry(bf, tcache_id, 0, 1);
  ring_free(&block_ring[tcache_id], 1);

  if (block_ring[tcache_id].used) {
    bf = ring_first(&block_ring[tcache_id]);
    ring_free_p(&entry_ring[tcache_id], bf->entryp);
    ring_free_p(&tcache_ring[tcache_id], bf->tcache_ptr);
  } else {
    // reset since size of code block isn't known if no successor block exists
    ring_reset(&block_ring[tcache_id]);
    ring_reset(&entry_ring[tcache_id]);
    ring_reset(&tcache_ring[tcache_id]);
  }
}
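
// make sure rb has count free elements at the write position, evicting the
// oldest blocks as needed; wraps the ring when the tail of the buffer is
// too small for the request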
static inline void dr_reserve_cache(int tcache_id, struct ring_buffer *rb, int count)
{
  // while not enough space available
  if (rb->next + count >= rb->size) {
    // not enough space in rest of buffer -> wrap around
    while (rb->first >= rb->next && rb->used)
      dr_free_oldest_block(tcache_id);
    if (rb->first == 0 && rb->used)
      dr_free_oldest_block(tcache_id);
    ring_wrap(rb);
  }
  while (rb->first >= rb->next && rb->next + count > rb->first && rb->used)
    dr_free_oldest_block(tcache_id);
}
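
// reserve space for a new block: one descriptor, entry_count entries, and a
// worst-case insn_count*128 bytes of code; if anything was evicted, the
// branch caches and return stacks are cleared since they may point into
// freed code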
static u8 *dr_prepare_cache(int tcache_id, int insn_count, int entry_count)
{
  int bf = block_ring[tcache_id].first;

  // reserve one block desc
  if (block_ring[tcache_id].used >= block_ring[tcache_id].size)
    dr_free_oldest_block(tcache_id);
  // reserve block entries
  dr_reserve_cache(tcache_id, &entry_ring[tcache_id], entry_count);
  // reserve cache space
  dr_reserve_cache(tcache_id, &tcache_ring[tcache_id], insn_count*128);

  if (bf != block_ring[tcache_id].first) {
    // deleted some block(s), clear branch cache and return stack
#if BRANCH_CACHE
    if (tcache_id)
      memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    else {
      memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
      memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
    }
#endif
#if CALL_STACK
    if (tcache_id) {
      memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      sh2s[tcache_id-1].rts_cache_idx = 0;
    } else {
      memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
      sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
    }
#endif
  }

  return ring_next(&tcache_ring[tcache_id]);
}

static void dr_flush_tcache(int tcid)
{
  int i;
#if (DRC_DEBUG & 1)
  elprintf(EL_STATUS, "tcache #%d flush! (%d/%d, bds %d/%d bes %d/%d)", tcid,
    tcache_ring[tcid].used, tcache_ring[tcid].size, block_ring[tcid].used,
    block_ring[tcid].size, entry_ring[tcid].used, entry_ring[tcid].size);
#endif

  ring_reset(&tcache_ring[tcid]);
  ring_reset(&block_ring[tcid]);
  ring_reset(&entry_ring[tcid]);

  block_link_pool_counts[tcid] = 0;
  blink_free[tcid] = NULL;
  memset(unresolved_links[tcid], 0, sizeof(*unresolved_links[0]) * HASH_TABLE_SIZE(tcid));
  memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * HASH_TABLE_SIZE(tcid));

  if (Pico32xMem->sdram != NULL) {
    if (tcid == 0) { // ROM, RAM
      memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
      memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
      memset(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache));
      memset(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache));
      memset(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache));
      memset(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache));
      sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
    } else {
      memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
      memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
      memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[tcid - 1]));
      memset(Pico32xMem->drclit_da[tcid - 1], 0, sizeof(Pico32xMem->drclit_da[tcid - 1]));
      memset(sh2s[tcid - 1].branch_cache, -1, sizeof(sh2s[0].branch_cache));
      memset(sh2s[tcid - 1].rts_cache, -1, sizeof(sh2s[0].rts_cache));
      sh2s[tcid - 1].rts_cache_idx = 0;
    }
  }
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcid] = tcache_ring[tcid].base;
#endif

  for (i = 0; i < RAM_SIZE(tcid) / INVAL_PAGE_SIZE; i++)
    discard_block_list(&inval_lookup[tcid][i]);
  discard_block_list(&inactive_blocks[tcid]);
}

static void *dr_failure(void)
{
  printf("recompilation failed\n");
  exit(1);
}

// ---------------------------------------------------------------

// NB rcache allocation dependencies:
// - get_reg_arg/get_tmp_arg first (might evict other regs just allocated)
// - get_reg(..., NULL) before get_reg(..., &hr) if it might get the same reg
// - get_reg(..., RC_GR_READ/RMW, ...) before WRITE (might evict needed reg)

// register cache / constant propagation stuff
typedef enum {
  RC_GR_READ,
  RC_GR_WRITE,
  RC_GR_RMW,
} rc_gr_mode;

typedef struct {
  u32 gregs;
  u32 val;
} gconst_t;

gconst_t gconsts[ARRAY_SIZE(guest_regs)];
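
// a gconst_t slot holds one constant value plus the set of guest regs
// currently known to hold it; guest_regs[r].cnst indexes into this table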
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr);
static inline int rcache_is_cached(sh2_reg_e r);
static void rcache_add_vreg_alias(int x, sh2_reg_e r);
static void rcache_remove_vreg_alias(int x, sh2_reg_e r);
static void rcache_evict_vreg(int x);
static void rcache_remap_vreg(int x);

#define RCACHE_DUMP(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i; \
  printf("cache dump %s:\n",msg); \
  printf(" cache_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->type != HR_FREE || cp->gregs || cp->locked || cp->flags) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
  } \
  printf(" guest_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 || gp->sreg >= 0 || gp->flags) \
      printf(" %d: v=%d f=%x s=%d c=%d\n", i, gp->vreg, gp->flags, gp->sreg, gp->cnst); \
  } \
  printf(" gconsts:\n"); \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    if (gconsts[i].gregs) \
      printf(" %d: m=%x v=%x\n", i, gconsts[i].gregs, gconsts[i].val); \
  } \
}

#define RCACHE_CHECK(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i, x, m = 0, d = 0; \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->flags & HRF_PINNED) m |= (1 << i); \
    if (cp->type == HR_FREE || cp->type == HR_TEMP) continue; \
    /* check connectivity greg->vreg */ \
    FOR_ALL_BITS_SET_DO(cp->gregs, x, \
      if (guest_regs[x].vreg != i) \
        { d = 1; printf("cache check v=%d r=%d not connected?\n",i,x); } \
    ) \
  } \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 && !(cache_regs[gp->vreg].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d v=%d not connected?\n", i, gp->vreg); }\
    if (gp->vreg != -1 && cache_regs[gp->vreg].type != HR_CACHED) \
      { d = 1; printf("cache check r=%d v=%d wrong type?\n", i, gp->vreg); }\
    if ((gp->flags & GRF_CONST) && !(gconsts[gp->cnst].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d c=%d not connected?\n", i, gp->cnst); }\
    if ((gp->flags & GRF_CDIRTY) && (gp->vreg != -1 || !(gp->flags & GRF_CONST)))\
      { d = 1; printf("cache check r=%d CDIRTY?\n", i); } \
    if (gp->flags & (GRF_STATIC|GRF_PINNED)) { \
      if (gp->sreg == -1 || !(cache_regs[gp->sreg].flags & HRF_PINNED))\
        { d = 1; printf("cache check r=%d v=%d not pinned?\n", i, gp->vreg); } \
      else m &= ~(1 << gp->sreg); \
    } \
  } \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    FOR_ALL_BITS_SET_DO(gconsts[i].gregs, x, \
      if (guest_regs[x].cnst != i || !(guest_regs[x].flags & GRF_CONST)) \
        { d = 1; printf("cache check c=%d v=%d not connected?\n",i,x); } \
    ) \
  } \
  if (m) \
    { d = 1; printf("cache check m=%x pinning wrong?\n",m); } \
  if (d) RCACHE_DUMP(msg) \
  /* else { \
    printf("locked regs %s:\n",msg); \
    for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
      cp = &cache_regs[i]; \
      if (cp->locked) \
        printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
    } \
  } */ \
}

#if PROPAGATE_CONSTANTS
static inline int gconst_alloc(sh2_reg_e r)
{
  int i, n = -1;

  for (i = 0; i < ARRAY_SIZE(gconsts); i++) {
    gconsts[i].gregs &= ~(1 << r);
    if (gconsts[i].gregs == 0 && n < 0)
      n = i;
  }
  if (n >= 0)
    gconsts[n].gregs = (1 << r);
  else {
    printf("all gconst buffers in use, aborting\n");
    exit(1); // cannot happen - more constants than guest regs?
  }
  return n;
}

static void gconst_set(sh2_reg_e r, u32 val)
{
  int i = gconst_alloc(r);

  guest_regs[r].flags |= GRF_CONST;
  guest_regs[r].cnst = i;
  gconsts[i].val = val;
}

static void gconst_new(sh2_reg_e r, u32 val)
{
  gconst_set(r, val);
  guest_regs[r].flags |= GRF_CDIRTY;

  // throw away old r that we might have cached
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
}
#endif

static int gconst_get(sh2_reg_e r, u32 *val)
{
  if (guest_regs[r].flags & GRF_CONST) {
    *val = gconsts[guest_regs[r].cnst].val;
    return 1;
  }
  *val = 0;
  return 0;
}

static int gconst_check(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    return 1;
  return 0;
}

// update hr if dirty, else do nothing
static int gconst_try_read(int vreg, sh2_reg_e r)
{
  int i, x;

  if (guest_regs[r].flags & GRF_CDIRTY) {
    x = guest_regs[r].cnst;
    emith_move_r_imm(cache_regs[vreg].hreg, gconsts[x].val);
    FOR_ALL_BITS_SET_DO(gconsts[x].gregs, i,
      {
        if (guest_regs[i].vreg >= 0 && guest_regs[i].vreg != vreg)
          rcache_remove_vreg_alias(guest_regs[i].vreg, i);
        if (guest_regs[i].vreg < 0)
          rcache_add_vreg_alias(vreg, i);
        guest_regs[i].flags &= ~GRF_CDIRTY;
        guest_regs[i].flags |= GRF_DIRTY;
      });
    cache_regs[vreg].type = HR_CACHED;
    cache_regs[vreg].flags |= HRF_DIRTY;
    return 1;
  }
  return 0;
}

static u32 gconst_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY)
      mask |= (1 << i);
  return mask;
}

static void gconst_kill(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    gconsts[guest_regs[r].cnst].gregs &= ~(1 << r);
  guest_regs[r].flags &= ~(GRF_CONST|GRF_CDIRTY);
}

static void gconst_copy(sh2_reg_e rd, sh2_reg_e rs)
{
  gconst_kill(rd);
  if (guest_regs[rs].flags & GRF_CONST) {
    guest_regs[rd].flags |= GRF_CONST;
    if (guest_regs[rd].vreg < 0)
      guest_regs[rd].flags |= GRF_CDIRTY;
    guest_regs[rd].cnst = guest_regs[rs].cnst;
    gconsts[guest_regs[rd].cnst].gregs |= (1 << rd);
  }
}

static void gconst_clean(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY) {
      // using RC_GR_READ here: it will call gconst_try_read,
      // cache the reg and mark it dirty.
      rcache_get_reg_(i, RC_GR_READ, 0, NULL);
    }
}

static void gconst_invalidate(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_CONST|GRF_CDIRTY))
      gconsts[guest_regs[i].cnst].gregs &= ~(1 << i);
    guest_regs[i].flags &= ~(GRF_CONST|GRF_CDIRTY);
  }
}

static u16 rcache_counter;
// SH2 register usage bitmasks
static u32 rcache_vregs_reg;     // regs of type HRT_REG (for pinning)
static u32 rcache_regs_static;   // statically allocated regs
static u32 rcache_regs_pinned;   // pinned regs
static u32 rcache_regs_now;      // regs used in current insn
static u32 rcache_regs_soon;     // regs used in the next few insns
static u32 rcache_regs_late;     // regs used in later insns
static u32 rcache_regs_discard;  // regs overwritten without being used
static u32 rcache_regs_clean;    // regs needing cleaning
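
// illustrative example (not from the original source): for an insn sequence
// "add r1,r2" followed by "mov r2,r3", usage analysis would set
// rcache_regs_now to the R1|R2 bits and rcache_regs_soon to the R2|R3 bits,
// so the allocator prefers evicting anything else (see rcache_allocate below)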

static void rcache_lock_vreg(int x)
{
  if (x >= 0) {
    cache_regs[x].locked ++;
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("locking free vreg %x, aborting\n", x);
      exit(1);
    }
    if (!cache_regs[x].locked) {
      printf("locking overflow vreg %x, aborting\n", x);
      exit(1);
    }
#endif
  }
}

static void rcache_unlock_vreg(int x)
{
  if (x >= 0) {
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("unlocking free vreg %x, aborting\n", x);
      exit(1);
    }
#endif
    if (cache_regs[x].locked)
      cache_regs[x].locked --;
  }
}

static void rcache_free_vreg(int x)
{
  cache_regs[x].type = cache_regs[x].locked ? HR_TEMP : HR_FREE;
  cache_regs[x].flags &= HRF_PINNED;
  cache_regs[x].gregs = 0;
}

static void rcache_unmap_vreg(int x)
{
  int i;

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, i,
      if (guest_regs[i].flags & GRF_DIRTY) {
        // if a dirty reg is unmapped save its value to context
        if ((~rcache_regs_discard | rcache_regs_now) & (1 << i))
          emith_ctx_write(cache_regs[x].hreg, i * 4);
        guest_regs[i].flags &= ~GRF_DIRTY;
      }
      guest_regs[i].vreg = -1);
  rcache_free_vreg(x);
}

static void rcache_move_vreg(int d, int x)
{
  int i;

  cache_regs[d].type = HR_CACHED;
  cache_regs[d].gregs = cache_regs[x].gregs;
  cache_regs[d].flags &= HRF_PINNED;
  cache_regs[d].flags |= cache_regs[x].flags & ~HRF_PINNED;
  cache_regs[d].locked = 0;
  cache_regs[d].stamp = cache_regs[x].stamp;
  emith_move_r_r(cache_regs[d].hreg, cache_regs[x].hreg);
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].vreg == x)
      guest_regs[i].vreg = d;
  rcache_free_vreg(x);
}

static void rcache_clean_vreg(int x)
{
  u32 rns = rcache_regs_now | rcache_regs_soon;
  int r;

  if (cache_regs[x].flags & HRF_DIRTY) { // writeback
    cache_regs[x].flags &= ~HRF_DIRTY;
    rcache_lock_vreg(x);
    FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, r,
        if (guest_regs[r].flags & GRF_DIRTY) {
          if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) {
            if (guest_regs[r].vreg != guest_regs[r].sreg &&
                !cache_regs[guest_regs[r].sreg].locked &&
                ((~rcache_regs_discard | rcache_regs_now) & (1 << r)) &&
                !(rns & cache_regs[guest_regs[r].sreg].gregs)) {
              // statically mapped reg not in its sreg. move back to sreg
              rcache_evict_vreg(guest_regs[r].sreg);
              emith_move_r_r(cache_regs[guest_regs[r].sreg].hreg,
                  cache_regs[guest_regs[r].vreg].hreg);
              rcache_remove_vreg_alias(x, r);
              rcache_add_vreg_alias(guest_regs[r].sreg, r);
              cache_regs[guest_regs[r].sreg].flags |= HRF_DIRTY;
            } else
              // cannot remap. keep dirty for writeback in unmap
              cache_regs[x].flags |= HRF_DIRTY;
          } else {
            if ((~rcache_regs_discard | rcache_regs_now) & (1 << r))
              emith_ctx_write(cache_regs[x].hreg, r * 4);
            guest_regs[r].flags &= ~GRF_DIRTY;
          }
          rcache_regs_clean &= ~(1 << r);
        })
    rcache_unlock_vreg(x);
  }

#if DRC_DEBUG & 64
  RCACHE_CHECK("after clean");
#endif
}

static void rcache_add_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs |= (1 << r);
  guest_regs[r].vreg = x;
  cache_regs[x].type = HR_CACHED;
}

static void rcache_remove_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs &= ~(1 << r);
  if (!cache_regs[x].gregs) {
    // no reg mapped -> free vreg
    if (cache_regs[x].locked)
      cache_regs[x].type = HR_TEMP;
    else
      rcache_free_vreg(x);
  }
  guest_regs[r].vreg = -1;
}

static void rcache_evict_vreg(int x)
{
#if REMAP_REGISTER
  rcache_remap_vreg(x);
#else
  rcache_clean_vreg(x);
#endif
  rcache_unmap_vreg(x);
}

static void rcache_evict_vreg_aliases(int x, sh2_reg_e r)
{
  rcache_remove_vreg_alias(x, r);
  rcache_evict_vreg(x);
  rcache_add_vreg_alias(x, r);
}

static int rcache_allocate(int what, int minprio)
{
  // evict reg with oldest stamp (only for HRT_REG, no temps)
  int i, i_prio, oldest = -1, prio = 0;
  u16 min_stamp = (u16)-1;

  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--) {
    // consider only non-static, unpinned, unlocked REG or TEMP
    if ((cache_regs[i].flags & HRF_PINNED) || cache_regs[i].locked)
      continue;
    if ((what > 0 && !(cache_regs[i].htype & HRT_REG)) ||   // get a REG
        (what == 0 && (cache_regs[i].htype & HRT_TEMP)) ||  // get a non-TEMP
        (what < 0 && !(cache_regs[i].htype & HRT_TEMP)))    // get a TEMP
      continue;
    if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP) {
      // REG is free
      prio = 10;
      oldest = i;
      break;
    }
    if (cache_regs[i].type == HR_CACHED) {
      if (rcache_regs_now & cache_regs[i].gregs)
        // REGs needed for the current insn
        i_prio = 0;
      else if (rcache_regs_soon & cache_regs[i].gregs)
        // REGs needed in the next insns
        i_prio = 2;
      else if (rcache_regs_late & cache_regs[i].gregs)
        // REGs needed in some future insn
        i_prio = 4;
      else if (~rcache_regs_discard & cache_regs[i].gregs)
        // REGs not needed in the foreseeable future
        i_prio = 6;
      else
        // REGs soon overwritten anyway
        i_prio = 8;
      if (!(cache_regs[i].flags & HRF_DIRTY)) i_prio ++;

      if (prio < i_prio || (prio == i_prio && cache_regs[i].stamp < min_stamp)) {
        min_stamp = cache_regs[i].stamp;
        oldest = i;
        prio = i_prio;
      }
    }
  }

  if (prio < minprio || oldest == -1)
    return -1;

  if (cache_regs[oldest].type == HR_CACHED)
    rcache_evict_vreg(oldest);
  else
    rcache_free_vreg(oldest);

  return oldest;
}

static int rcache_allocate_vreg(int needed)
{
  int x;

  x = rcache_allocate(1, needed ? 0 : 4);
  if (x < 0)
    x = rcache_allocate(-1, 0);
  return x;
}

static int rcache_allocate_nontemp(void)
{
  int x = rcache_allocate(0, 4);
  return x;
}

static int rcache_allocate_temp(void)
{
  int x = rcache_allocate(-1, 0);
  if (x < 0)
    x = rcache_allocate(0, 0);
  return x;
}
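
// note: REG-class vregs live in callee-saved host registers and survive
// emitted function calls; TEMP-class vregs (return/parameter registers) are
// call-clobbered and must be cleaned/invalidated around calls (see
// rcache_clean_tmp/rcache_invalidate_tmp below)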
#if REMAP_REGISTER
// maps a host register to a REG
static int rcache_map_reg(sh2_reg_e r, int hr, int mode)
{
  int x, i;

  gconst_kill(r);

  // lookup the TEMP hr maps to
  i = reg_map_host[hr];
  if (i < 0) {
    // must not happen
    printf("invalid host register %d\n", hr);
    exit(1);
  }

  // deal with statically mapped regs
  if (mode == RC_GR_RMW && (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED))) {
    x = guest_regs[r].sreg;
    if (guest_regs[r].vreg == x) {
      // STATIC in its sreg with no aliases, and some processing pending
      if (cache_regs[x].gregs == 1 << r)
        return cache_regs[x].hreg;
    } else if (cache_regs[x].type == HR_FREE ||
        (cache_regs[x].type == HR_TEMP && !cache_regs[x].locked))
      // STATIC not in its sreg, with sreg available -> move it
      i = guest_regs[r].sreg;
  }

  // remove old mappings of r and i if one exists
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);

  // set new mapping
  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 1 << r;
  cache_regs[i].flags &= HRF_PINNED;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  cache_regs[i].flags |= HRF_DIRTY;
  rcache_lock_vreg(i);
  guest_regs[r].flags |= GRF_DIRTY;
  guest_regs[r].vreg = i;
#if DRC_DEBUG & 64
  RCACHE_CHECK("after map");
#endif
  return cache_regs[i].hreg;
}

// remap vreg from a TEMP to a REG if it will be used (upcoming TEMP invalidation)
static void rcache_remap_vreg(int x)
{
  u32 rsl_d = rcache_regs_soon | rcache_regs_late;
  int d;

  // x must be a cached vreg
  if (cache_regs[x].type != HR_CACHED || cache_regs[x].locked)
    return;
  // don't do it if x isn't used
  if (!(rsl_d & cache_regs[x].gregs)) {
    // clean here to avoid data loss on invalidation
    rcache_clean_vreg(x);
    return;
  }

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, d,
    if ((guest_regs[d].flags & (GRF_STATIC|GRF_PINNED)) &&
        !cache_regs[guest_regs[d].sreg].locked &&
        !((rsl_d|rcache_regs_now) & cache_regs[guest_regs[d].sreg].gregs)) {
      // STATIC not in its sreg and sreg is available
      rcache_evict_vreg(guest_regs[d].sreg);
      rcache_move_vreg(guest_regs[d].sreg, x);
      return;
    }
  )

  // allocate a non-TEMP vreg
  rcache_lock_vreg(x); // lock to avoid evicting x
  d = rcache_allocate_nontemp();
  rcache_unlock_vreg(x);
  if (d < 0) {
    rcache_clean_vreg(x);
    return;
  }

  // move vreg to new location
  rcache_move_vreg(d, x);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after remap");
#endif
}
#endif

#if ALIAS_REGISTERS
static void rcache_alias_vreg(sh2_reg_e rd, sh2_reg_e rs)
{
  int x;

  // if s isn't constant, it must be in cache for aliasing
  if (!gconst_check(rs))
    rcache_get_reg_(rs, RC_GR_READ, 0, NULL);

  // if d and s are not already aliased
  x = guest_regs[rs].vreg;
  if (guest_regs[rd].vreg != x) {
    // remove possible old mapping of dst
    if (guest_regs[rd].vreg >= 0)
      rcache_remove_vreg_alias(guest_regs[rd].vreg, rd);
    // make dst an alias of src
    if (x >= 0)
      rcache_add_vreg_alias(x, rd);
    // if d is now in cache, it must be dirty
    if (guest_regs[rd].vreg >= 0) {
      x = guest_regs[rd].vreg;
      cache_regs[x].flags |= HRF_DIRTY;
      guest_regs[rd].flags |= GRF_DIRTY;
    }
  }

  gconst_copy(rd, rs);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after alias");
#endif
}
#endif

// note: must not be called when doing conditional code
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr)
{
  int src, dst, ali;
  cache_reg_t *tr;
  u32 rsp_d = (rcache_regs_soon | rcache_regs_static | rcache_regs_pinned) &
      ~rcache_regs_discard;

  dst = src = guest_regs[r].vreg;
  rcache_lock_vreg(src); // lock to avoid evicting src
  // good opportunity to relocate a remapped STATIC?
  if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
      src != guest_regs[r].sreg && (src < 0 || mode != RC_GR_READ) &&
      !cache_regs[guest_regs[r].sreg].locked &&
      !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[r].sreg].gregs)) {
    dst = guest_regs[r].sreg;
    rcache_evict_vreg(dst);
  } else if (dst < 0) {
    // allocate a cache register
    if ((dst = rcache_allocate_vreg(rsp_d & (1 << r))) < 0) {
      printf("no registers to evict, aborting\n");
      exit(1);
    }
  }
  tr = &cache_regs[dst];
  tr->stamp = rcache_counter;
  // remove r from src
  if (src >= 0 && src != dst)
    rcache_remove_vreg_alias(src, r);
  rcache_unlock_vreg(src);

  // if r has a constant it may have aliases
  if (mode != RC_GR_WRITE && gconst_try_read(dst, r))
    src = dst;

  // if r will be modified, check for aliases being needed soon
  ali = tr->gregs & ~(1 << r);
  if (mode != RC_GR_READ && src == dst && ali) {
    int x = -1;

    if ((rsp_d|rcache_regs_now) & ali) {
      if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
          guest_regs[r].sreg == dst && !tr->locked) {
        // split aliases if r is STATIC in sreg and dst isn't already locked
        int t;
        FOR_ALL_BITS_SET_DO(ali, t,
          if ((guest_regs[t].flags & (GRF_STATIC|GRF_PINNED)) &&
              !(ali & ~(1 << t)) &&
              !cache_regs[guest_regs[t].sreg].locked &&
              !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[t].sreg].gregs)) {
            // alias is a single STATIC and its sreg is available
            x = guest_regs[t].sreg;
            rcache_evict_vreg(x);
          } else {
            rcache_lock_vreg(dst); // lock to avoid evicting dst
            x = rcache_allocate_vreg(rsp_d & ali);
            rcache_unlock_vreg(dst);
          }
          break;
        )
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          src = dst;
          rcache_move_vreg(x, dst);
        }
      } else {
        // split r
        rcache_lock_vreg(src); // lock to avoid evicting src
        x = rcache_allocate_vreg(rsp_d & (1 << r));
        rcache_unlock_vreg(src);
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          dst = x;
          tr = &cache_regs[dst];
          tr->stamp = rcache_counter;
        }
      }
    }
    if (x < 0)
      // aliases not needed or no vreg available, remove them
      rcache_evict_vreg_aliases(dst, r);
  }

  // assign r to dst
  rcache_add_vreg_alias(dst, r);

  // handle dst register transfer
  if (src < 0 && mode != RC_GR_WRITE)
    emith_ctx_read(tr->hreg, r * 4);
  if (hr) {
    *hr = (src >= 0 ? cache_regs[src].hreg : tr->hreg);
    rcache_lock_vreg(src >= 0 ? src : dst);
  } else if (src >= 0 && mode != RC_GR_WRITE && cache_regs[src].hreg != tr->hreg)
    emith_move_r_r(tr->hreg, cache_regs[src].hreg);

  // housekeeping
  if (do_locking)
    rcache_lock_vreg(dst);
  if (mode != RC_GR_READ) {
    tr->flags |= HRF_DIRTY;
    guest_regs[r].flags |= GRF_DIRTY;
    gconst_kill(r);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getreg");
#endif
  return tr->hreg;
}

static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr)
{
  return rcache_get_reg_(r, mode, 1, hr);
}
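
// usage sketch (illustrative, not part of the original source):
//   int hr = rcache_get_reg(SHR_R0, RC_GR_RMW, NULL); // R0 cached and dirty
//   emith_add_r_r_imm(hr, hr, 4);                     // operate on host reg
//   rcache_unlock_all();                              // end-of-insn unlock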
static void rcache_pin_reg(sh2_reg_e r)
{
  int hr, x;

  // don't pin if static or already pinned
  if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED))
    return;

  rcache_regs_soon |= (1 << r); // kludge to prevent allocation of a temp
  hr = rcache_get_reg_(r, RC_GR_RMW, 0, NULL);
  x = reg_map_host[hr];

  // can only pin non-TEMPs
  if (!(cache_regs[x].htype & HRT_TEMP)) {
    guest_regs[r].flags |= GRF_PINNED;
    cache_regs[x].flags |= HRF_PINNED;
    guest_regs[r].sreg = x;
    rcache_regs_pinned |= (1 << r);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after pin");
#endif
}

static int rcache_get_tmp(void)
{
  int i;

  i = rcache_allocate_temp();
  if (i < 0) {
    printf("cannot allocate temp\n");
    exit(1);
  }

  cache_regs[i].type = HR_TEMP;
  rcache_lock_vreg(i);

  return cache_regs[i].hreg;
}

static int rcache_get_vreg_hr(int hr)
{
  int i;

  i = reg_map_host[hr];
  if (i < 0 || cache_regs[i].locked) {
    printf("host register %d is locked\n", hr);
    exit(1);
  }

  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);
  else if (cache_regs[i].type == HR_TEMP && cache_regs[i].locked) {
    printf("host reg %d already used, aborting\n", hr);
    exit(1);
  }

  return i;
}

static int rcache_get_vreg_arg(int arg)
{
  int hr = 0;

  host_arg2reg(hr, arg);
  return rcache_get_vreg_hr(hr);
}

// get a reg to be used as function arg
static int rcache_get_tmp_arg(int arg)
{
  int x = rcache_get_vreg_arg(arg);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}

// ... as return value after a call
static int rcache_get_tmp_ret(void)
{
  int x = rcache_get_vreg_hr(RET_REG);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}

// same but caches a reg if access is readonly (announced by hr being NULL)
static int rcache_get_reg_arg(int arg, sh2_reg_e r, int *hr)
{
  int i, srcr, dstr, dstid, keep;
  u32 val;
  host_arg2reg(dstr, arg);

  i = guest_regs[r].vreg;
  if (i >= 0 && cache_regs[i].type == HR_CACHED && cache_regs[i].hreg == dstr)
    // r is already in arg, avoid evicting
    dstid = i;
  else
    dstid = rcache_get_vreg_arg(arg);
  dstr = cache_regs[dstid].hreg;

  if (rcache_is_cached(r)) {
    // r is needed later on anyway
    srcr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
    keep = 1;
  } else if ((guest_regs[r].flags & GRF_CDIRTY) && gconst_get(r, &val)) {
    // r has an uncommitted const - load into arg, but keep constant uncommitted
    srcr = dstr;
    emith_move_r_imm(srcr, val);
    keep = 0;
  } else {
    // must read from ctx
    srcr = dstr;
    emith_ctx_read(srcr, r * 4);
    keep = 1;
  }

  if (cache_regs[dstid].type == HR_CACHED)
    rcache_evict_vreg(dstid);
  cache_regs[dstid].type = HR_TEMP;

  if (hr == NULL) {
    if (dstr != srcr)
      // arg is a copy of cached r
      emith_move_r_r(dstr, srcr);
    else if (keep && guest_regs[r].vreg < 0)
      // keep arg as vreg for r
      rcache_add_vreg_alias(dstid, r);
  } else {
    *hr = srcr;
    if (dstr != srcr) // must lock srcr if not copied here
      rcache_lock_vreg(reg_map_host[srcr]);
  }

  cache_regs[dstid].stamp = ++rcache_counter;
  rcache_lock_vreg(dstid);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getarg");
#endif
  return dstr;
}
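
// usage sketch (illustrative): preparing arguments for a memory handler call
//   rcache_get_reg_arg(0, rs, NULL);  // address guest reg into arg0
//   rcache_get_reg_arg(1, rd, NULL);  // data guest reg into arg1
//   emit_memhandler_write(2);         // invalidates temps, calls handler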
static void rcache_free_tmp(int hr)
{
  int i = reg_map_host[hr];

  if (i < 0 || cache_regs[i].type != HR_TEMP) {
    printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, cache_regs[i].type);
    exit(1);
  }

  rcache_unlock_vreg(i);
}

// saves temporary result either in REG or in drctmp
static int rcache_save_tmp(int hr)
{
  int i;

  // find REG, either free or unlocked temp or oldest non-hinted cached
  i = rcache_allocate_nontemp();
  if (i < 0) {
    // if none is available, store in drctmp
    emith_ctx_write(hr, offsetof(SH2, drc_tmp));
    rcache_free_tmp(hr);
    return -1;
  }

  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 0; // not storing any guest register
  cache_regs[i].flags &= HRF_PINNED;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  rcache_lock_vreg(i);
  emith_move_r_r(cache_regs[i].hreg, hr);
  rcache_free_tmp(hr);
  return i;
}

static int rcache_restore_tmp(int x)
{
  int hr;

  // find REG with tmp store: cached but with no gregs
  if (x >= 0) {
    if (cache_regs[x].type != HR_CACHED || cache_regs[x].gregs) {
      printf("invalid tmp storage %d\n", x);
      exit(1);
    }
    // found, transform to a TEMP
    cache_regs[x].type = HR_TEMP;
    return cache_regs[x].hreg;
  }

  // if not available, create a TEMP store and fetch from drctmp
  hr = rcache_get_tmp();
  emith_ctx_read(hr, offsetof(SH2, drc_tmp));

  return hr;
}

static void rcache_free(int hr)
{
  int x = reg_map_host[hr];
  rcache_unlock_vreg(x);
}

static void rcache_unlock(int x)
{
  if (x >= 0)
    cache_regs[x].locked = 0;
}

static void rcache_unlock_all(void)
{
  int i;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    cache_regs[i].locked = 0;
}

static void rcache_unpin_all(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & GRF_PINNED) {
      guest_regs[i].flags &= ~GRF_PINNED;
      cache_regs[guest_regs[i].sreg].flags &= ~HRF_PINNED;
      guest_regs[i].sreg = -1;
      rcache_regs_pinned &= ~(1 << i);
    }
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after unpin");
#endif
}

static void rcache_save_pinned(void)
{
  int i;

  // save pinned regs to context
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if ((guest_regs[i].flags & GRF_PINNED) && guest_regs[i].vreg >= 0)
      emith_ctx_write(cache_regs[guest_regs[i].vreg].hreg, i * 4);
}

static inline void rcache_set_usage_now(u32 mask)
{
  rcache_regs_now = mask;
}

static inline void rcache_set_usage_soon(u32 mask)
{
  rcache_regs_soon = mask;
}

static inline void rcache_set_usage_late(u32 mask)
{
  rcache_regs_late = mask;
}

static inline void rcache_set_usage_discard(u32 mask)
{
  rcache_regs_discard = mask;
}

static inline int rcache_is_cached(sh2_reg_e r)
{
  // is r in cache or needed soon?
  u32 rsc = rcache_regs_soon | rcache_regs_clean;
  return (guest_regs[r].vreg >= 0 || (rsc & (1 << r)));
}

static inline int rcache_is_hreg_used(int hr)
{
  int x = reg_map_host[hr];
  // is hr in use?
  return cache_regs[x].type != HR_FREE &&
      (cache_regs[x].type != HR_TEMP || cache_regs[x].locked);
}

static inline u32 rcache_used_hregs_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if ((cache_regs[i].htype & HRT_TEMP) && cache_regs[i].type != HR_FREE &&
        (cache_regs[i].type != HR_TEMP || cache_regs[i].locked))
      mask |= 1 << cache_regs[i].hreg;

  return mask;
}

static inline u32 rcache_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_DIRTY)
      mask |= 1 << i;
  mask |= gconst_dirty_mask();

  return mask;
}

static inline u32 rcache_cached_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED)
      mask |= cache_regs[i].gregs;

  return mask;
}

static void rcache_clean_tmp(void)
{
  int i;

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED && (cache_regs[i].htype & HRT_TEMP)) {
      rcache_unlock(i);
#if REMAP_REGISTER
      rcache_remap_vreg(i);
#else
      rcache_clean_vreg(i);
#endif
    }
  rcache_regs_clean = 0;
}

static void rcache_clean_masked(u32 mask)
{
  int i, r, hr;
  u32 m;

  rcache_regs_clean |= mask;
  mask = rcache_regs_clean;

  // clean constants where all aliases are covered by the mask, exempt statics
  // to avoid flushing them to context if sreg isn't available
  m = mask & ~(rcache_regs_static | rcache_regs_pinned);
  for (i = 0; i < ARRAY_SIZE(gconsts); i++)
    if ((gconsts[i].gregs & m) && !(gconsts[i].gregs & ~mask)) {
      FOR_ALL_BITS_SET_DO(gconsts[i].gregs, r,
          if (guest_regs[r].flags & GRF_CDIRTY) {
            hr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
            rcache_clean_vreg(reg_map_host[hr]);
            break;
          });
    }

  // clean vregs where all aliases are covered by the mask
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED &&
        (cache_regs[i].gregs & mask) && !(cache_regs[i].gregs & ~mask))
      rcache_clean_vreg(i);
}

static void rcache_clean(void)
{
  int i;
  gconst_clean();

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--)
    if (cache_regs[i].type == HR_CACHED)
      rcache_clean_vreg(i);

  // relocate statics to their sregs (necessary before conditional jumps)
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if ((guest_regs[i].flags & (GRF_STATIC|GRF_PINNED)) &&
        guest_regs[i].vreg != guest_regs[i].sreg) {
      rcache_lock_vreg(guest_regs[i].vreg);
      rcache_evict_vreg(guest_regs[i].sreg);
      rcache_unlock_vreg(guest_regs[i].vreg);

      if (guest_regs[i].vreg < 0)
        emith_ctx_read(cache_regs[guest_regs[i].sreg].hreg, i*4);
      else {
        emith_move_r_r(cache_regs[guest_regs[i].sreg].hreg,
            cache_regs[guest_regs[i].vreg].hreg);
        rcache_remove_vreg_alias(guest_regs[i].vreg, i);
      }

      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }
  rcache_regs_clean = 0;
}

static void rcache_invalidate_tmp(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype & HRT_TEMP) {
      rcache_unlock(i);
      if (cache_regs[i].type == HR_CACHED)
        rcache_evict_vreg(i);
      else
        rcache_free_vreg(i);
    }
  }
}

static void rcache_invalidate(void)
{
  int i;

  gconst_invalidate();
  rcache_unlock_all();

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    rcache_free_vreg(i);

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i].flags &= GRF_STATIC;
    if (!(guest_regs[i].flags & GRF_STATIC))
      guest_regs[i].vreg = -1;
    else {
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_counter = 0;
  rcache_regs_now = rcache_regs_soon = rcache_regs_late = 0;
  rcache_regs_discard = rcache_regs_clean = 0;
}

static void rcache_flush(void)
{
  rcache_clean();
  rcache_invalidate();
}
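
// note on the three teardown levels: rcache_clean writes dirty values back
// but keeps all mappings; rcache_invalidate drops every mapping without
// writeback (only statics are re-bound to their sregs); rcache_flush does
// both, leaving the context consistent for code outside the block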
static void rcache_create(void)
{
  int x = 0, i;

  // create cache_regs as host register representation
  // RET_REG/params should be first TEMPs to avoid allocation conflicts in calls
  cache_regs[x++] = (cache_reg_t) {.hreg = RET_REG, .htype = HRT_TEMP};
  for (i = 0; i < ARRAY_SIZE(hregs_param); i++)
    if (hregs_param[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_param[i],.htype = HRT_TEMP};

  for (i = 0; i < ARRAY_SIZE(hregs_temp); i++)
    if (hregs_temp[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_temp[i], .htype = HRT_TEMP};

  for (i = ARRAY_SIZE(hregs_saved)-1; i >= 0; i--)
    if (hregs_saved[i] != CONTEXT_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_saved[i], .htype = HRT_REG};

  if (x != ARRAY_SIZE(cache_regs)) {
    printf("rcache_create failed (conflicting register count)\n");
    exit(1);
  }

  // mapping from host_register to cache regs index
  memset(reg_map_host, -1, sizeof(reg_map_host));
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype)
      reg_map_host[cache_regs[i].hreg] = i;
    if (cache_regs[i].htype == HRT_REG)
      rcache_vregs_reg |= (1 << i);
  }

  // create static host register mapping for SH2 regs
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i] = (guest_reg_t){.sreg = -1};
  }
  for (i = 0; i < ARRAY_SIZE(regs_static); i += 2) {
    for (x = ARRAY_SIZE(cache_regs)-1; x >= 0; x--)
      if (cache_regs[x].hreg == regs_static[i+1]) break;
    if (x >= 0) {
      guest_regs[regs_static[i]] = (guest_reg_t){.flags = GRF_STATIC,.sreg = x};
      rcache_regs_static |= (1 << regs_static[i]);
      rcache_vregs_reg &= ~(1 << x);
    }
  }

  printf("DRC registers created, %ld host regs (%d REG, %d STATIC, 1 CTX)\n",
    CACHE_REGS+1L, count_bits(rcache_vregs_reg),count_bits(rcache_regs_static));
}

static void rcache_init(void)
{
  // create DRC data structures
  rcache_create();
  rcache_invalidate();
#if DRC_DEBUG & 64
  RCACHE_CHECK("after init");
#endif
}

// ---------------------------------------------------------------

// NB may return either REG or TEMP
static int emit_get_rbase_and_offs(SH2 *sh2, sh2_reg_e r, int rmode, u32 *offs)
{
  uptr omask = emith_rw_offs_max(); // offset mask
  u32 mask = 0;
  u32 a;
  int poffs;
  int hr, hr2;
  uptr la;

  // is r constant and points to a memory region?
  if (! gconst_get(r, &a))
    return -1;
  poffs = dr_ctx_get_mem_ptr(sh2, a, &mask);
  if (poffs == -1)
    return -1;

  if (mask < 0x20000) {
    // data array, BIOS, DRAM, can't safely access directly since host addr may
    // change (BIOS,da code may run on either core, DRAM may be switched)
    hr = rcache_get_tmp();
    a = (a + *offs) & mask;
    if (poffs == offsetof(SH2, p_da)) {
      // access sh2->data_array directly
      a += offsetof(SH2, data_array);
      emith_add_r_r_ptr_imm(hr, CONTEXT_REG, a & ~omask);
    } else {
      emith_ctx_read_ptr(hr, poffs);
      if (a & ~omask)
        emith_add_r_r_ptr_imm(hr, hr, a & ~omask);
    }
    *offs = a & omask;
    return hr;
  }

  // ROM, SDRAM. Host address should be mmapped to be equal to SH2 address.
  la = (uptr)*(void **)((char *)sh2 + poffs);

  // if r is in rcache or needed soon anyway, and offs is relative to region,
  // and address translation fits in add_ptr_imm (s32), then use rcached const
  if (la == (s32)la && !(*offs & ~mask) && rcache_is_cached(r)) {
    u32 odd = a & 1; // need to fix odd address for correct byte addressing
    la -= (s32)((a & ~mask) - *offs - odd); // diff between reg and memory
    hr = hr2 = rcache_get_reg(r, rmode, NULL);
    if ((la & ~omask) - odd) {
      hr = rcache_get_tmp();
      emith_add_r_r_ptr_imm(hr, hr2, (la & ~omask) - odd);
      rcache_free(hr2);
    }
    *offs = (la & omask);
  } else {
    // known fixed host address
    la += (a + *offs) & mask;
    hr = rcache_get_tmp();
    emith_move_r_ptr_imm(hr, la & ~omask);
    *offs = la & omask;
  }
  return hr;
}

// read const data from const ROM address
static int emit_get_rom_data(SH2 *sh2, sh2_reg_e r, u32 offs, int size, u32 *val)
{
  u32 a, mask;

  *val = 0;
  if (gconst_get(r, &a)) {
    a += offs;
    // check if rom is memory mapped (not bank switched), and address is in rom
    if (dr_is_rom(a) && p32x_sh2_get_mem_ptr(a, &mask, sh2) == sh2->p_rom) {
      switch (size & MF_SIZEMASK) {
      case 0: *val = (s8)p32x_sh2_read8(a, sh2s);   break;  // 8
      case 1: *val = (s16)p32x_sh2_read16(a, sh2s); break;  // 16
      case 2: *val = p32x_sh2_read32(a, sh2s);      break;  // 32
      }
      return 1;
    }
  }
  return 0;
}

static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
{
#if PROPAGATE_CONSTANTS
  gconst_new(dst, imm);
#else
  int hr = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  emith_move_r_imm(hr, imm);
#endif
}

static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
{
  if (gconst_check(src) || rcache_is_cached(src)) {
#if ALIAS_REGISTERS
    rcache_alias_vreg(dst, src);
#else
    int hr_s = rcache_get_reg(src, RC_GR_READ, NULL);
    int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
    emith_move_r_r(hr_d, hr_s);
    gconst_copy(dst, src);
#endif
  } else {
    int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
    emith_ctx_read(hr_d, src * 4);
  }
}

static void emit_add_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_add_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val + imm);
  } else
    gconst_new(r, val + imm);
}

static void emit_sub_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_sub_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val - imm);
  } else
    gconst_new(r, val - imm);
}

static void emit_sync_t_to_sr(void)
{
  // avoid reloading SR from context if there's nothing to do
  if (emith_get_t_cond() >= 0) {
    int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    emith_sync_t(sr);
  }
}

// rd = @(arg0)
static int emit_memhandler_read(int size)
{
  emit_sync_t_to_sr();
  rcache_clean_tmp();
#ifndef DRC_SR_REG
  // must writeback cycles for poll detection stuff
  if (guest_regs[SHR_SR].vreg != -1)
    rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
#endif
  rcache_invalidate_tmp();

  if (size & MF_POLLING)
    switch (size & MF_SIZEMASK) {
    case 0: emith_call(sh2_drc_read8_poll);  break; // 8
    case 1: emith_call(sh2_drc_read16_poll); break; // 16
    case 2: emith_call(sh2_drc_read32_poll); break; // 32
    }
  else
    switch (size & MF_SIZEMASK) {
    case 0: emith_call(sh2_drc_read8);  break; // 8
    case 1: emith_call(sh2_drc_read16); break; // 16
    case 2: emith_call(sh2_drc_read32); break; // 32
    }

  return rcache_get_tmp_ret();
}

// @(arg0) = arg1
static void emit_memhandler_write(int size)
{
  emit_sync_t_to_sr();
  rcache_clean_tmp();
#ifndef DRC_SR_REG
  if (guest_regs[SHR_SR].vreg != -1)
    rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
#endif
  rcache_invalidate_tmp();

  switch (size & MF_SIZEMASK) {
  case 0: emith_call(sh2_drc_write8);  break;  // 8
  case 1: emith_call(sh2_drc_write16); break;  // 16
  case 2: emith_call(sh2_drc_write32); break;  // 32
  }
}
  2382. // rd = @(Rs,#offs); rd < 0 -> return a temp
  2383. static int emit_memhandler_read_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
  2384. {
  2385. int hr, hr2;
  2386. u32 val;
  2387. #if PROPAGATE_CONSTANTS
  2388. if (emit_get_rom_data(sh2, rs, offs, size, &val)) {
  2389. if (rd == SHR_TMP) {
  2390. hr2 = rcache_get_tmp();
  2391. emith_move_r_imm(hr2, val);
  2392. } else {
  2393. emit_move_r_imm32(rd, val);
  2394. hr2 = rcache_get_reg(rd, RC_GR_RMW, NULL);
  2395. }
  2396. if (size & MF_POSTINCR)
  2397. emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2398. return hr2;
  2399. }
  2400. val = size & MF_POSTINCR;
  2401. hr = emit_get_rbase_and_offs(sh2, rs, val ? RC_GR_RMW : RC_GR_READ, &offs);
  2402. if (hr != -1) {
  2403. if (rd == SHR_TMP)
  2404. hr2 = rcache_get_tmp();
  2405. else
  2406. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  2407. switch (size & MF_SIZEMASK) {
  2408. case 0: emith_read8s_r_r_offs(hr2, hr, offs ^ 1); break; // 8
  2409. case 1: emith_read16s_r_r_offs(hr2, hr, offs); break; // 16
  2410. case 2: emith_read_r_r_offs(hr2, hr, offs); emith_ror(hr2, hr2, 16); break;
  2411. }
  2412. rcache_free(hr);
  2413. if (size & MF_POSTINCR)
  2414. emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2415. return hr2;
  2416. }
  2417. #endif
  2418. if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
  2419. hr = rcache_get_tmp_arg(0);
  2420. emith_move_r_imm(hr, val + offs);
  2421. if (size & MF_POSTINCR)
  2422. gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  2423. } else if (size & MF_POSTINCR) {
  2424. hr = rcache_get_tmp_arg(0);
  2425. hr2 = rcache_get_reg(rs, RC_GR_RMW, NULL);
  2426. emith_add_r_r_imm(hr, hr2, offs);
  2427. emith_add_r_imm(hr2, 1 << (size & MF_SIZEMASK));
  2428. if (gconst_get(rs, &val))
  2429. gconst_set(rs, val + (1 << (size & MF_SIZEMASK)));
  2430. } else {
  2431. hr = rcache_get_reg_arg(0, rs, &hr2);
  2432. if (offs || hr != hr2)
  2433. emith_add_r_r_imm(hr, hr2, offs);
  2434. }
  2435. hr = emit_memhandler_read(size);
  2436. size &= MF_SIZEMASK;
  2437. if (rd == SHR_TMP)
  2438. hr2 = hr;
  2439. else
  2440. #if REMAP_REGISTER
  2441. hr2 = rcache_map_reg(rd, hr, RC_GR_WRITE);
  2442. #else
  2443. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  2444. #endif
  2445. if (hr != hr2) {
  2446. emith_move_r_r(hr2, hr);
  2447. rcache_free_tmp(hr);
  2448. }
  2449. return hr2;
  2450. }
  2451. // @(Rs,#offs) = rd; rd < 0 -> write arg1
  2452. static void emit_memhandler_write_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
  2453. {
  2454. int hr, hr2;
  2455. u32 val;
  2456. if (rd == SHR_TMP) {
  2457. host_arg2reg(hr2, 1); // already locked and prepared by caller
  2458. } else if ((size & MF_PREDECR) && rd == rs) { // must avoid caching rd in arg1
  2459. hr2 = rcache_get_reg_arg(1, rd, &hr);
  2460. if (hr != hr2) {
  2461. emith_move_r_r(hr2, hr);
  2462. rcache_free(hr2);
  2463. }
  2464. } else
  2465. hr2 = rcache_get_reg_arg(1, rd, NULL);
  2466. if (rd != SHR_TMP)
  2467. rcache_unlock(guest_regs[rd].vreg); // unlock in case rd is in arg0
  2468. if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
  2469. hr = rcache_get_tmp_arg(0);
  2470. if (size & MF_PREDECR) {
  2471. val -= 1 << (size & MF_SIZEMASK);
  2472. gconst_new(rs, val);
  2473. }
  2474. emith_move_r_imm(hr, val + offs);
  2475. } else if (offs || (size & MF_PREDECR)) {
  2476. if (size & MF_PREDECR)
  2477. emit_sub_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2478. rcache_unlock(guest_regs[rs].vreg); // unlock in case rs is in arg0
  2479. hr = rcache_get_reg_arg(0, rs, &hr2);
  2480. if (offs || hr != hr2)
  2481. emith_add_r_r_imm(hr, hr2, offs);
  2482. } else
  2483. hr = rcache_get_reg_arg(0, rs, NULL);
  2484. emit_memhandler_write(size);
  2485. }
// rd = @(Rx,Ry); rd < 0 -> return a temp
static int emit_indirect_indexed_read(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
{
  int hr, hr2;
  int tx, ty;
#if PROPAGATE_CONSTANTS
  u32 offs;

  // if offs is larger than 0x01000000, it's most probably the base address part
  if (gconst_get(ry, &offs) && offs < 0x01000000)
    return emit_memhandler_read_rr(sh2, rd, rx, offs, size);
  if (gconst_get(rx, &offs) && offs < 0x01000000)
    return emit_memhandler_read_rr(sh2, rd, ry, offs, size);
#endif
  hr = rcache_get_reg_arg(0, rx, &tx);
  ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  emith_add_r_r_r(hr, tx, ty);
  hr = emit_memhandler_read(size);

  size &= MF_SIZEMASK;
  if (rd == SHR_TMP)
    hr2 = hr;
  else
#if REMAP_REGISTER
    hr2 = rcache_map_reg(rd, hr, RC_GR_WRITE);
#else
    hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
#endif

  if (hr != hr2) {
    emith_move_r_r(hr2, hr);
    rcache_free_tmp(hr);
  }

  return hr2;
}

// @(Rx,Ry) = rd; rd < 0 -> write arg1
static void emit_indirect_indexed_write(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
{
  int hr, tx, ty;
#if PROPAGATE_CONSTANTS
  u32 offs;

  // if offs is larger than 0x01000000, it's most probably the base address part
  if (gconst_get(ry, &offs) && offs < 0x01000000)
    return emit_memhandler_write_rr(sh2, rd, rx, offs, size);
  if (gconst_get(rx, &offs) && offs < 0x01000000)
    return emit_memhandler_write_rr(sh2, rd, ry, offs, size);
#endif
  if (rd != SHR_TMP)
    rcache_get_reg_arg(1, rd, NULL);
  hr = rcache_get_reg_arg(0, rx, &tx);
  ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  emith_add_r_r_r(hr, tx, ty);
  emit_memhandler_write(size);
}
// @Rn+,@Rm+
static void emit_indirect_read_double(SH2 *sh2, int *rnr, int *rmr, sh2_reg_e rn, sh2_reg_e rm, int size)
{
  int tmp;

  // unlock rn, rm here to avoid REG shortage in MAC operation
  tmp = emit_memhandler_read_rr(sh2, SHR_TMP, rn, 0, size | MF_POSTINCR);
  rcache_unlock(guest_regs[rn].vreg);
  tmp = rcache_save_tmp(tmp);
  *rmr = emit_memhandler_read_rr(sh2, SHR_TMP, rm, 0, size | MF_POSTINCR);
  rcache_unlock(guest_regs[rm].vreg);
  *rnr = rcache_restore_tmp(tmp);
}
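
// Illustrative sketch (not part of the original source): MAC.L/MAC.W need both
// operands fetched with post-increment before the multiply-accumulate, e.g.
//
//   emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
//   emith_sh2_macl(macl, mach, tmp, tmp2, sr);   // macl/mach: cached MAC regs
//
// The first temp is spilled with rcache_save_tmp() so the second read cannot
// evict it while allocating its own temp and argument registers.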
static void emit_do_static_regs(int is_write, int tmpr)
{
  int i, r, count;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_STATIC|GRF_PINNED))
      r = cache_regs[guest_regs[i].vreg].hreg;
    else
      continue;

    for (count = 1; i < ARRAY_SIZE(guest_regs) - 1; i++, r++) {
      if ((guest_regs[i + 1].flags & (GRF_STATIC|GRF_PINNED)) &&
          cache_regs[guest_regs[i + 1].vreg].hreg == r + 1)
        count++;
      else
        break;
    }

    if (count > 1) {
      // i, r point to last item
      if (is_write)
        emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
      else
        emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
    } else {
      if (is_write)
        emith_ctx_write(r, i * 4);
      else
        emith_ctx_read(r, i * 4);
    }
  }
}
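
// Illustrative note (not part of the original source): runs of static/pinned
// guest regs that sit in consecutive host regs are coalesced into one
// multi-word context transfer. E.g. if guest R0..R2 were statically mapped to
// three consecutive host regs (hypothetical mapping), a context save becomes
//   emith_ctx_write_multiple(hreg_of_R0, SHR_R0 * 4, 3, tmpr);
// instead of three separate emith_ctx_write() calls; on hosts with multi-word
// load/store (e.g. ARM LDM/STM) this can shrink the prologue noticeably.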
// block local link stuff
struct linkage {
  u32 pc;
  void *ptr;
  struct block_link *bl;
  u32 mask;
};
static inline int find_in_linkage(const struct linkage *array, int size, u32 pc)
{
  int i;

  for (i = 0; i < size; i++)
    if (pc == array[i].pc)
      return i;

  return -1;
}
static int find_in_sorted_linkage(const struct linkage *array, int size, u32 pc)
{
  // binary search in sorted array
  int left = 0, right = size-1;

  while (left <= right)
  {
    int middle = (left + right) / 2;
    if (array[middle].pc == pc)
      return middle;
    else if (array[middle].pc < pc)
      left = middle + 1;
    else
      right = middle - 1;
  }
  return -1;
}
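
// Usage sketch (illustrative; mirrors the lookup done at block entry below).
// branch_targets[] is filled in increasing pc order during the scan pass, so
// it is already sorted and the binary search applies directly:
//
//   int v = find_in_sorted_linkage(branch_targets, branch_target_count, pc);
//   if (v >= 0)
//     branch_targets[v].ptr = tcache_ptr; // record host address of target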
static void emit_branch_linkage_code(SH2 *sh2, struct block_desc *block, int tcache_id,
                const struct linkage *targets, int target_count,
                const struct linkage *links, int link_count)
{
  struct block_link *bl;
  int u, v, tmp;

  emith_flush();
  for (u = 0; u < link_count; u++) {
    emith_pool_check();
    // look up local branch targets
    if (links[u].mask & 0x2) {
      v = find_in_sorted_linkage(targets, target_count, links[u].pc);
      if (v < 0 || !targets[v].ptr) {
        // forward branch not yet resolved, prepare external linking
        emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
        bl = dr_prepare_ext_branch(block->entryp, links[u].pc, sh2->is_slave, tcache_id);
        if (bl)
          bl->type = BL_LDJMP;
        tmp = rcache_get_tmp_arg(0);
        emith_move_r_imm(tmp, links[u].pc);
        rcache_free_tmp(tmp);
        emith_jump_patchable(sh2_drc_dispatcher);
      } else if (emith_jump_patch_inrange(links[u].ptr, targets[v].ptr)) {
        // inrange local branch
        emith_jump_patch(links[u].ptr, targets[v].ptr, NULL);
      } else {
        // far local branch
        emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
        emith_jump(targets[v].ptr);
      }
    } else {
      // external or exit, emit blx area entry
      void *target = (links[u].mask & 0x1 ? sh2_drc_exit : sh2_drc_dispatcher);
      if (links[u].bl)
        links[u].bl->blx = tcache_ptr;
      emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
      tmp = rcache_get_tmp_arg(0);
      emith_move_r_imm(tmp, links[u].pc & ~1);
      rcache_free_tmp(tmp);
      emith_jump(target);
    }
  }
}
#define DELAY_SAVE_T(sr) { \
  int t_ = rcache_get_tmp(); \
  emith_bic_r_imm(sr, T_save); \
  emith_and_r_r_imm(t_, sr, 1); \
  emith_or_r_r_lsl(sr, t_, T_SHIFT); \
  rcache_free_tmp(t_); \
}
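
// Illustrative note (not part of the original source): DELAY_SAVE_T copies the
// live T bit (bit 0 of the cached SR) into the separate T_save position:
//   clear T_save;  t_ = sr & 1;  sr |= t_ << T_SHIFT;
// This way a delay slot insn may clobber T while the branch that owns the
// slot can still test the pre-slot value via the saved copy.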
#define FLUSH_CYCLES(sr) \
  if (cycles > 0) { \
    emith_sub_r_imm(sr, cycles << 12); \
    cycles = 0; \
  }
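
// Illustrative note (not part of the original source): the cycle budget lives
// in the upper bits of the cached SR (shifted left by 12), so flushing the
// locally counted cycles is a single subtract, e.g. with cycles == 3:
//   emith_sub_r_imm(sr, 3 << 12);  // sr -= 3 cycles; low bits keep SR flags
// Once the counter goes negative, the block-entry check (sr compared with 0,
// exit on LE) leaves through sh2_drc_exit.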
static void *dr_get_pc_base(u32 pc, SH2 *sh2);

static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
{
  // branch targets in current block
  static struct linkage branch_targets[MAX_LOCAL_TARGETS];
  int branch_target_count = 0;
  // unresolved local or external targets with block link/exit area if needed
  static struct linkage blx_targets[MAX_LOCAL_BRANCHES];
  int blx_target_count = 0;

  static u8 op_flags[BLOCK_INSN_LIMIT];

  enum flg_states { FLG_UNKNOWN, FLG_UNUSED, FLG_0, FLG_1 };
  struct drcf {
    int delay_reg:8;
    u32 loop_type:8;
    u32 polling:8;
    u32 pinning:1;
    u32 test_irq:1;
    u32 pending_branch_direct:1;
    u32 pending_branch_indirect:1;
    u32 Tflag:2, Mflag:2;
  } drcf = { 0, };
#if LOOP_OPTIMIZER
  // loops with pinned registers for optimization
  // pinned regs are like statics and don't need saving/restoring inside a loop
  static struct linkage pinned_loops[MAX_LOCAL_TARGETS/16];
  int pinned_loop_count = 0;
#endif
  // PC of current, first, last SH2 insn
  u32 pc, base_pc, end_pc;
  u32 base_literals, end_literals;
  u8 *block_entry_ptr;
  struct block_desc *block;
  struct block_entry *entry;
  struct block_link *bl;
  u16 *dr_pc_base;
  struct op_data *opd;
  int blkid_main = 0;
  int tmp, tmp2;
  int cycles;
  int i, v;
  u32 u, m1, m2, m3, m4;
  int op;
  u16 crc;

  base_pc = sh2->pc;

  // get base/validate PC
  dr_pc_base = dr_get_pc_base(base_pc, sh2);
  if (dr_pc_base == (void *)-1) {
    printf("invalid PC, aborting: %08x\n", base_pc);
    // FIXME: be less destructive
    exit(1);
  }

  // initial passes to disassemble and analyze the block
  crc = scan_block(base_pc, sh2->is_slave, op_flags, &end_pc, &base_literals, &end_literals);
  end_literals = dr_check_nolit(base_literals, end_literals, tcache_id);
  if (base_literals == end_literals) // map empty lit section to end of code
    base_literals = end_literals = end_pc;

  // if there is already a translated but inactive block, reuse it
  block = dr_find_inactive_block(tcache_id, crc, base_pc, end_pc - base_pc,
    base_literals, end_literals - base_literals);

  if (block) {
    dbg(2, "== %csh2 reuse block %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
      base_pc, end_pc, base_literals, end_literals, block->entryp->tcache_ptr);
    dr_activate_block(block, tcache_id, sh2->is_slave);
    emith_update_cache();
    return block->entryp[0].tcache_ptr;
  }

  // collect branch_targets that don't land on delay slots
  m1 = m2 = m3 = m4 = v = op = 0;
  for (pc = base_pc, i = 0; pc < end_pc; i++, pc += 2) {
    if (op_flags[i] & OF_DELAY_OP)
      op_flags[i] &= ~OF_BTARGET;
    if (op_flags[i] & OF_BTARGET) {
      if (branch_target_count < ARRAY_SIZE(branch_targets))
        branch_targets[branch_target_count++] = (struct linkage) { .pc = pc };
      else {
        printf("warning: linkage overflow\n");
        end_pc = pc;
        break;
      }
    }
    if (ops[i].op == OP_LDC && (ops[i].dest & BITMASK1(SHR_SR)) && pc+2 < end_pc)
      op_flags[i+1] |= OF_BTARGET; // RTE entrypoint in case of SR.IMASK change

    // unify T and SR since rcache doesn't know about "virtual" guest regs
    if (ops[i].source & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
    if (ops[i].dest   & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
    if (ops[i].dest   & BITMASK1(SHR_T)) ops[i].dest   |= BITMASK1(SHR_SR);

#if LOOP_DETECTION
    // loop types detected:
    // 1. target: ... BRA target -> idle loop
    // 2. target: ... delay insn ... BF target -> delay loop
    // 3. target: ... poll insn ... BF/BT target -> poll loop
    // 4. target: ... poll insn ... BF/BT exit ... BRA target, exit: -> poll
    // conditions:
    // a. no further branch targets between target and back jump.
    // b. no unconditional branch insn inside the loop.
    // c. exactly one poll or delay insn is allowed inside a delay/poll loop
    // (scan_block marks loops only if they meet conditions a through c)
    // d. idle loops do not modify anything but PC,SR and contain no branches
    // e. delay/poll loops do not modify anything but the concerned reg,PC,SR
    // f. loading constants into registers inside the loop is allowed
    // g. a delay/poll loop must have a conditional branch somewhere
    // h. an idle loop must not have a conditional branch
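    // Hypothetical SH2 examples of the loop types above (not part of the
    // original source; register/address choices are made up):
    //   idle (1):  target: bra target ; nop
    //   delay (2): target: dt r0 ; bf target
    //   poll (3):  target: mov.l @r1,r0 ; tst r0,r0 ; bf target
    // In the poll case r0 is the polled reg (goes into m1), r1 must stay
    // unmodified inside the loop (conditions d/e), and the BF supplies the
    // conditional branch required by condition g.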
    if (op_flags[i] & OF_BTARGET) {
      // possible loop entry point
      drcf.loop_type = op_flags[i] & OF_LOOP;
      drcf.pending_branch_direct = drcf.pending_branch_indirect = 0;
      op = OF_IDLE_LOOP; // loop type
      v = i;
      m1 = m2 = m3 = m4 = 0;
      if (!drcf.loop_type) // reset basic loop if it isn't recognized as a loop
        op_flags[i] &= ~OF_BASIC_LOOP;
    }
    if (drcf.loop_type) {
      // calculate reg masks for loop pinning
      m4 |= ops[i].source & ~m3;
      m3 |= ops[i].dest;
      // detect loop type, and store poll/delay register
      if (op_flags[i] & OF_POLL_INSN) {
        op = OF_POLL_LOOP;
        m1 |= ops[i].dest; // loop poll/delay regs
      } else if (op_flags[i] & OF_DELAY_INSN) {
        op = OF_DELAY_LOOP;
        m1 |= ops[i].dest;
      } else if (ops[i].op != OP_LOAD_POOL && ops[i].op != OP_LOAD_CONST
              && (ops[i].op != OP_MOVE || op != OF_POLL_LOOP)) {
        // not (MOV @(PC) or MOV # or (MOV reg and poll)), condition f
        m2 |= ops[i].dest; // regs modified by other insns
      }
      // branch detector
      if (OP_ISBRAIMM(ops[i].op)) {
        if (ops[i].imm == base_pc + 2*v)
          drcf.pending_branch_direct = 1; // backward branch detected
        else
          op_flags[v] &= ~OF_BASIC_LOOP; // no basic loop
      }
      if (OP_ISBRACND(ops[i].op))
        drcf.pending_branch_indirect = 1; // conditions g,h - cond.branch
      // poll/idle loops terminate with their backwards branch to the loop start
      if (drcf.pending_branch_direct && !(op_flags[i+1] & OF_DELAY_OP)) {
        m2 &= ~(m1 | BITMASK3(SHR_PC, SHR_SR, SHR_T)); // conditions d,e + g,h
        if (m2 || ((op == OF_IDLE_LOOP) == (drcf.pending_branch_indirect)))
          op = 0; // conditions not met
        op_flags[v] = (op_flags[v] & ~OF_LOOP) | op; // set loop type
        drcf.loop_type = 0;
#if LOOP_OPTIMIZER
        if (op_flags[v] & OF_BASIC_LOOP) {
          m3 &= ~rcache_regs_static & ~BITMASK5(SHR_PC, SHR_PR, SHR_SR, SHR_T, SHR_MEM);
          if (m3 && count_bits(m3) < count_bits(rcache_vregs_reg) &&
              pinned_loop_count < ARRAY_SIZE(pinned_loops)-1) {
            pinned_loops[pinned_loop_count++] =
              (struct linkage) { .pc = base_pc + 2*v, .mask = m3 };
          } else
            op_flags[v] &= ~OF_BASIC_LOOP;
        }
#endif
      }
    }
#endif
  }
  tcache_ptr = dr_prepare_cache(tcache_id, (end_pc - base_pc) / 2, branch_target_count);
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcache_id] = tcache_ptr;
#endif

  block = dr_add_block(branch_target_count, base_pc, end_pc - base_pc,
    base_literals, end_literals-base_literals, crc, sh2->is_slave, &blkid_main);
  if (block == NULL)
    return NULL;

  block_entry_ptr = tcache_ptr;
  dbg(2, "== %csh2 block #%d,%d %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
    tcache_id, blkid_main, base_pc, end_pc, base_literals, end_literals, block_entry_ptr);

  // clear stale state after compile errors
  rcache_invalidate();
  emith_invalidate_t();
  drcf = (struct drcf) { 0 };
#if LOOP_OPTIMIZER
  pinned_loops[pinned_loop_count].pc = -1;
  pinned_loop_count = 0;
#endif

  // -------------------------------------------------
  // 3rd pass: actual compilation
  pc = base_pc;
  cycles = 0;
  for (i = 0; pc < end_pc; i++)
  {
    u32 delay_dep_fw = 0, delay_dep_bk = 0;
    int tmp3, tmp4;
    int sr;

    if (op_flags[i] & OF_BTARGET)
    {
      if (pc != base_pc)
      {
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        FLUSH_CYCLES(sr);
        emith_sync_t(sr);
        drcf.Mflag = FLG_UNKNOWN;
        rcache_flush();
        emith_flush();
      }
      // make block entry
      v = block->entry_count;
      entry = &block->entryp[v];
      if (v < branch_target_count)
      {
        entry->pc = pc;
        entry->tcache_ptr = tcache_ptr;
        entry->links = entry->o_links = NULL;
#if (DRC_DEBUG & 2)
        entry->block = block;
#endif
        block->entry_count++;

        dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p",
          sh2->is_slave ? 's' : 'm', tcache_id, blkid_main,
          pc, tcache_ptr);
      }
      else {
        dbg(1, "too many entryp for block #%d,%d pc=%08x",
          tcache_id, blkid_main, pc);
        break;
      }

      v = find_in_sorted_linkage(branch_targets, branch_target_count, pc);
      if (v >= 0)
        branch_targets[v].ptr = tcache_ptr;
#if LOOP_DETECTION
      drcf.loop_type = op_flags[i] & OF_LOOP;
      drcf.delay_reg = -1;
      drcf.polling = (drcf.loop_type == OF_POLL_LOOP ? MF_POLLING : 0);
#endif

      rcache_clean();

#if (DRC_DEBUG & 0x10)
      tmp = rcache_get_tmp_arg(0);
      emith_move_r_imm(tmp, pc);
      tmp = emit_memhandler_read(1);
      tmp2 = rcache_get_tmp();
      tmp3 = rcache_get_tmp();
      emith_move_r_imm(tmp2, (s16)FETCH_OP(pc));
      emith_move_r_imm(tmp3, 0);
      emith_cmp_r_r(tmp, tmp2);
      EMITH_SJMP_START(DCOND_EQ);
      emith_read_r_r_offs_c(DCOND_NE, tmp3, tmp3, 0); // crash
      EMITH_SJMP_END(DCOND_EQ);
      rcache_free_tmp(tmp);
      rcache_free_tmp(tmp2);
      rcache_free_tmp(tmp3);
#endif
      // check cycles
      sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);

#if LOOP_OPTIMIZER
      if (op_flags[i] & OF_BASIC_LOOP) {
        if (pinned_loops[pinned_loop_count].pc == pc) {
          // pin needed regs on loop entry
          FOR_ALL_BITS_SET_DO(pinned_loops[pinned_loop_count].mask, v, rcache_pin_reg(v));
          emith_flush();
          // store current PC as loop target
          pinned_loops[pinned_loop_count].ptr = tcache_ptr;
          drcf.pinning = 1;
        } else
          op_flags[i] &= ~OF_BASIC_LOOP;
      }

      if (op_flags[i] & OF_BASIC_LOOP) {
        // if exiting a pinned loop pinned regs must be written back to ctx
        // since they are reloaded in the loop entry code
        emith_cmp_r_imm(sr, 0);
        EMITH_JMP_START(DCOND_GT);
        rcache_save_pinned();

        if (blx_target_count < ARRAY_SIZE(blx_targets)) {
          // exit via stub in blx table (saves some 1-3 insns in the main flow)
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
          emith_jump_patchable(tcache_ptr);
        } else {
          // blx table full, must inline exit code
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, pc);
          emith_jump(sh2_drc_exit);
          rcache_free_tmp(tmp);
        }
        EMITH_JMP_END(DCOND_GT);
      } else
#endif
      {
        if (blx_target_count < ARRAY_SIZE(blx_targets)) {
          // exit via stub in blx table (saves some 1-3 insns in the main flow)
          emith_cmp_r_imm(sr, 0);
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
          emith_jump_cond_patchable(DCOND_LE, tcache_ptr);
        } else {
          // blx table full, must inline exit code
          tmp = rcache_get_tmp_arg(0);
          emith_cmp_r_imm(sr, 0);
          EMITH_SJMP_START(DCOND_GT);
          emith_move_r_imm_c(DCOND_LE, tmp, pc);
          emith_jump_cond(DCOND_LE, sh2_drc_exit);
          EMITH_SJMP_END(DCOND_GT);
          rcache_free_tmp(tmp);
        }
      }

#if (DRC_DEBUG & 32)
      // block hit counter
      tmp  = rcache_get_tmp_arg(0);
      tmp2 = rcache_get_tmp_arg(1);
      emith_move_r_ptr_imm(tmp, (uptr)entry);
      emith_read_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
      emith_add_r_imm(tmp2, 1);
      emith_write_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
      rcache_free_tmp(tmp);
      rcache_free_tmp(tmp2);
#endif

#if (DRC_DEBUG & (8|256|512|1024))
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      emith_sync_t(sr);
      rcache_clean();
      tmp = rcache_used_hregs_mask();
      emith_save_caller_regs(tmp);
      emit_do_static_regs(1, 0);
      rcache_get_reg_arg(2, SHR_SR, NULL);
      tmp2 = rcache_get_tmp_arg(0);
      tmp3 = rcache_get_tmp_arg(1);
      tmp4 = rcache_get_tmp();
      emith_move_r_ptr_imm(tmp2, tcache_ptr);
      emith_move_r_r_ptr(tmp3, CONTEXT_REG);
      emith_move_r_imm(tmp4, pc);
      emith_ctx_write(tmp4, SHR_PC * 4);
      rcache_invalidate_tmp();
      emith_call(sh2_drc_log_entry);
      emith_restore_caller_regs(tmp);
#endif

      do_host_disasm(tcache_id);
      rcache_unlock_all();
    }
#ifdef DRC_CMP
    if (!(op_flags[i] & OF_DELAY_OP)) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      emit_move_r_imm32(SHR_PC, pc);
      rcache_clean();

      tmp = rcache_used_hregs_mask();
      emith_save_caller_regs(tmp);
      emit_do_static_regs(1, 0);
      emith_pass_arg_r(0, CONTEXT_REG);
      emith_call(do_sh2_cmp);
      emith_restore_caller_regs(tmp);
    }
#endif

    // emit blx area if limits are approached
    if (blx_target_count && (blx_target_count > ARRAY_SIZE(blx_targets)-4 ||
        !emith_jump_patch_inrange(blx_targets[0].ptr, tcache_ptr+0x100))) {
      u8 *jp;
      rcache_invalidate_tmp();
      jp = tcache_ptr;
      emith_jump_patchable(tcache_ptr);
      emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
        branch_target_count, blx_targets, blx_target_count);
      blx_target_count = 0;
      do_host_disasm(tcache_id);
      emith_jump_patch(jp, tcache_ptr, NULL);
    }

    emith_pool_check();

    opd = &ops[i];
    op = FETCH_OP(pc);

#if (DRC_DEBUG & 4)
    DasmSH2(sh2dasm_buff, pc, op);
    if (op_flags[i] & OF_BTARGET) {
      if ((op_flags[i] & OF_LOOP) == OF_DELAY_LOOP) tmp3 = '+';
      else if ((op_flags[i] & OF_LOOP) == OF_POLL_LOOP) tmp3 = '=';
      else if ((op_flags[i] & OF_LOOP) == OF_IDLE_LOOP) tmp3 = '~';
      else tmp3 = '*';
    } else if (drcf.loop_type) tmp3 = '.';
    else tmp3 = ' ';
    printf("%c%08x %04x %s\n", tmp3, pc, op, sh2dasm_buff);
#endif

    pc += 2;
#if (DRC_DEBUG & 2)
    insns_compiled++;
#endif
    if (op_flags[i] & OF_DELAY_OP)
    {
      // handle delay slot dependencies
      delay_dep_fw = opd->dest & ops[i-1].source;
      delay_dep_bk = opd->source & ops[i-1].dest;
      if (delay_dep_fw & BITMASK1(SHR_T)) {
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        DELAY_SAVE_T(sr);
      }
      if (delay_dep_bk & BITMASK1(SHR_PC)) {
        if (opd->op != OP_LOAD_POOL && opd->op != OP_MOVA) {
          // can only be those 2 really..
          elprintf_sh2(sh2, EL_ANOMALY,
            "drc: illegal slot insn %04x @ %08x?", op, pc - 2);
        }
        // store PC for MOVA/MOV @PC address calculation
        if (opd->imm != 0)
          ; // case OP_BRANCH - addr already resolved in scan_block
        else {
          switch (ops[i-1].op) {
          case OP_BRANCH:
            emit_move_r_imm32(SHR_PC, ops[i-1].imm);
            break;
          case OP_BRANCH_CT:
          case OP_BRANCH_CF:
            sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
            tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
            emith_move_r_imm(tmp, pc);
            tmp2 = emith_tst_t(sr, (ops[i-1].op == OP_BRANCH_CT));
            tmp3 = emith_invert_cond(tmp2);
            EMITH_SJMP_START(tmp3);
            emith_move_r_imm_c(tmp2, tmp, ops[i-1].imm);
            EMITH_SJMP_END(tmp3);
            break;
          case OP_BRANCH_N: // BT/BF known not to be taken
            // XXX could modify opd->imm instead?
            emit_move_r_imm32(SHR_PC, pc);
            break;
          // case OP_BRANCH_R OP_BRANCH_RF - PC already loaded
          }
        }
      }
      //if (delay_dep_fw & ~BITMASK1(SHR_T))
      //  dbg(1, "unhandled delay_dep_fw: %x", delay_dep_fw & ~BITMASK1(SHR_T));
      if (delay_dep_bk & ~BITMASK2(SHR_PC, SHR_PR))
        dbg(1, "unhandled delay_dep_bk: %x", delay_dep_bk);
    }
    // inform cache about future register usage
    u32 late = 0;  // regs read by future ops
    u32 write = 0; // regs written to (to detect write before read)
    u32 soon = 0;  // regs read soon
    for (v = 1; v <= 9; v++) {
      // no sense in looking any further than the next rcache flush
      tmp = ((op_flags[i+v] & OF_BTARGET) || (op_flags[i+v-1] & OF_DELAY_OP) ||
             (OP_ISBRACND(opd[v-1].op) && !(op_flags[i+v] & OF_DELAY_OP)));
      // XXX looking behind cond branch to avoid evicting regs used later?
      if (pc + 2*v <= end_pc && !tmp) { // (pc already incremented above)
        late |= opd[v].source & ~write;
        // ignore source regs after they have been written to
        write |= opd[v].dest;
        // regs needed in the next few instructions
        if (v <= 4)
          soon = late;
      } else
        break;
    }
    rcache_set_usage_now(opd[0].source); // current insn
    rcache_set_usage_soon(soon);         // insns 1-4
    rcache_set_usage_late(late & ~soon); // insns 5-9
    rcache_set_usage_discard(write & ~(late|soon));
    if (v <= 9)
      // upcoming rcache_flush, start writing back unused dirty stuff
      rcache_clean_masked(rcache_dirty_mask() & ~(write|opd[0].dest));
    switch (opd->op)
    {
    case OP_BRANCH_N:
      // never taken, just use up cycles
      goto end_op;
    case OP_BRANCH:
    case OP_BRANCH_CT:
    case OP_BRANCH_CF:
      if (opd->dest & BITMASK1(SHR_PR))
        emit_move_r_imm32(SHR_PR, pc + 2);
      drcf.pending_branch_direct = 1;
      goto end_op;

    case OP_BRANCH_R:
      if (opd->dest & BITMASK1(SHR_PR))
        emit_move_r_imm32(SHR_PR, pc + 2);
      emit_move_r_r(SHR_PC, opd->rm);
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_BRANCH_RF:
      tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
      tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
      emith_move_r_imm(tmp, pc + 2);
      if (opd->dest & BITMASK1(SHR_PR)) {
        tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE, NULL);
        emith_move_r_r(tmp3, tmp);
      }
      emith_add_r_r(tmp, tmp2);
      if (gconst_get(GET_Rn(), &u))
        gconst_set(SHR_PC, pc + 2 + u);
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_SLEEP: // SLEEP 0000000000011011
      printf("TODO sleep\n");
      goto end_op;

    case OP_RTE: // RTE 0000000000101011
      emith_invalidate_t();
      // pop PC
      tmp = emit_memhandler_read_rr(sh2, SHR_PC, SHR_SP, 0, 2 | MF_POSTINCR);
      rcache_free(tmp);
      // pop SR
      tmp = emit_memhandler_read_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_POSTINCR);
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      emith_write_sr(sr, tmp);
      rcache_free_tmp(tmp);
      drcf.test_irq = 1;
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_UNDEFINED:
      elprintf_sh2(sh2, EL_ANOMALY, "drc: unhandled op %04x @ %08x", op, pc-2);
      opd->imm = (op_flags[i] & OF_B_IN_DS) ? 6 : 4;
      // fallthrough
    case OP_TRAPA: // TRAPA #imm 11000011iiiiiiii
      // push SR
      tmp = rcache_get_reg_arg(1, SHR_SR, &tmp2);
      emith_sync_t(tmp2);
      emith_clear_msb(tmp, tmp2, 22);
      emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
      // push PC
      if (opd->op == OP_TRAPA) {
        tmp = rcache_get_tmp_arg(1);
        emith_move_r_imm(tmp, pc);
      } else if (drcf.pending_branch_indirect) {
        tmp = rcache_get_reg_arg(1, SHR_PC, NULL);
      } else {
        tmp = rcache_get_tmp_arg(1);
        emith_move_r_imm(tmp, pc - 2);
      }
      emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
      // obtain new PC
      emit_memhandler_read_rr(sh2, SHR_PC, SHR_VBR, opd->imm * 4, 2);
      // indirect jump -> back to dispatcher
      drcf.pending_branch_indirect = 1;
      goto end_op;

    case OP_LOAD_POOL:
#if PROPAGATE_CONSTANTS
      if ((opd->imm && opd->imm >= base_pc && opd->imm < end_literals) ||
          dr_is_rom(opd->imm))
      {
        if (opd->size == 2)
          u = FETCH32(opd->imm);
        else
          u = (s16)FETCH_OP(opd->imm);
        // tweak for Blackthorne: avoid stack overwriting
        if (GET_Rn() == SHR_SP && u == 0x0603f800) u = 0x0603f880;
        gconst_new(GET_Rn(), u);
      }
      else
#endif
      {
        if (opd->imm != 0) {
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, opd->imm);
        } else {
          // have to calculate read addr from PC for delay slot
          tmp = rcache_get_reg_arg(0, SHR_PC, &tmp2);
          if (opd->size == 2) {
            emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
            emith_bic_r_imm(tmp, 3);
          }
          else
            emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 2);
        }
        tmp2 = emit_memhandler_read(opd->size);
#if REMAP_REGISTER
        tmp3 = rcache_map_reg(GET_Rn(), tmp2, RC_GR_WRITE);
#else
        tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
#endif
        if (tmp3 != tmp2) {
          emith_move_r_r(tmp3, tmp2);
          rcache_free_tmp(tmp2);
        }
      }
      goto end_op;

    case OP_MOVA: // MOVA @(disp,PC),R0 11000111dddddddd
      if (opd->imm != 0)
        emit_move_r_imm32(SHR_R0, opd->imm);
      else {
        // have to calculate addr from PC for delay slot
        tmp2 = rcache_get_reg(SHR_PC, RC_GR_READ, NULL);
        tmp = rcache_get_reg(SHR_R0, RC_GR_WRITE, NULL);
        emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
        emith_bic_r_imm(tmp, 3);
      }
      goto end_op;
    }
    switch ((op >> 12) & 0x0f)
    {
    /////////////////////////////////////////////
    case 0x00:
      switch (op & 0x0f)
      {
      case 0x02:
        switch (GET_Fx())
        {
        case 0: // STC SR,Rn 0000nnnn00000010
          tmp2 = SHR_SR;
          break;
        case 1: // STC GBR,Rn 0000nnnn00010010
          tmp2 = SHR_GBR;
          break;
        case 2: // STC VBR,Rn 0000nnnn00100010
          tmp2 = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp2 == SHR_SR) {
          sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
          emith_sync_t(sr);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
          emith_clear_msb(tmp, sr, 22); // reserved bits defined by ISA as 0
        } else
          emit_move_r_r(GET_Rn(), tmp2);
        goto end_op;
      case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
      case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
      case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
        emit_indirect_indexed_write(sh2, GET_Rm(), SHR_R0, GET_Rn(), op & 3);
        goto end_op;
      case 0x07: // MUL.L Rm,Rn 0000nnnnmmmm0111
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        emith_mul(tmp3, tmp2, tmp);
        goto end_op;
      case 0x08:
        switch (GET_Fx())
        {
        case 0: // CLRT 0000000000001000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (~rcache_regs_discard & BITMASK1(SHR_T))
#endif
            emith_set_t(sr, 0);
          break;
        case 1: // SETT 0000000000011000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (~rcache_regs_discard & BITMASK1(SHR_T))
#endif
            emith_set_t(sr, 1);
          break;
        case 2: // CLRMAC 0000000000101000
          emit_move_r_imm32(SHR_MACL, 0);
          emit_move_r_imm32(SHR_MACH, 0);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // NOP 0000000000001001
          break;
        case 1: // DIV0U 0000000000011001
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_invalidate_t();
          emith_bic_r_imm(sr, M|Q|T);
          drcf.Mflag = FLG_0;
          break;
        case 2: // MOVT Rn 0000nnnn00101001
          sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
          emith_sync_t(sr);
          tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
          emith_clear_msb(tmp2, sr, 31);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // STS MACH,Rn 0000nnnn00001010
          tmp2 = SHR_MACH;
          break;
        case 1: // STS MACL,Rn 0000nnnn00011010
          tmp2 = SHR_MACL;
          break;
        case 2: // STS PR,Rn 0000nnnn00101010
          tmp2 = SHR_PR;
          break;
        default:
          goto default_;
        }
        emit_move_r_r(GET_Rn(), tmp2);
        goto end_op;
      case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
        emit_indirect_indexed_read(sh2, GET_Rn(), SHR_R0, GET_Rm(), (op & 3) | drcf.polling);
        goto end_op;
      case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
        emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
        sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
        emith_sh2_macl(tmp3, tmp4, tmp, tmp2, sr);
        rcache_free_tmp(tmp2);
        rcache_free_tmp(tmp);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x01: // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), (op & 0x0f) * 4, 2);
      goto end_op;

    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
        emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, op & 3);
        goto end_op;
      case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
        emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, (op & 3) | MF_PREDECR);
        goto end_op;
      case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_tmp();
        emith_invalidate_t();
        emith_bic_r_imm(sr, M|Q|T);
        emith_lsr(tmp, tmp2, 31); // Q = Nn
        emith_or_r_r_lsl(sr, tmp, Q_SHIFT);
        emith_lsr(tmp, tmp3, 31); // M = Nm
        emith_or_r_r_lsl(sr, tmp, M_SHIFT);
        emith_eor_r_r_lsr(tmp, tmp2, 31);
        emith_or_r_r(sr, tmp); // T = Q^M
        rcache_free(tmp);
        drcf.Mflag = FLG_UNKNOWN;
        goto end_op;
      case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_r(tmp2, tmp3);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
        if (GET_Rm() != GET_Rn()) {
          tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
          emith_and_r_r_r(tmp, tmp3, tmp2);
        }
        goto end_op;
      case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        emith_eor_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
        if (GET_Rm() != GET_Rn()) {
          tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
          emith_or_r_r_r(tmp, tmp3, tmp2);
        }
        goto end_op;
      case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
        tmp = rcache_get_tmp();
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_eor_r_r_r(tmp, tmp2, tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, 0x000000ff);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0x0000ff00);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0x00ff0000);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0xff000000);
        EMITH_SJMP_END(DCOND_EQ);
        EMITH_SJMP_END(DCOND_EQ);
        EMITH_SJMP_END(DCOND_EQ);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        emith_lsr(tmp, tmp3, 16);
        emith_or_r_r_lsl(tmp, tmp2, 16);
        goto end_op;
      case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_tmp();
        if (op & 1) {
          emith_sext(tmp, tmp2, 16);
          emith_sext(tmp4, tmp3, 16);
        } else {
          emith_clear_msb(tmp, tmp2, 16);
          emith_clear_msb(tmp4, tmp3, 16);
        }
        emith_mul(tmp, tmp, tmp4);
        rcache_free_tmp(tmp4);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        switch (op & 0x07)
        {
        case 0x00: // CMP/EQ
          tmp = DCOND_EQ;
          break;
        case 0x02: // CMP/HS
          tmp = DCOND_HS;
          break;
        case 0x03: // CMP/GE
          tmp = DCOND_GE;
          break;
        case 0x06: // CMP/HI
          tmp = DCOND_HI;
          break;
        case 0x07: // CMP/GT
          tmp = DCOND_GT;
          break;
        }
        emith_clr_t_cond(sr);
        emith_cmp_r_r(tmp2, tmp3);
        emith_set_t_cond(sr, tmp);
        goto end_op;
      case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
        // Q1 = carry(Rn = (Rn << 1) | T)
        // if Q ^ M
        //   Q2 = carry(Rn += Rm)
        // else
        //   Q2 = carry(Rn -= Rm)
        // Q = M ^ Q1 ^ Q2
        // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
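        // (illustrative note, not part of the original source: DIV1 is one
        // step of a non-restoring division; guest code typically runs it 16
        // or 32 times after DIV0U/DIV0S to build the quotient bit by bit)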
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        tmp = rcache_get_tmp();
        if (drcf.Mflag != FLG_0) {
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT); // Q ^= M
        }
        rcache_free_tmp(tmp);
        // shift Rn, add T, add or sub Rm, set T = !(Q1 ^ Q2)
        // in: (Q ^ M) passed in Q
        emith_sh2_div1_step(tmp2, tmp3, sr);
        tmp = rcache_get_tmp();
        emith_or_r_imm(sr, Q); // Q = !T
        emith_and_r_r_imm(tmp, sr, T);
        emith_eor_r_r_lsl(sr, tmp, Q_SHIFT);
        if (drcf.Mflag != FLG_0) { // Q = M ^ !T = M ^ Q1 ^ Q2
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT);
        }
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_u64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
      case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 4) {
          emith_add_r_r_r(tmp, tmp3, tmp2);
        } else
          emith_sub_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4) {
            emith_t_to_carry(sr, 0);
            emith_adc_r_r_r(tmp, tmp3, tmp2);
          } else {
            emith_t_to_carry(sr, 1);
            emith_sbc_r_r_r(tmp, tmp3, tmp2);
          }
        } else
#endif
        {
          EMITH_HINT_COND(DCOND_CS);
          if (op & 4) { // adc
            emith_tpop_carry(sr, 0);
            emith_adcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 0);
          } else {
            emith_tpop_carry(sr, 1);
            emith_sbcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 1);
          }
        }
        goto end_op;
      case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4)
            emith_add_r_r_r(tmp, tmp3, tmp2);
          else
            emith_sub_r_r_r(tmp, tmp3, tmp2);
        } else
#endif
        {
          emith_clr_t_cond(sr);
          EMITH_HINT_COND(DCOND_VS);
          if (op & 4)
            emith_addf_r_r_r(tmp, tmp3, tmp2);
          else
            emith_subf_r_r_r(tmp, tmp3, tmp2);
          emith_set_t_cond(sr, DCOND_VS);
        }
        goto end_op;
      case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_s64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn 0100nnnn00000000
        case 2: // SHAL Rn 0100nnnn00100000
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T))
            emith_lsl(tmp, tmp2, 1);
          else
#endif
          {
            emith_invalidate_t();
            emith_lslf(tmp, tmp2, 1);
            emith_carry_to_t(sr, 0);
          }
          goto end_op;
        case 1: // DT Rn 0100nnnn00010000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if LOOP_DETECTION
          if (drcf.loop_type == OF_DELAY_LOOP) {
            if (drcf.delay_reg == -1)
              drcf.delay_reg = GET_Rn();
            else
              drcf.polling = drcf.loop_type = 0;
          }
#endif
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          emith_clr_t_cond(sr);
          EMITH_HINT_COND(DCOND_EQ);
          emith_subf_r_r_imm(tmp, tmp2, 1);
          emith_set_t_cond(sr, DCOND_EQ);
          goto end_op;
        }
        goto default_;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn 0100nnnn00000001
        case 2: // SHAR Rn 0100nnnn00100001
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            if (op & 0x20)
              emith_asr(tmp, tmp2, 1);
            else
              emith_lsr(tmp, tmp2, 1);
          } else
#endif
          {
            emith_invalidate_t();
            if (op & 0x20) {
              emith_asrf(tmp, tmp2, 1);
            } else
              emith_lsrf(tmp, tmp2, 1);
            emith_carry_to_t(sr, 0);
          }
          goto end_op;
        case 1: // CMP/PZ Rn 0100nnnn00010001
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_GE);
          goto end_op;
        }
        goto default_;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
          tmp = SHR_MACH;
          break;
        case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
          tmp = SHR_MACL;
          break;
        case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
          tmp = SHR_PR;
          break;
        case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
          tmp = SHR_SR;
          break;
        case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
          tmp = SHR_GBR;
          break;
        case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
          tmp = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp == SHR_SR) {
          tmp3 = rcache_get_reg_arg(1, tmp, &tmp4);
          emith_sync_t(tmp4);
          emith_clear_msb(tmp3, tmp4, 22); // reserved bits defined by ISA as 0
        } else
          tmp3 = rcache_get_reg_arg(1, tmp, NULL);
        emit_memhandler_write_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_PREDECR);
        goto end_op;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn 0100nnnn00000100
        case 0x05: // ROTR Rn 0100nnnn00000101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            if (op & 1)
              emith_ror(tmp, tmp2, 1);
            else
              emith_rol(tmp, tmp2, 1);
          } else
#endif
          {
            emith_invalidate_t();
            if (op & 1)
              emith_rorf(tmp, tmp2, 1);
            else
              emith_rolf(tmp, tmp2, 1);
            emith_carry_to_t(sr, 0);
          }
          goto end_op;
        case 0x24: // ROTCL Rn 0100nnnn00100100
        case 0x25: // ROTCR Rn 0100nnnn00100101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            emith_t_to_carry(sr, 0);
            if (op & 1)
              emith_rorc(tmp);
            else
              emith_rolc(tmp);
          } else
#endif
          {
            emith_tpop_carry(sr, 0);
            if (op & 1)
              emith_rorcf(tmp);
            else
              emith_rolcf(tmp);
            emith_tpush_carry(sr, 0);
          }
          goto end_op;
        case 0x15: // CMP/PL Rn 0100nnnn00010101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_GT);
          goto end_op;
        }
        goto default_;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
          tmp = SHR_MACH;
          break;
        case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
          tmp = SHR_MACL;
          break;
        case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
          tmp = SHR_PR;
          break;
        case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
          tmp = SHR_SR;
          break;
        case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
          tmp = SHR_GBR;
          break;
        case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
          tmp = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp == SHR_SR) {
          emith_invalidate_t();
          tmp2 = emit_memhandler_read_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_POSTINCR);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_write_sr(sr, tmp2);
          rcache_free_tmp(tmp2);
          drcf.test_irq = 1;
        } else
          emit_memhandler_read_rr(sh2, tmp, GET_Rn(), 0, 2 | MF_POSTINCR);
        goto end_op;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // SHLL2 Rn 0100nnnn00001000
                // SHLR2 Rn 0100nnnn00001001
          tmp = 2;
          break;
        case 1: // SHLL8 Rn 0100nnnn00011000
                // SHLR8 Rn 0100nnnn00011001
          tmp = 8;
          break;
        case 2: // SHLL16 Rn 0100nnnn00101000
                // SHLR16 Rn 0100nnnn00101001
          tmp = 16;
          break;
        default:
          goto default_;
        }
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 1) {
          emith_lsr(tmp2, tmp3, tmp);
        } else
          emith_lsl(tmp2, tmp3, tmp);
        goto end_op;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp2 = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp2 = SHR_MACL;
          break;
        case 2: // LDS Rm,PR 0100mmmm00101010
          tmp2 = SHR_PR;
          break;
        default:
          goto default_;
        }
        emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0b:
        switch (GET_Fx())
        {
        case 1: // TAS.B @Rn 0100nnnn00011011
          // XXX: is TAS working on 32X?
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          tmp = emit_memhandler_read(0);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_EQ);
          emith_or_r_imm(tmp, 0x80);
          tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
          emith_move_r_r(tmp2, tmp);
          rcache_free_tmp(tmp);
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          emit_memhandler_write(0);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR 0100mmmm00001110
          tmp2 = SHR_SR;
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp2 = SHR_GBR;
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp2 = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp2 == SHR_SR) {
          emith_invalidate_t();
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          emith_write_sr(sr, tmp);
          drcf.test_irq = 1;
        } else
          emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0f: // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
        emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
        sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
        emith_sh2_macw(tmp3, tmp4, tmp, tmp2, sr);
        rcache_free_tmp(tmp2);
        rcache_free_tmp(tmp);
        goto end_op;
      }
      goto default_;
  3916. /////////////////////////////////////////////
  3917. case 0x05: // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
  3918. emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2 | drcf.polling);
  3919. goto end_op;
    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B @Rm,Rn        0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn        0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn        0110nnnnmmmm0010
      case 0x04: // MOV.B @Rm+,Rn       0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn       0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn       0110nnnnmmmm0110
        tmp = ((op & 7) >= 4 && GET_Rn() != GET_Rm()) ? MF_POSTINCR : drcf.polling;
        emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), 0, (op & 3) | tmp);
        goto end_op;
      case 0x03: // MOV Rm,Rn           0110nnnnmmmm0011
        emit_move_r_r(GET_Rn(), GET_Rm());
        goto end_op;
      case 0x07 ... 0x0f:
        tmp = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
        switch (op & 0x0f)
        {
        case 0x07: // NOT    Rm,Rn      0110nnnnmmmm0111
          emith_mvn_r_r(tmp2, tmp);
          break;
        case 0x08: // SWAP.B Rm,Rn      0110nnnnmmmm1000
          tmp3 = tmp2;
          if (tmp == tmp2)
            tmp3 = rcache_get_tmp();
          tmp4 = rcache_get_tmp();
          emith_lsr(tmp3, tmp, 16);
          emith_or_r_r_lsl(tmp3, tmp, 24);
          emith_and_r_r_imm(tmp4, tmp, 0xff00);
          emith_or_r_r_lsl(tmp3, tmp4, 8);
          emith_rol(tmp2, tmp3, 16);
          rcache_free_tmp(tmp4);
          if (tmp == tmp2)
            rcache_free_tmp(tmp3);
          break;
        case 0x09: // SWAP.W Rm,Rn      0110nnnnmmmm1001
          emith_rol(tmp2, tmp, 16);
          break;
        case 0x0a: // NEGC   Rm,Rn      0110nnnnmmmm1010
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            emith_t_to_carry(sr, 1);
            emith_negc_r_r(tmp2, tmp);
          } else
#endif
          {
            EMITH_HINT_COND(DCOND_CS);
            emith_tpop_carry(sr, 1);
            emith_negcf_r_r(tmp2, tmp);
            emith_tpush_carry(sr, 1);
          }
          break;
        case 0x0b: // NEG    Rm,Rn      0110nnnnmmmm1011
          emith_neg_r_r(tmp2, tmp);
          break;
        case 0x0c: // EXTU.B Rm,Rn      0110nnnnmmmm1100
          emith_clear_msb(tmp2, tmp, 24);
          break;
        case 0x0d: // EXTU.W Rm,Rn      0110nnnnmmmm1101
          emith_clear_msb(tmp2, tmp, 16);
          break;
        case 0x0e: // EXTS.B Rm,Rn      0110nnnnmmmm1110
          emith_sext(tmp2, tmp, 8);
          break;
        case 0x0f: // EXTS.W Rm,Rn      0110nnnnmmmm1111
          emith_sext(tmp2, tmp, 16);
          break;
        }
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x07: // ADD #imm,Rn  0111nnnniiiiiiii
      if (op & 0x80) // adding negative
        emit_sub_r_imm(GET_Rn(), (u8)-op);
      else
        emit_add_r_imm(GET_Rn(), (u8)op);
      goto end_op;
    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn)  10000000nnnndddd
      case 0x0100: // MOV.W R0,@(disp,Rn)  10000001nnnndddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,Rm),R0  10000100mmmmdddd
      case 0x0500: // MOV.W @(disp,Rm),R0  10000101mmmmdddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // CMP/EQ #imm,R0       10001000iiiiiiii
        tmp2 = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_cmp_r_imm(tmp2, (s8)(op & 0xff));
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR)   11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR)   11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR)   11000010dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,GBR),R0   11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0   11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0   11000110dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // TST #imm,R0            11001000iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x0900: // AND #imm,R0            11001001iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
        emith_and_r_r_imm(tmp, tmp2, (op & 0xff));
        goto end_op;
      case 0x0a00: // XOR #imm,R0            11001010iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_eor_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0b00: // OR #imm,R0             11001011iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_or_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0c00: // TST.B #imm,@(R0,GBR)   11001100iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0 | drcf.polling);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d00: // AND.B #imm,@(R0,GBR)   11001101iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_and_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0e00: // XOR.B #imm,@(R0,GBR)   11001110iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_eor_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0f00: // OR.B #imm,@(R0,GBR)    11001111iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_or_r_r_imm(tmp2, tmp, (op & 0xff));
      end_rmw_op:
        rcache_free_tmp(tmp);
        emit_indirect_indexed_write(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x0e: // MOV #imm,Rn  1110nnnniiiiiiii
      emit_move_r_imm32(GET_Rn(), (s8)op);
      goto end_op;

    default:
    default_:
      if (!(op_flags[i] & OF_B_IN_DS)) {
        elprintf_sh2(sh2, EL_ANOMALY,
          "drc: illegal op %04x @ %08x", op, pc - 2);
        exit(1);
      }
    }
end_op:
    rcache_unlock_all();
    rcache_set_usage_now(0);
#if DRC_DEBUG & 64
    RCACHE_CHECK("after insn");
#endif

    cycles += opd->cycles;
    if (op_flags[i+1] & OF_DELAY_OP) {
      do_host_disasm(tcache_id);
      continue;
    }

    // test irq?
    if (drcf.test_irq && !drcf.pending_branch_direct) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      if (!drcf.pending_branch_indirect)
        emit_move_r_imm32(SHR_PC, pc);
      rcache_flush();
      emith_call(sh2_drc_test_irq);
      drcf.test_irq = 0;
    }
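    // Branch handling: a pending direct branch has a known target and may be
    // linked within the block or through a block exit stub; a pending
    // indirect branch normally leaves through the dispatcher, unless the
    // target is constant or can be served from the return address cache.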
    // branch handling
    if (drcf.pending_branch_direct)
    {
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      u32 target_pc = opd_b->imm;
      int cond = -1;
      int ctaken = 0;
      void *target = NULL;

      if (OP_ISBRACND(opd_b->op))
        ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
      cycles += ctaken; // assume branch taken

#if LOOP_OPTIMIZER
      if ((drcf.loop_type == OF_IDLE_LOOP ||
          (drcf.loop_type == OF_DELAY_LOOP && drcf.delay_reg >= 0)))
      {
        // idle or delay loop
        emit_sync_t_to_sr();
        emith_sh2_delay_loop(cycles, drcf.delay_reg);
        rcache_unlock_all(); // may lock delay_reg
        drcf.polling = drcf.loop_type = drcf.pinning = 0;
      }
#endif

#if CALL_STACK
      void *rtsadd = NULL, *rtsret = NULL;
      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // BSR - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif

      // XXX move below cond test if not changing host cond (MIPS delay slot)?
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      rcache_clean();

      if (OP_ISBRACND(opd_b->op)) {
        // BT[S], BF[S] - emit condition test
        cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
        if (delay_dep_fw & BITMASK1(SHR_T)) {
          emith_sync_t(sr);
          emith_tst_r_imm(sr, T_save);
        } else {
          cond = emith_tst_t(sr, (opd_b->op == OP_BRANCH_CT));
          if (emith_get_t_cond() >= 0) {
            if (opd_b->op == OP_BRANCH_CT)
              emith_or_r_imm_c(cond, sr, T);
            else
              emith_bic_r_imm_c(cond, sr, T);
          }
        }
      } else
        emith_sync_t(sr);
      // no modification of host status/flags between here and branching!

      v = find_in_sorted_linkage(branch_targets, branch_target_count, target_pc);
      if (v >= 0)
      {
        // local branch
        if (branch_targets[v].ptr) {
          // local backward jump, link here now since host PC is already known
          target = branch_targets[v].ptr;
#if LOOP_OPTIMIZER
          if (pinned_loops[pinned_loop_count].pc == target_pc) {
            // backward jump at end of optimized loop
            rcache_unpin_all();
            target = pinned_loops[pinned_loop_count].ptr;
            pinned_loop_count++;
          }
#endif
          if (cond != -1) {
            if (emith_jump_patch_inrange(tcache_ptr, target)) {
              emith_jump_cond(cond, target);
            } else {
              // not reachable directly, must use far branch
              EMITH_JMP_START(emith_invert_cond(cond));
              emith_jump(target);
              EMITH_JMP_END(emith_invert_cond(cond));
            }
          } else {
            emith_jump(target);
            rcache_invalidate();
          }
        } else if (blx_target_count < MAX_LOCAL_BRANCHES) {
          // local forward jump
          target = tcache_ptr;
          blx_targets[blx_target_count++] =
              (struct linkage) { .pc = target_pc, .ptr = target, .mask = 0x2 };
          if (cond != -1)
            emith_jump_cond_patchable(cond, target);
          else {
            emith_jump_patchable(target);
            rcache_invalidate();
          }
        } else
          // no space for resolving forward branch, handle it as external
          dbg(1, "warning: too many unresolved branches");
      }

      if (target == NULL)
      {
        // can't resolve branch locally, make a block exit
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (cond != -1) {
#if 1
          if (bl && blx_target_count < ARRAY_SIZE(blx_targets)) {
            // conditional jumps get a blx stub for the far jump
            bl->type = BL_JCCBLX;
            target = tcache_ptr;
            blx_targets[blx_target_count++] =
                (struct linkage) { .pc = target_pc, .ptr = target, .bl = bl };
            emith_jump_cond_patchable(cond, target);
          } else {
            // not linkable, or blx table full; inline jump @dispatcher
            EMITH_JMP_START(emith_invert_cond(cond));
            if (bl) {
              bl->jump = tcache_ptr;
              emith_flush(); // flush to inhibit insn swapping
              bl->type = BL_LDJMP;
            }
            tmp = rcache_get_tmp_arg(0);
            emith_move_r_imm(tmp, target_pc);
            rcache_free_tmp(tmp);
            target = sh2_drc_dispatcher;

            emith_jump_patchable(target);
            EMITH_JMP_END(emith_invert_cond(cond));
          }
#else
          // jump @dispatcher - ARM 32bit version with conditional execution
          EMITH_SJMP_START(emith_invert_cond(cond));
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm_c(cond, tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;

          if (bl) {
            bl->jump = tcache_ptr;
            bl->type = BL_JMP;
          }
          emith_jump_cond_patchable(cond, target);
          EMITH_SJMP_END(emith_invert_cond(cond));
#endif
        } else {
          // unconditional, has the far jump inlined
          if (bl) {
            emith_flush(); // flush to inhibit insn swapping
            bl->type = BL_LDJMP;
          }

          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;

          emith_jump_patchable(target);
          rcache_invalidate();
        }
      }

#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif

      // branch not taken, correct cycle count
      if (ctaken)
        cycles -= ctaken;
      // set T bit to reflect branch not taken for OP_BRANCH_CT/CF
      if (emith_get_t_cond() >= 0) // T is synced for all other cases
        emith_set_t(sr, opd_b->op == OP_BRANCH_CF);

      drcf.pending_branch_direct = 0;
      if (target_pc >= base_pc && target_pc < pc)
        drcf.polling = drcf.loop_type = 0;
    }
    else if (drcf.pending_branch_indirect) {
      u32 target_pc;

      tmp = rcache_get_reg_arg(0, SHR_PC, NULL);

#if CALL_STACK
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      void *rtsadd = NULL, *rtsret = NULL;

      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // JSR, BSRF - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif

      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      rcache_clean();

#if CALL_STACK
      if (opd_b->rm == SHR_PR) {
        // RTS - restore rts data, else jump to dispatcher
        emith_jump(sh2_drc_dispatcher_return);
      } else
#endif
      if (gconst_get(SHR_PC, &target_pc)) {
        // JMP, JSR, BRAF, BSRF const - treat like unconditional direct branch
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (bl) // pc already loaded somewhere else, can patch jump only
          bl->type = BL_JMP;
        emith_jump_patchable(sh2_drc_dispatcher);
      } else {
        // JMP, JSR, BRAF, BSRF not const
        emith_jump(sh2_drc_dispatcher);
      }
      rcache_invalidate();

#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif

      drcf.pending_branch_indirect = 0;
      drcf.polling = drcf.loop_type = 0;
    }
    rcache_unlock_all();

    do_host_disasm(tcache_id);
  }
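  // If the block didn't end with an unconditional branch, emit an exit to
  // the dispatcher for the next PC so execution can continue in another
  // block.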
  // check the last op
  if (op_flags[i-1] & OF_DELAY_OP)
    opd = &ops[i-2];
  else
    opd = &ops[i-1];

  if (! OP_ISBRAUC(opd->op))
  {
    tmp = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    FLUSH_CYCLES(tmp);
    emith_sync_t(tmp);

    rcache_clean();
    bl = dr_prepare_ext_branch(block->entryp, pc, sh2->is_slave, tcache_id);
    if (bl) {
      emith_flush(); // flush to inhibit insn swapping
      bl->type = BL_LDJMP;
    }
    tmp = rcache_get_tmp_arg(0);
    emith_move_r_imm(tmp, pc);
    emith_jump_patchable(sh2_drc_dispatcher);
    rcache_invalidate();
  } else
    rcache_flush();

  // link unresolved branches, emitting blx area entries as needed
  emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
      branch_target_count, blx_targets, blx_target_count);

  emith_flush();
  do_host_disasm(tcache_id);

  emith_pool_commit(0);

  // fill blx backup; do this last to backup final patched code
  for (i = 0; i < block->entry_count; i++)
    for (bl = block->entryp[i].o_links; bl; bl = bl->o_next)
      memcpy(bl->jdisp, bl->blx ?: bl->jump, emith_jump_at_size());

  ring_alloc(&tcache_ring[tcache_id], tcache_ptr - block_entry_ptr);
  host_instructions_updated(block_entry_ptr, tcache_ptr);

  dr_activate_block(block, tcache_id, sh2->is_slave);
  emith_update_cache();

  do_host_disasm(tcache_id);

  dbg(2, " block #%d,%d -> %p tcache %d/%d, insns %d -> %d %.3f",
    tcache_id, blkid_main, tcache_ptr,
    tcache_ring[tcache_id].used, tcache_ring[tcache_id].size,
    insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);

  if ((sh2->pc & 0xc6000000) == 0x02000000) { // ROM
    dbg(2, " hash collisions %d/%d", hash_collisions, block_ring[tcache_id].used);
    Pico32x.emu_flags |= P32XF_DRC_ROM_C;
  }
/*
  printf("~~~\n");
  tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
  do_host_disasm(tcache_id);
  printf("~~~\n");
*/

#if (DRC_DEBUG)
  fflush(stdout);
#endif

  return block_entry_ptr;
}
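// Emit the fixed runtime helpers (memory access stubs, dispatcher, IRQ test,
// entry/exit trampolines) at the start of the translation cache. Translated
// blocks call into these stubs instead of going through C for every access.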
static void sh2_generate_utils(void)
{
  int arg0, arg1, arg2, arg3, sr, tmp, tmp2;
#if DRC_DEBUG
  int hic = host_insn_count; // don't count utils for insn statistics
#endif

  host_arg2reg(arg0, 0);
  host_arg2reg(arg1, 1);
  host_arg2reg(arg2, 2);
  host_arg2reg(arg3, 3);
  emith_move_r_r(arg0, arg0); // nop
  emith_flush();

  // sh2_drc_write8(u32 a, u32 d)
  sh2_drc_write8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write8_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write16(u32 a, u32 d)
  sh2_drc_write16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write16_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write32(u32 a, u32 d)
  sh2_drc_write32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write32_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // d = sh2_drc_read8(u32 a)
  sh2_drc_read8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_eor_r_imm_ptr_c(DCOND_CC, arg0, 1);
  emith_read8s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_jump_reg(arg2);
  emith_flush();
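  // The sh2_drc_read* helpers above and below share one pattern:
  // emith_sh2_rcall looks the address up in the memory map, setting carry
  // when the entry is a handler rather than direct memory. Direct memory is
  // read inline (address masked with arg3; byte reads flip address bit 0);
  // otherwise the handler is tail-called with the context in arg1.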
  // d = sh2_drc_read16(u32 a)
  sh2_drc_read16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read16s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_jump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read32(u32 a)
  sh2_drc_read32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ror_c(DCOND_CC, RET_REG, RET_REG, 16);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_jump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read8_poll(u32 a)
  sh2_drc_read8_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_jump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_eor_r_imm_ptr(arg1, 1);
  emith_read8s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_call(p32x_sh2_poll_memory8);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read16_poll(u32 a)
  sh2_drc_read16_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_jump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read16s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_call(p32x_sh2_poll_memory16);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read32_poll(u32 a)
  sh2_drc_read32_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_jump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read_r_r_r(arg1, arg2, arg1);
  emith_ror(arg1, arg1, 16);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_call(p32x_sh2_poll_memory32);
  emith_pop_and_ret(arg1);
  emith_flush();

  // sh2_drc_exit(u32 pc)
  sh2_drc_exit = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
  emit_do_static_regs(1, arg2);
  emith_sh2_drc_exit();
  emith_flush();

  // sh2_drc_dispatcher(u32 pc)
  sh2_drc_dispatcher = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
#if BRANCH_CACHE
  // check if PC is in branch target cache
  emith_and_r_r_imm(arg1, arg0, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_read_r_r_offs(arg2, arg1, offsetof(SH2, branch_cache));
  emith_cmp_r_r(arg2, arg0);
  EMITH_SJMP_START(DCOND_NE);
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bchit);
  emith_read_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_EQ, arg3, 1);
  emith_write_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
#endif
  emith_read_r_r_offs_ptr_c(DCOND_EQ, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  emith_jump_reg_c(DCOND_EQ, RET_REG);
  EMITH_SJMP_END(DCOND_NE);
#endif
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_add_r_r_ptr_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  emith_call(dr_lookup_block);
  // store PC and block entry ptr (in arg0) in branch target cache
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
#if BRANCH_CACHE
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_NE, arg3, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
#endif
  emith_ctx_read_c(DCOND_NE, arg2, SHR_PC * 4);
  emith_and_r_r_imm(arg1, arg2, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_write_r_r_offs_c(DCOND_NE, arg2, arg1, offsetof(SH2, branch_cache));
  emith_write_r_r_offs_ptr_c(DCOND_NE, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
#endif
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // lookup failed, call sh2_translate()
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  emith_call(sh2_translate);
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // XXX: can't translate, fail
  emith_call(dr_failure);
  emith_flush();
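  // The rts cache below is a small ring of (pc, host ptr) pairs kept in the
  // SH2 context: dispatcher_call records the SH2 return address (PR) of a
  // BSR/JSR together with the host return point, and dispatcher_return jumps
  // straight to the cached host code on RTS when the saved PC matches,
  // avoiding a full dispatcher lookup.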
#if CALL_STACK
  // pc = sh2_drc_dispatcher_call(u32 pc)
  sh2_drc_dispatcher_call = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg3, CONTEXT_REG, arg2, 0);
  rcache_get_reg_arg(2, SHR_PR, NULL);
  emith_add_r_ret(arg1);
  emith_write_r_r_offs_ptr(arg1, arg3, offsetof(SH2, rts_cache)+sizeof(void *));
  emith_write_r_r_offs(arg2, arg3, offsetof(SH2, rts_cache));
  rcache_flush();
  emith_ret();
  emith_flush();

  // sh2_drc_dispatcher_return(u32 pc)
  sh2_drc_dispatcher_return = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg2, 0);
  emith_read_r_r_offs(arg3, arg1, offsetof(SH2, rts_cache));
  emith_cmp_r_r(arg0, arg3);
#if (DRC_DEBUG & 128)
  EMITH_SJMP_START(DCOND_EQ);
  emith_move_r_ptr_imm(arg3, (uptr)&rcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_add_r_imm_c(DCOND_NE, arg1, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
  EMITH_SJMP_END(DCOND_EQ);
#else
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
#endif
  emith_read_r_r_offs_ptr(arg0, arg1, offsetof(SH2, rts_cache) + sizeof(void *));
  emith_sub_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg3, (uptr)&rchit);
  emith_read_r_r_offs(arg1, arg3, 0);
  emith_add_r_imm(arg1, 1);
  emith_write_r_r_offs(arg1, arg3, 0);
#endif
  emith_jump_reg(arg0);
  emith_flush();
#endif

  // sh2_drc_test_irq(void)
  // assumes it's called from main function (may jump to dispatcher)
  sh2_drc_test_irq = (void *)tcache_ptr;
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_lsr(arg0, sr, I_SHIFT);
  emith_and_r_imm(arg0, 0x0f);
  emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  EMITH_SJMP_START(DCOND_GT);
  emith_ret_c(DCOND_LE); // nope, return
  EMITH_SJMP_END(DCOND_GT);

  // adjust SP
  tmp = rcache_get_reg(SHR_SP, RC_GR_RMW, NULL);
  emith_sub_r_imm(tmp, 4*2);
  rcache_clean();

  // push SR
  tmp = rcache_get_reg_arg(0, SHR_SP, &tmp2);
  emith_add_r_r_imm(tmp, tmp2, 4);
  tmp = rcache_get_reg_arg(1, SHR_SR, NULL);
  emith_clear_msb(tmp, tmp, 22);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_call(p32x_sh2_write32); // XXX: use sh2_drc_write32?

  // push PC
  rcache_get_reg_arg(0, SHR_SP, NULL);
  rcache_get_reg_arg(1, SHR_PC, NULL);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_call(p32x_sh2_write32);

  // update I, cycles, do callback
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  emith_bic_r_imm(sr, I);
  emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  rcache_flush();
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);

  // obtain new PC
  tmp = rcache_get_reg_arg(1, SHR_VBR, &tmp2);
  emith_add_r_r_r_lsl(arg0, tmp2, RET_REG, 2);
  emith_call(sh2_drc_read32);
  if (arg0 != RET_REG)
    emith_move_r_r(arg0, RET_REG);
  emith_call_cleanup();
  rcache_invalidate();
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

  // sh2_drc_entry(SH2 *sh2)
  sh2_drc_entry = (void *)tcache_ptr;
  emith_sh2_drc_entry();
  emith_move_r_r_ptr(CONTEXT_REG, arg0); // move ctx, arg0
  emit_do_static_regs(0, arg2);
  emith_call(sh2_drc_test_irq);
  emith_ctx_read(arg0, SHR_PC * 4);
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

#ifdef DRC_SR_REG
  // sh2_drc_save_sr(SH2 *sh2)
  sh2_drc_save_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_write_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_invalidate();
  emith_ret();
  emith_flush();

  // sh2_drc_restore_sr(SH2 *sh2)
  sh2_drc_restore_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_WRITE, NULL);
  emith_read_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_flush();
  emith_ret();
  emith_flush();
#endif

#ifdef PDB_NET
  // debug
  #define MAKE_READ_WRAPPER(func) { \
    void *tmp = (void *)tcache_ptr; \
    emith_push_ret(); \
    emith_call(func); \
    emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
    emith_addf_r_r(arg2, arg0); \
    emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
    emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
    emith_adc_r_imm(arg2, 0x01000000); \
    emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
    emith_pop_and_ret(); \
    emith_flush(); \
    func = tmp; \
  }
  #define MAKE_WRITE_WRAPPER(func) { \
    void *tmp = (void *)tcache_ptr; \
    emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
    emith_addf_r_r(arg2, arg1); \
    emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
    emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
    emith_adc_r_imm(arg2, 0x01000000); \
    emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
    emith_move_r_r_ptr(arg2, CONTEXT_REG); \
    emith_jump(func); \
    emith_flush(); \
    func = tmp; \
  }

  MAKE_READ_WRAPPER(sh2_drc_read8);
  MAKE_READ_WRAPPER(sh2_drc_read16);
  MAKE_READ_WRAPPER(sh2_drc_read32);
  MAKE_WRITE_WRAPPER(sh2_drc_write8);
  MAKE_WRITE_WRAPPER(sh2_drc_write16);
  MAKE_WRITE_WRAPPER(sh2_drc_write32);
  MAKE_READ_WRAPPER(sh2_drc_read8_poll);
  MAKE_READ_WRAPPER(sh2_drc_read16_poll);
  MAKE_READ_WRAPPER(sh2_drc_read32_poll);
#endif

  emith_pool_commit(0);
  rcache_invalidate();

#if (DRC_DEBUG & 4)
  host_dasm_new_symbol(sh2_drc_entry);
  host_dasm_new_symbol(sh2_drc_dispatcher);
#if CALL_STACK
  host_dasm_new_symbol(sh2_drc_dispatcher_call);
  host_dasm_new_symbol(sh2_drc_dispatcher_return);
#endif
  host_dasm_new_symbol(sh2_drc_exit);
  host_dasm_new_symbol(sh2_drc_test_irq);
  host_dasm_new_symbol(sh2_drc_write8);
  host_dasm_new_symbol(sh2_drc_write16);
  host_dasm_new_symbol(sh2_drc_write32);
  host_dasm_new_symbol(sh2_drc_read8);
  host_dasm_new_symbol(sh2_drc_read16);
  host_dasm_new_symbol(sh2_drc_read32);
  host_dasm_new_symbol(sh2_drc_read8_poll);
  host_dasm_new_symbol(sh2_drc_read16_poll);
  host_dasm_new_symbol(sh2_drc_read32_poll);
#ifdef DRC_SR_REG
  host_dasm_new_symbol(sh2_drc_save_sr);
  host_dasm_new_symbol(sh2_drc_restore_sr);
#endif
#endif

#if DRC_DEBUG
  host_insn_count = hic;
#endif
}
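// Self-modifying code handling: a write into a range covered by translated
// blocks removes every block whose code or literal pool overlaps the written
// area, and flushes the branch/rts caches, which may hold pointers into the
// removed code.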
static void sh2_smc_rm_blocks(u32 a, int len, int tcache_id, u32 shift)
{
  struct block_list **blist, *entry, *next;
  u32 mask = RAM_SIZE(tcache_id) - 1;
  u32 wtmask = ~0x20000000; // writethrough area mask
  u32 start_addr, end_addr;
  u32 start_lit, end_lit;
  struct block_desc *block;
#if (DRC_DEBUG & 2)
  int removed = 0;
#endif

  // ignore cache-through
  a &= wtmask;

  blist = &inval_lookup[tcache_id][(a & mask) / INVAL_PAGE_SIZE];
  entry = *blist;
  // go through the block list for this range
  while (entry != NULL) {
    next = entry->next;
    block = entry->block;
    start_addr = block->addr & wtmask;
    end_addr = start_addr + block->size;
    start_lit = block->addr_lit & wtmask;
    end_lit = start_lit + block->size_lit;
    // disable/delete block if it covers the modified address
    if ((start_addr < a+len && a < end_addr) ||
        (start_lit < a+len && a < end_lit))
    {
      dbg(2, "smc remove @%08x", a);
      end_addr = (start_lit < a+len && block->size_lit ? a : 0);
      dr_rm_block_entry(block, tcache_id, end_addr, 0);
#if (DRC_DEBUG & 2)
      removed = 1;
#endif
    }
    entry = next;
  }

#if (DRC_DEBUG & 2)
  if (!removed)
    dbg(2, "rm_blocks called @%08x, no work?", a);
#endif

#if BRANCH_CACHE
  if (tcache_id)
    memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  else {
    memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
  }
#endif
#if CALL_STACK
  if (tcache_id) {
    memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    sh2s[tcache_id-1].rts_cache_idx = 0;
  } else {
    memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  }
#endif
}
void sh2_drc_wcheck_ram(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 0, SH2_DRCBLK_RAM_SHIFT);
}

void sh2_drc_wcheck_da(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 1 + sh2->is_slave, SH2_DRCBLK_DA_SHIFT);
}
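// Example of the cycle packing used below: entering with cycles=1000 gives
// sr = (1000 << 12) | (sr & 0x3f3); translated code counts the upper bits
// down, and the remainder (negative when the budget was overrun) is
// recovered with an arithmetic shift right by 12.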
int sh2_execute_drc(SH2 *sh2c, int cycles)
{
  int ret_cycles;

  // cycles are kept in SHR_SR unused bits (upper 20)
  // bit11 contains T saved for delay slot
  // others are usual SH2 flags
  sh2c->sr &= 0x3f3;
  sh2c->sr |= cycles << 12;

  sh2_drc_entry(sh2c);

  // TODO: irq cycles
  ret_cycles = (int32_t)sh2c->sr >> 12;
  if (ret_cycles > 0)
    dbg(1, "warning: drc returned with cycles: %d, pc %08x", ret_cycles, sh2c->pc);

  sh2c->sr &= 0x3f3;
  return ret_cycles;
}
static void block_stats(void)
{
#if (DRC_DEBUG & 2)
  int c, b, i;
  long total = 0;

  printf("block stats:\n");
  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
      if (block_tables[b][i].addr != 0)
        total += block_tables[b][i].refcount;
  }
  printf("total: %ld\n", total);

  for (c = 0; c < 20; c++) {
    struct block_desc *blk, *maxb = NULL;
    int max = 0;
    for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
      for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
        if ((blk = &block_tables[b][i])->addr != 0 && blk->refcount > max) {
          max = blk->refcount;
          maxb = blk;
        }
    }
    if (maxb == NULL)
      break;
    printf("%08x %p %9d %2.3f%%\n", maxb->addr, maxb->tcache_ptr, maxb->refcount,
      (double)maxb->refcount / total * 100.0);
    maxb->refcount = 0;
  }

  for (b = 0; b < ARRAY_SIZE(block_tables); b++)
    for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
      block_tables[b][i].refcount = 0;
#endif
}
void entry_stats(void)
{
#if (DRC_DEBUG & 32)
  int c, b, i, j;
  long total = 0;

  printf("block entry stats:\n");
  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
      for (j = 0; j < block_tables[b][i].entry_count; j++)
        total += block_tables[b][i].entryp[j].entry_count;
  }
  printf("total: %ld\n", total);

  for (c = 0; c < 20; c++) {
    struct block_desc *blk;
    struct block_entry *maxb = NULL;
    int max = 0;
    for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
      for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size) {
        blk = &block_tables[b][i];
        for (j = 0; j < blk->entry_count; j++)
          if (blk->entryp[j].entry_count > max) {
            max = blk->entryp[j].entry_count;
            maxb = &blk->entryp[j];
          }
      }
    }
    if (maxb == NULL)
      break;
    printf("%08x %p %9d %2.3f%%\n", maxb->pc, maxb->tcache_ptr, maxb->entry_count,
      (double)100 * maxb->entry_count / total);
    maxb->entry_count = 0;
  }

  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
      for (j = 0; j < block_tables[b][i].entry_count; j++)
        block_tables[b][i].entryp[j].entry_count = 0;
  }
#endif
}
static void backtrace(void)
{
#if (DRC_DEBUG & 1024)
  int i;
  printf("backtrace master:\n");
  for (i = 0; i < ARRAY_SIZE(csh2[0]); i++)
    SH2_DUMP(&csh2[0][i], "bt msh2");
  printf("backtrace slave:\n");
  for (i = 0; i < ARRAY_SIZE(csh2[1]); i++)
    SH2_DUMP(&csh2[1][i], "bt ssh2");
#endif
}
static void state_dump(void)
{
#if (DRC_DEBUG & 2048)
  int i;

  SH2_DUMP(&sh2s[0], "master");
  printf("VBR msh2: %x\n", sh2s[0].vbr);
  for (i = 0; i < 0x60; i++) {
    printf("%08x ", p32x_sh2_read32(sh2s[0].vbr + i*4, &sh2s[0]));
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("stack msh2: %x\n", sh2s[0].r[15]);
  for (i = -0x30; i < 0x30; i++) {
    printf("%08x ", p32x_sh2_read32(sh2s[0].r[15] + i*4, &sh2s[0]));
    if ((i+1) % 8 == 0) printf("\n");
  }

  SH2_DUMP(&sh2s[1], "slave");
  printf("VBR ssh2: %x\n", sh2s[1].vbr);
  for (i = 0; i < 0x60; i++) {
    printf("%08x ", p32x_sh2_read32(sh2s[1].vbr + i*4, &sh2s[1]));
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("stack ssh2: %x\n", sh2s[1].r[15]);
  for (i = -0x30; i < 0x30; i++) {
    printf("%08x ", p32x_sh2_read32(sh2s[1].r[15] + i*4, &sh2s[1]));
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
}
static void bcache_stats(void)
{
#if (DRC_DEBUG & 128)
  int i;

#if CALL_STACK
  for (i = 1; i < ARRAY_SIZE(sh2s->rts_cache); i++)
    if (sh2s[0].rts_cache[i].pc == -1 && sh2s[1].rts_cache[i].pc == -1) break;

  printf("return cache hits:%d misses:%d depth: %d index: %d/%d\n",
    rchit, rcmiss, i, sh2s[0].rts_cache_idx, sh2s[1].rts_cache_idx);
  for (i = 0; i < ARRAY_SIZE(sh2s[0].rts_cache); i++) {
    printf("%08x ", sh2s[0].rts_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
  for (i = 0; i < ARRAY_SIZE(sh2s[1].rts_cache); i++) {
    printf("%08x ", sh2s[1].rts_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
#if BRANCH_CACHE
  printf("branch cache hits:%d misses:%d\n", bchit, bcmiss);
  printf("branch cache master:\n");
  for (i = 0; i < ARRAY_SIZE(sh2s[0].branch_cache); i++) {
    printf("%08x ", sh2s[0].branch_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("branch cache slave:\n");
  for (i = 0; i < ARRAY_SIZE(sh2s[1].branch_cache); i++) {
    printf("%08x ", sh2s[1].branch_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
#endif
}
void sh2_drc_flush_all(void)
{
  backtrace();
  state_dump();
  block_stats();
  entry_stats();
  bcache_stats();
  dr_flush_tcache(0);
  dr_flush_tcache(1);
  dr_flush_tcache(2);
  Pico32x.emu_flags &= ~P32XF_DRC_ROM_C;
}

void sh2_drc_mem_setup(SH2 *sh2)
{
  // fill the DRC-only convenience pointers
  sh2->p_drcblk_da = Pico32xMem->drcblk_da[!!sh2->is_slave];
  sh2->p_drcblk_ram = Pico32xMem->drcblk_ram;
}
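// One-time allocation of all translation structures (block/entry tables,
// link pools, invalidation lookup, hash tables), followed by emitting the
// utility stubs; the per-SH2 branch and rts caches are reset on every call.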
int sh2_drc_init(SH2 *sh2)
{
  int i;

  if (block_tables[0] == NULL)
  {
    for (i = 0; i < TCACHE_BUFFERS; i++) {
      block_tables[i] = calloc(BLOCK_MAX_COUNT(i), sizeof(*block_tables[0]));
      if (block_tables[i] == NULL)
        goto fail;
      entry_tables[i] = calloc(ENTRY_MAX_COUNT(i), sizeof(*entry_tables[0]));
      if (entry_tables[i] == NULL)
        goto fail;
      block_link_pool[i] = calloc(BLOCK_LINK_MAX_COUNT(i),
                            sizeof(*block_link_pool[0]));
      if (block_link_pool[i] == NULL)
        goto fail;

      inval_lookup[i] = calloc(RAM_SIZE(i) / INVAL_PAGE_SIZE,
                               sizeof(inval_lookup[0]));
      if (inval_lookup[i] == NULL)
        goto fail;

      hash_tables[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*hash_tables[0]));
      if (hash_tables[i] == NULL)
        goto fail;

      unresolved_links[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*unresolved_links[0]));
      if (unresolved_links[i] == NULL)
        goto fail;
      //atexit(sh2_drc_finish);

      RING_INIT(&block_ring[i], block_tables[i], BLOCK_MAX_COUNT(i));
      RING_INIT(&entry_ring[i], entry_tables[i], ENTRY_MAX_COUNT(i));
    }

    block_list_pool = calloc(BLOCK_LIST_MAX_COUNT, sizeof(*block_list_pool));
    if (block_list_pool == NULL)
      goto fail;
    block_list_pool_count = 0;
    blist_free = NULL;

    memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
    memset(blink_free, 0, sizeof(blink_free));

    drc_cmn_init();
    rcache_init();

    tcache_ptr = tcache;
    sh2_generate_utils();
    host_instructions_updated(tcache, tcache_ptr);
    emith_update_cache();

    i = tcache_ptr - tcache;
    RING_INIT(&tcache_ring[0], tcache_ptr, tcache_sizes[0] - i);
    for (i = 1; i < ARRAY_SIZE(tcache_ring); i++) {
      RING_INIT(&tcache_ring[i], tcache_ring[i-1].base + tcache_ring[i-1].size,
          tcache_sizes[i]);
    }

#if (DRC_DEBUG & 4)
    for (i = 0; i < ARRAY_SIZE(block_tables); i++)
      tcache_dsm_ptrs[i] = tcache_ring[i].base;
    // disasm the utils
    tcache_dsm_ptrs[0] = tcache;
    do_host_disasm(0);
    fflush(stdout);
#endif
#if (DRC_DEBUG & 1)
    hash_collisions = 0;
#endif
  }

  memset(sh2->branch_cache, -1, sizeof(sh2->branch_cache));
  memset(sh2->rts_cache, -1, sizeof(sh2->rts_cache));
  sh2->rts_cache_idx = 0;

  return 0;

fail:
  sh2_drc_finish(sh2);
  return -1;
}
void sh2_drc_finish(SH2 *sh2)
{
  int i;

  if (block_tables[0] == NULL)
    return;

#if (DRC_DEBUG & (256|512))
  if (trace[0]) fclose(trace[0]);
  if (trace[1]) fclose(trace[1]);
  trace[0] = trace[1] = NULL;
#endif

#if (DRC_DEBUG & 4)
  for (i = 0; i < TCACHE_BUFFERS; i++) {
    printf("~~~ tcache %d\n", i);
#if 0
    if (tcache_ring[i].first < tcache_ring[i].next) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    } else if (tcache_ring[i].used) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].base + tcache_ring[i].size;
      do_host_disasm(i);
      tcache_dsm_ptrs[i] = tcache_ring[i].base;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    }
#endif
    printf("max links: %d\n", block_link_pool_counts[i]);
  }
  printf("max block list: %d\n", block_list_pool_count);
#endif

  sh2_drc_flush_all();

  for (i = 0; i < TCACHE_BUFFERS; i++) {
    if (block_tables[i] != NULL)
      free(block_tables[i]);
    block_tables[i] = NULL;
    if (entry_tables[i] != NULL)
      free(entry_tables[i]);
    entry_tables[i] = NULL;
    if (block_link_pool[i] != NULL)
      free(block_link_pool[i]);
    block_link_pool[i] = NULL;
    blink_free[i] = NULL;

    if (inval_lookup[i] != NULL)
      free(inval_lookup[i]);
    inval_lookup[i] = NULL;

    if (hash_tables[i] != NULL) {
      free(hash_tables[i]);
      hash_tables[i] = NULL;
    }
  }

  if (block_list_pool != NULL)
    free(block_list_pool);
  block_list_pool = NULL;
  blist_free = NULL;

  drc_cmn_cleanup();
}

#endif /* DRC_SH2 */
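// Map an SH2 PC to a host pointer usable for direct opcode fetch; returns
// (void *)-1 when the PC is not in directly accessible memory. The result is
// biased by -(pc & ~mask) so that indexing it with the PC lands on the right
// host address within the mapped region.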
static void *dr_get_pc_base(u32 pc, SH2 *sh2)
{
  void *ret;
  u32 mask = 0;

  ret = p32x_sh2_get_mem_ptr(pc, &mask, sh2);
  if (ret == (void *)-1)
    return ret;

  return (char *)ret - (pc & ~mask);
}
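// Analyze a block of SH2 code before translation: pass 1 disassembles from
// base_pc, recording each op's register sources/destinations, cycle count
// and branch behavior, and stops at block-ending ops, the size limit, or
// where code runs into literal/MOVA data; a 2nd pass then propagates T state
// and detects loops using the variables declared for it below.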
  5144. u16 scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc_out,
  5145. u32 *base_literals_out, u32 *end_literals_out)
  5146. {
  5147. u16 *dr_pc_base;
  5148. u32 pc, op, tmp;
  5149. u32 end_pc, end_literals = 0;
  5150. u32 lowest_literal = 0;
  5151. u32 lowest_mova = 0;
  5152. struct op_data *opd;
  5153. int next_is_delay = 0;
  5154. int end_block = 0;
  5155. int i, i_end;
  5156. u32 crc = 0;
  5157. // 2nd pass stuff
  5158. int last_btarget; // loop detector
  5159. enum { T_UNKNOWN, T_CLEAR, T_SET } t; // T propagation state
  5160. memset(op_flags, 0, sizeof(*op_flags) * BLOCK_INSN_LIMIT);
  5161. op_flags[0] |= OF_BTARGET; // block start is always a target
  5162. dr_pc_base = dr_get_pc_base(base_pc, &sh2s[!!is_slave]);
  5163. // 1st pass: disassemble
  5164. for (i = 0, pc = base_pc; ; i++, pc += 2) {
  5165. // we need an ops[] entry after the last one initialized,
  5166. // so do it before end_block checks
  5167. opd = &ops[i];
  5168. opd->op = OP_UNHANDLED;
  5169. opd->rm = -1;
  5170. opd->source = opd->dest = 0;
  5171. opd->cycles = 1;
  5172. opd->imm = 0;
  5173. if (next_is_delay) {
  5174. op_flags[i] |= OF_DELAY_OP;
  5175. next_is_delay = 0;
  5176. }
  5177. else if (end_block || i >= BLOCK_INSN_LIMIT - 2)
  5178. break;
  5179. else if ((lowest_mova && lowest_mova <= pc) ||
  5180. (lowest_literal && lowest_literal <= pc))
  5181. break; // text area collides with data area
  5182. op = FETCH_OP(pc);
  5183. switch ((op & 0xf000) >> 12)
  5184. {
  5185. /////////////////////////////////////////////
  5186. case 0x00:
  5187. switch (op & 0x0f)
  5188. {
  5189. case 0x02:
  5190. switch (GET_Fx())
  5191. {
  5192. case 0: // STC SR,Rn 0000nnnn00000010
  5193. tmp = BITMASK2(SHR_SR, SHR_T);
  5194. break;
  5195. case 1: // STC GBR,Rn 0000nnnn00010010
  5196. tmp = BITMASK1(SHR_GBR);
  5197. break;
  5198. case 2: // STC VBR,Rn 0000nnnn00100010
  5199. tmp = BITMASK1(SHR_VBR);
  5200. break;
  5201. default:
  5202. goto undefined;
  5203. }
  5204. opd->op = OP_MOVE;
  5205. opd->source = tmp;
  5206. opd->dest = BITMASK1(GET_Rn());
  5207. break;
  5208. case 0x03:
  5209. CHECK_UNHANDLED_BITS(0xd0, undefined);
  5210. // BRAF Rm 0000mmmm00100011
  5211. // BSRF Rm 0000mmmm00000011
  5212. opd->op = OP_BRANCH_RF;
  5213. opd->rm = GET_Rn();
  5214. opd->source = BITMASK2(SHR_PC, opd->rm);
  5215. opd->dest = BITMASK1(SHR_PC);
  5216. if (!(op & 0x20))
  5217. opd->dest |= BITMASK1(SHR_PR);
  5218. opd->cycles = 2;
  5219. next_is_delay = 1;
  5220. if (!(opd->dest & BITMASK1(SHR_PR)))
  5221. end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
  5222. else
  5223. op_flags[i+1+next_is_delay] |= OF_BTARGET;
  5224. break;
  5225. case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
  5226. case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
  5227. case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
  5228. opd->source = BITMASK3(GET_Rm(), SHR_R0, GET_Rn());
  5229. opd->dest = BITMASK1(SHR_MEM);
  5230. break;
  5231. case 0x07:
  5232. // MUL.L Rm,Rn 0000nnnnmmmm0111
  5233. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5234. opd->dest = BITMASK1(SHR_MACL);
  5235. opd->cycles = 2;
  5236. break;
  5237. case 0x08:
  5238. CHECK_UNHANDLED_BITS(0xf00, undefined);
  5239. switch (GET_Fx())
  5240. {
  5241. case 0: // CLRT 0000000000001000
  5242. opd->op = OP_SETCLRT;
  5243. opd->dest = BITMASK1(SHR_T);
  5244. opd->imm = 0;
  5245. break;
  5246. case 1: // SETT 0000000000011000
  5247. opd->op = OP_SETCLRT;
  5248. opd->dest = BITMASK1(SHR_T);
  5249. opd->imm = 1;
  5250. break;
  5251. case 2: // CLRMAC 0000000000101000
  5252. opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
  5253. break;
  5254. default:
  5255. goto undefined;
  5256. }
  5257. break;
  5258. case 0x09:
  5259. switch (GET_Fx())
  5260. {
  5261. case 0: // NOP 0000000000001001
  5262. CHECK_UNHANDLED_BITS(0xf00, undefined);
  5263. break;
  5264. case 1: // DIV0U 0000000000011001
  5265. CHECK_UNHANDLED_BITS(0xf00, undefined);
  5266. opd->source = BITMASK1(SHR_SR);
  5267. opd->dest = BITMASK2(SHR_SR, SHR_T);
  5268. break;
  5269. case 2: // MOVT Rn 0000nnnn00101001
  5270. opd->source = BITMASK1(SHR_T);
  5271. opd->dest = BITMASK1(GET_Rn());
  5272. break;
  5273. default:
  5274. goto undefined;
  5275. }
  5276. break;
  5277. case 0x0a:
  5278. switch (GET_Fx())
  5279. {
  5280. case 0: // STS MACH,Rn 0000nnnn00001010
  5281. tmp = SHR_MACH;
  5282. break;
  5283. case 1: // STS MACL,Rn 0000nnnn00011010
  5284. tmp = SHR_MACL;
  5285. break;
  5286. case 2: // STS PR,Rn 0000nnnn00101010
  5287. tmp = SHR_PR;
  5288. break;
  5289. default:
  5290. goto undefined;
  5291. }
  5292. opd->op = OP_MOVE;
  5293. opd->source = BITMASK1(tmp);
  5294. opd->dest = BITMASK1(GET_Rn());
  5295. break;
  5296. case 0x0b:
  5297. CHECK_UNHANDLED_BITS(0xf00, undefined);
  5298. switch (GET_Fx())
  5299. {
  5300. case 0: // RTS 0000000000001011
  5301. opd->op = OP_BRANCH_R;
  5302. opd->rm = SHR_PR;
  5303. opd->source = BITMASK1(opd->rm);
  5304. opd->dest = BITMASK1(SHR_PC);
  5305. opd->cycles = 2;
  5306. next_is_delay = 1;
  5307. end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
  5308. break;
  5309. case 1: // SLEEP 0000000000011011
  5310. opd->op = OP_SLEEP;
  5311. end_block = 1;
  5312. break;
  5313. case 2: // RTE 0000000000101011
  5314. opd->op = OP_RTE;
  5315. opd->source = BITMASK1(SHR_SP);
  5316. opd->dest = BITMASK4(SHR_SP, SHR_SR, SHR_T, SHR_PC);
  5317. opd->cycles = 4;
  5318. next_is_delay = 1;
  5319. end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
  5320. break;
  5321. default:
  5322. goto undefined;
  5323. }
  5324. break;
  5325. case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
  5326. case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
  5327. case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
  5328. opd->source = BITMASK3(GET_Rm(), SHR_R0, SHR_MEM);
  5329. opd->dest = BITMASK1(GET_Rn());
  5330. op_flags[i] |= OF_POLL_INSN;
  5331. break;
  5332. case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
  5333. opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
  5334. opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
  5335. opd->cycles = 3;
  5336. break;
  5337. default:
  5338. goto undefined;
  5339. }
  5340. break;
  5341. /////////////////////////////////////////////
  5342. case 0x01:
  5343. // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
  5344. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5345. opd->dest = BITMASK1(SHR_MEM);
  5346. opd->imm = (op & 0x0f) * 4;
  5347. break;
  5348. /////////////////////////////////////////////
  5349. case 0x02:
  5350. switch (op & 0x0f)
  5351. {
  5352. case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
  5353. case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
  5354. case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
  5355. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5356. opd->dest = BITMASK1(SHR_MEM);
  5357. break;
  5358. case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
  5359. case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
  5360. case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
  5361. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5362. opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
  5363. break;
  5364. case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
  5365. opd->source = BITMASK3(SHR_SR, GET_Rm(), GET_Rn());
  5366. opd->dest = BITMASK2(SHR_SR, SHR_T);
  5367. break;
  5368. case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
  5369. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5370. opd->dest = BITMASK1(SHR_T);
  5371. break;
  5372. case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
  5373. case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
  5374. case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
  5375. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5376. opd->dest = BITMASK1(GET_Rn());
  5377. break;
  5378. case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
  5379. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5380. opd->dest = BITMASK1(SHR_T);
  5381. break;
  5382. case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
  5383. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5384. opd->dest = BITMASK1(GET_Rn());
  5385. break;
  5386. case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
  5387. case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
  5388. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5389. opd->dest = BITMASK1(SHR_MACL);
  5390. break;
  5391. default:
  5392. goto undefined;
  5393. }
  5394. break;
  5395. /////////////////////////////////////////////
  5396. case 0x03:
  5397. switch (op & 0x0f)
  5398. {
  5399. case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
  5400. case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
  5401. case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
  5402. case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
  5403. case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
  5404. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5405. opd->dest = BITMASK1(SHR_T);
  5406. break;
  5407. case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
  5408. opd->source = BITMASK4(GET_Rm(), GET_Rn(), SHR_SR, SHR_T);
  5409. opd->dest = BITMASK3(GET_Rn(), SHR_SR, SHR_T);
  5410. break;
  5411. case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
  5412. case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
  5413. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5414. opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
  5415. opd->cycles = 2;
  5416. break;
  5417. case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
  5418. case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
  5419. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5420. opd->dest = BITMASK1(GET_Rn());
  5421. break;
  5422. case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
  5423. case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
  5424. opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_T);
  5425. opd->dest = BITMASK2(GET_Rn(), SHR_T);
  5426. break;
  5427. case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
  5428. case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
  5429. opd->source = BITMASK2(GET_Rm(), GET_Rn());
  5430. opd->dest = BITMASK2(GET_Rn(), SHR_T);
  5431. break;
  5432. default:
  5433. goto undefined;
  5434. }
  5435. break;
    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn    0100nnnn00000000
        case 2: // SHAL Rn    0100nnnn00100000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // DT Rn      0100nnnn00010000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          op_flags[i] |= OF_DELAY_INSN;
          break;
        default:
          goto undefined;
        }
        break;
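        // Note (annotation): OF_DELAY_INSN appears to tag DT as the typical
        // "delaying" instruction of a busy-wait loop (DT Rn; BF loop); the
        // loop detector in the 2nd pass counts it alongside polling loads.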
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn    0100nnnn00000001
        case 2: // SHAR Rn    0100nnnn00100001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // CMP/PZ Rn  0100nnnn00010001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn   0100nnnn00000010
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x12: // STS.L MACL,@-Rn   0100nnnn00010010
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x22: // STS.L PR,@-Rn     0100nnnn00100010
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x03: // STC.L SR,@-Rn     0100nnnn00000011
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->cycles = 2;
          break;
        case 0x13: // STC.L GBR,@-Rn    0100nnnn00010011
          tmp = BITMASK1(SHR_GBR);
          opd->cycles = 2;
          break;
        case 0x23: // STC.L VBR,@-Rn    0100nnnn00100011
          tmp = BITMASK1(SHR_VBR);
          opd->cycles = 2;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn()) | tmp;
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
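        // Note (annotation): the @-Rn predecrement both reads and writes Rn,
        // so Rn lands in source and dest; tmp merges in whichever system
        // register the sub-decode above selected.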
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn           0100nnnn00000100
        case 0x05: // ROTR Rn           0100nnnn00000101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x24: // ROTCL Rn          0100nnnn00100100
        case 0x25: // ROTCR Rn          0100nnnn00100101
          opd->source = BITMASK2(GET_Rn(), SHR_T);
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x15: // CMP/PL Rn         0100nnnn00010101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH   0100mmmm00000110
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x16: // LDS.L @Rm+,MACL   0100mmmm00010110
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x26: // LDS.L @Rm+,PR     0100mmmm00100110
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x07: // LDC.L @Rm+,SR     0100mmmm00000111
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x17: // LDC.L @Rm+,GBR    0100mmmm00010111
          tmp = BITMASK1(SHR_GBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x27: // LDC.L @Rm+,VBR    0100mmmm00100111
          tmp = BITMASK1(SHR_VBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), SHR_MEM);
        opd->dest = BITMASK1(GET_Rn()) | tmp;
        break;
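        // Note (annotation): GET_Rn() extracts bits 8-11, which is where the
        // register number sits in these encodings even though the mnemonics
        // call it Rm; the post-increment makes it both a source and a dest.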
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0:
          // SHLL2 Rn        0100nnnn00001000
          // SHLR2 Rn        0100nnnn00001001
          break;
        case 1:
          // SHLL8 Rn        0100nnnn00011000
          // SHLR8 Rn        0100nnnn00011001
          break;
        case 2:
          // SHLL16 Rn       0100nnnn00101000
          // SHLR16 Rn       0100nnnn00101001
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
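        // Note (annotation): unlike SHLL/SHLR, the fixed multi-bit shifts
        // don't touch T, so the dest mask is just Rn; the inner switch only
        // validates the Fx field before the shared mask assignment.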
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH   0100mmmm00001010
          tmp = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL   0100mmmm00011010
          tmp = SHR_MACL;
          break;
        case 2: // LDS Rm,PR     0100mmmm00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0b:
        switch (GET_Fx())
        {
        case 0: // JSR @Rm       0100mmmm00001011
          opd->dest = BITMASK1(SHR_PR);
          // fallthrough
        case 2: // JMP @Rm       0100mmmm00101011
          opd->op = OP_BRANCH_R;
          opd->rm = GET_Rn();
          opd->source = BITMASK1(opd->rm);
          opd->dest |= BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          if (!(opd->dest & BITMASK1(SHR_PR)))
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          else
            op_flags[i+1+next_is_delay] |= OF_BTARGET;
          break;
        case 1: // TAS.B @Rn     0100nnnn00011011
          opd->source = BITMASK2(GET_Rn(), SHR_MEM);
          opd->dest = BITMASK2(SHR_T, SHR_MEM);
          opd->cycles = 4;
          break;
        default:
          goto undefined;
        }
        break;
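        // Note (annotation): JSR deliberately falls through into the JMP
        // handling after marking PR as a dest. A plain JMP ends the block
        // (register-indirect target, nothing useful to scan past) unless the
        // fallthrough point is already a branch target; JSR instead marks the
        // return point as a branch target, since PR will bring control back.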
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR     0100mmmm00001110
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // LDC Rm,GBR    0100mmmm00011110
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // LDC Rm,VBR    0100mmmm00101110
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_LDC;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = tmp;
        break;
      case 0x0f:
        // MAC.W @Rm+,@Rn+  0100nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x05:
      // MOV.L @(disp,Rm),Rn  0101nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (op & 0x0f) * 4;
      op_flags[i] |= OF_POLL_INSN;
      break;
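      // Note (annotation): OF_POLL_INSN seems to mark loads that could be the
      // read half of a status-polling loop (spinning on a hardware register),
      // presumably so the loop detector below can recognize such loops.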
    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x04: // MOV.B @Rm+,Rn       0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn       0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn       0110nnnnmmmm0110
        opd->dest = BITMASK2(GET_Rm(), GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        break;
      case 0x00: // MOV.B @Rm,Rn        0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn        0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn        0110nnnnmmmm0010
        opd->dest = BITMASK1(GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0a: // NEGC Rm,Rn          0110nnnnmmmm1010
        opd->source = BITMASK2(GET_Rm(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x03: // MOV Rm,Rn           0110nnnnmmmm0011
        opd->op = OP_MOVE;
        goto arith_rmrn;
      case 0x07: // NOT Rm,Rn           0110nnnnmmmm0111
      case 0x08: // SWAP.B Rm,Rn        0110nnnnmmmm1000
      case 0x09: // SWAP.W Rm,Rn        0110nnnnmmmm1001
      case 0x0b: // NEG Rm,Rn           0110nnnnmmmm1011
      case 0x0c: // EXTU.B Rm,Rn        0110nnnnmmmm1100
      case 0x0d: // EXTU.W Rm,Rn        0110nnnnmmmm1101
      case 0x0e: // EXTS.B Rm,Rn        0110nnnnmmmm1110
      case 0x0f: // EXTS.W Rm,Rn        0110nnnnmmmm1111
      arith_rmrn:
        opd->source = BITMASK1(GET_Rm());
        opd->dest = BITMASK1(GET_Rn());
        break;
      }
      break;
    /////////////////////////////////////////////
    case 0x07:
      // ADD #imm,Rn  0111nnnniiiiiiii
      opd->source = opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;
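      // Note (annotation): (s8)op keeps only the low byte and sign-extends
      // it; e.g. op = 0x73ff (ADD #-1,R3) yields imm = -1, and op = 0x7310
      // (ADD #16,R3) yields imm = 16.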
    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn)  10000000nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f);
        break;
      case 0x0100: // MOV.W R0,@(disp,Rn)  10000001nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f) * 2;
        break;
      case 0x0400: // MOV.B @(disp,Rm),R0  10000100mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0500: // MOV.W @(disp,Rm),R0  10000101mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f) * 2;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0800: // CMP/EQ #imm,R0       10001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = (s8)op;
        break;
      case 0x0d00: // BT/S label           10001101dddddddd
      case 0x0f00: // BF/S label           10001111dddddddd
        next_is_delay = 1;
        // fallthrough
      case 0x0900: // BT label             10001001dddddddd
      case 0x0b00: // BF label             10001011dddddddd
        opd->op = (op & 0x0200) ? OP_BRANCH_CF : OP_BRANCH_CT;
        opd->source = BITMASK2(SHR_PC, SHR_T);
        opd->dest = BITMASK1(SHR_PC);
        opd->imm = ((signed int)(op << 24) >> 23);
        opd->imm += pc + 4;
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
        break;
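      // Note (annotation): (signed int)(op << 24) >> 23 sign-extends the
      // 8-bit displacement and doubles it in one step. Worked example:
      // op = 0x8bfe (BF with disp -2): 0x8bfe << 24 = 0xfe000000,
      // arithmetic >> 23 gives -4, so the target is pc + 4 - 4 = pc,
      // i.e. the branch re-executes itself, as in a tight poll loop.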
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x09:
      // MOV.W @(disp,PC),Rn  1001nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = tmp + 2 + (op & 0xff) * 2;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 1;
      break;
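      // Note (annotation): in a delay slot the PC-relative base becomes the
      // branch target when it is known (OP_BRANCH); for an unknowable,
      // computed branch tmp is zeroed, which skips the literal tracking; for
      // a never-taken branch (OP_BRANCH_N) the sequential pc stays valid.
      // Otherwise the base is pc + 4, composed here as (pc + 2) + 2.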
    /////////////////////////////////////////////
    case 0x0b:
      // BSR label  1011dddddddddddd
      opd->dest = BITMASK1(SHR_PR);
      // fallthrough
    case 0x0a:
      // BRA label  1010dddddddddddd
      opd->op = OP_BRANCH;
      opd->source = BITMASK1(SHR_PC);
      opd->dest |= BITMASK1(SHR_PC);
      opd->imm = ((signed int)(op << 20) >> 19);
      opd->imm += pc + 4;
      opd->cycles = 2;
      next_is_delay = 1;
      if (!(opd->dest & BITMASK1(SHR_PR))) {
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2) {
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
          if (opd->imm <= pc)
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        } else
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
      } else
        op_flags[i+1+next_is_delay] |= OF_BTARGET;
      break;
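      // Note (annotation): (signed int)(op << 20) >> 19 sign-extends the
      // 12-bit displacement and doubles it, giving a reach of -4096..+4094
      // bytes from pc + 4. A backward or out-of-block BRA ends the scan
      // unless the fallthrough point is already a known branch target; BSR
      // keeps scanning, since PR makes a return here likely.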
    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR)  11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR)  11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR)  11000010dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        break;
      case 0x0400: // MOV.B @(disp,GBR),R0  11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0  11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0  11000110dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0300: // TRAPA #imm            11000011iiiiiiii
        opd->op = OP_TRAPA;
        opd->source = BITMASK4(SHR_SP, SHR_PC, SHR_SR, SHR_T);
        opd->dest = BITMASK2(SHR_SP, SHR_PC);
        opd->imm = (op & 0xff);
        opd->cycles = 8;
        op_flags[i+1] |= OF_BTARGET;
        break;
      case 0x0700: // MOVA @(disp,PC),R0    11000111dddddddd
        opd->op = OP_MOVA;
        tmp = pc + 2;
        if (op_flags[i] & OF_DELAY_OP) {
          if (ops[i-1].op == OP_BRANCH)
            tmp = ops[i-1].imm;
          else if (ops[i-1].op != OP_BRANCH_N)
            tmp = 0;
        }
        opd->dest = BITMASK1(SHR_R0);
        if (tmp) {
          opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
          if (opd->imm >= base_pc) {
            if (lowest_mova == 0 || opd->imm < lowest_mova)
              lowest_mova = opd->imm;
          }
        }
        break;
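      // Note (annotation): MOVA's effective address is longword-aligned
      // (& ~3), matching the hardware's (pc & ~3) + 4 + disp * 4; tracking
      // lowest_mova presumably lets the 2nd pass separate code from the data
      // it addresses.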
      case 0x0800: // TST #imm,R0           11001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        break;
      case 0x0900: // AND #imm,R0           11001001iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0a00: // XOR #imm,R0           11001010iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0b00: // OR #imm,R0            11001011iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0c00: // TST.B #imm,@(R0,GBR)  11001100iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        op_flags[i] |= OF_POLL_INSN;
        opd->cycles = 3;
        break;
      case 0x0d00: // AND.B #imm,@(R0,GBR)  11001101iiiiiiii
      case 0x0e00: // XOR.B #imm,@(R0,GBR)  11001110iiiiiiii
      case 0x0f00: // OR.B #imm,@(R0,GBR)   11001111iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = op & 0xff;
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x0d:
      // MOV.L @(disp,PC),Rn  1101nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 2;
      break;
    /////////////////////////////////////////////
    case 0x0e:
      // MOV #imm,Rn  1110nnnniiiiiiii
      opd->op = OP_LOAD_CONST;
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;
    default:
    undefined:
      opd->op = OP_UNDEFINED;
      // an unhandled instruction is probably not code if it's not the 1st insn
      if (!(op_flags[i] & OF_DELAY_OP) && pc != base_pc)
        goto end;
      break;
    }
    if (op_flags[i] & OF_DELAY_OP) {
      switch (opd->op) {
      case OP_BRANCH:
      case OP_BRANCH_N:
      case OP_BRANCH_CT:
      case OP_BRANCH_CF:
      case OP_BRANCH_R:
      case OP_BRANCH_RF:
        elprintf(EL_ANOMALY, "%csh2 drc: branch in DS @ %08x",
          is_slave ? 's' : 'm', pc);
        opd->op = OP_UNDEFINED;
        op_flags[i] |= OF_B_IN_DS;
        next_is_delay = 0;
        break;
      }
    }
  }
end:
  i_end = i;
  end_pc = pc;

  // 2nd pass: some analysis
  lowest_literal = end_literals = lowest_mova = 0;
  t = T_UNKNOWN;
  last_btarget = 0;
  op = 0; // delay/poll insns counter
  for (i = 0, pc = base_pc; i < i_end; i++, pc += 2) {
    opd = &ops[i];
    crc += FETCH_OP(pc);

    // propagate T (TODO: DIV0U)
    if ((op_flags[i] & OF_BTARGET) || (opd->dest & BITMASK1(SHR_T)))
      t = T_UNKNOWN;

    if ((opd->op == OP_BRANCH_CT && t == T_SET) ||
        (opd->op == OP_BRANCH_CF && t == T_CLEAR)) {
      opd->op = OP_BRANCH;
      opd->cycles = (op_flags[i + 1] & OF_DELAY_OP) ? 2 : 3;
    } else if ((opd->op == OP_BRANCH_CT && t == T_CLEAR) ||
               (opd->op == OP_BRANCH_CF && t == T_SET))
      opd->op = OP_BRANCH_N;
    else if ((opd->op == OP_SETCLRT && !opd->imm) || opd->op == OP_BRANCH_CT)
      t = T_CLEAR;
    else if ((opd->op == OP_SETCLRT && opd->imm) || opd->op == OP_BRANCH_CF)
      t = T_SET;
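    // Note (annotation): with T statically known, a conditional branch
    // becomes unconditional (OP_BRANCH, taken) or OP_BRANCH_N (never taken).
    // On the fallthrough path past a not-taken BT, T must have been clear,
    // which is why OP_BRANCH_CT implies T_CLEAR below it and OP_BRANCH_CF
    // implies T_SET.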
    // "overscan" detection: unreachable code after unconditional branch
    // this can happen if the insn after a forward branch isn't a local target
    if (OP_ISBRAUC(opd->op)) {
      if (op_flags[i + 1] & OF_DELAY_OP) {
        if (i_end > i + 2 && !(op_flags[i + 2] & OF_BTARGET))
          i_end = i + 2;
      } else {
        if (i_end > i + 1 && !(op_flags[i + 1] & OF_BTARGET))
          i_end = i + 1;
      }
    }

    // literal pool size detection
    if (opd->op == OP_MOVA && opd->imm >= base_pc)
      if (lowest_mova == 0 || opd->imm < lowest_mova)
        lowest_mova = opd->imm;
    if (opd->op == OP_LOAD_POOL) {
      if (opd->imm >= base_pc && opd->imm < end_pc + MAX_LITERAL_OFFSET) {
        if (end_literals < opd->imm + opd->size * 2)
          end_literals = opd->imm + opd->size * 2;
        if (lowest_literal == 0 || lowest_literal > opd->imm)
          lowest_literal = opd->imm;
        if (opd->size == 2) {
          // tweak for NFL: treat a 32bit literal as an address and check if it
          // points to the literal space. In that case handle it like MOVA.
          tmp = FETCH32(opd->imm) & ~0x20000000; // MUST ignore wt bit here
          if (tmp >= end_pc && tmp < end_pc + MAX_LITERAL_OFFSET)
            if (lowest_mova == 0 || tmp < lowest_mova)
              lowest_mova = tmp;
        }
      }
    }
#if LOOP_DETECTION
    // inner loop detection
    // 1. a loop always starts with a branch target (for the backwards jump)
    // 2. it doesn't contain more than one polling and/or delaying insn
    // 3. it doesn't contain unconditional jumps
    // 4. no overlapping of loops
    if (op_flags[i] & OF_BTARGET) {
      last_btarget = i; // possible loop starting point
      op = 0;
    }
    // XXX let's hope nobody is putting a delay or poll insn in a delay slot :-/
    if (OP_ISBRAIMM(opd->op)) {
      // BSR, BRA, BT, BF with immediate target
      int i_tmp = (opd->imm - base_pc) / 2; // branch target, index in ops
      if (i_tmp == last_btarget) // candidate for basic loop optimizer
        op_flags[i_tmp] |= OF_BASIC_LOOP;
      if (i_tmp == last_btarget && op <= 1) {
        op_flags[i_tmp] |= OF_LOOP; // conditions met -> mark loop
        last_btarget = i+1; // condition 4
      } else if (opd->op == OP_BRANCH)
        last_btarget = i+1; // condition 3
    }
    else if (OP_ISBRAIND(opd->op))
      // BRAF, BSRF, JMP, JSR, register indirect. treat it as off-limits jump
      last_btarget = i+1; // condition 3
    else if (op_flags[i] & (OF_POLL_INSN|OF_DELAY_INSN))
      op++; // condition 2
#endif
  }
  end_pc = pc;

  // end_literals is used to decide to inline a literal or not
  // XXX: need better detection if this actually is used in write
  if (lowest_literal >= base_pc) {
    if (lowest_literal < end_pc) {
      dbg(1, "warning: lowest_literal=%08x < end_pc=%08x", lowest_literal, end_pc);
      // TODO: does this always mean end_pc covers data?
    }
  }
  if (lowest_mova >= base_pc) {
    if (lowest_mova < end_literals) {
      dbg(1, "warning: mova=%08x < end_literals=%08x", lowest_mova, end_literals);
      end_literals = lowest_mova;
    }
    if (lowest_mova < end_pc) {
      dbg(1, "warning: mova=%08x < end_pc=%08x", lowest_mova, end_pc);
      end_literals = end_pc;
    }
  }
  if (lowest_literal >= end_literals)
    lowest_literal = end_literals;

  if (lowest_literal && end_literals)
    for (pc = lowest_literal; pc < end_literals; pc += 2)
      crc += FETCH_OP(pc);

  *end_pc_out = end_pc;
  if (base_literals_out != NULL)
    *base_literals_out = (lowest_literal ?: end_pc);
  if (end_literals_out != NULL)
    *end_literals_out = (end_literals ?: end_pc);

  // crc overflow handling, twice to collect all overflows
  crc = (crc & 0xffff) + (crc >> 16);
  crc = (crc & 0xffff) + (crc >> 16);
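  // Note (annotation): the sum is folded end-around twice because the first
  // fold can itself carry out of 16 bits, e.g. crc = 0x2ffff folds to
  // 0x10001, and the second fold absorbs that carry (0x10001 -> 0x0002),
  // leaving a stable 16-bit checksum, presumably used to cheaply verify that
  // the block's code (and its literals) haven't changed.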
  return crc;
}

// vim:shiftwidth=2:ts=2:expandtab