/*
 * SH2 recompiler
 * (C) notaz, 2009,2010,2013
 * (C) kub, 2018,2019,2020
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 *
 * notes:
 * - tcache, block descriptor, block entry buffer overflows result in oldest
 *   blocks being deleted until enough space is available
 * - link and list element buffer overflows result in failure and exit
 * - jumps between blocks are tracked for SMC handling (in block_entry->links),
 *   except jumps from global to CPU-local tcaches
 *
 * implemented:
 * - static register allocation
 * - remaining register caching and tracking in temporaries
 * - block-local branch linking
 * - block linking
 * - some constant propagation
 * - call stack caching for host block entry address
 * - delay, poll, and idle loop detection and handling
 * - some T/M flag optimizations where the value is known or isn't used
 *
 * TODO:
 * - better constant propagation
 * - bug fixing
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "../../pico/pico_int.h"
#include "../../pico/arm_features.h"
#include "sh2.h"
#include "compiler.h"
#include "../drc/cmn.h"
#include "../debug.h"

// features
#define PROPAGATE_CONSTANTS     1
#define LINK_BRANCHES           1
#define BRANCH_CACHE            1
#define CALL_STACK              1
#define ALIAS_REGISTERS         1
#define REMAP_REGISTER          1
#define LOOP_DETECTION          1
#define LOOP_OPTIMIZER          1
#define T_OPTIMIZER             1
#define DIV_OPTIMIZER           0

#define MAX_LITERAL_OFFSET      0x200   // max. MOVA, MOV @(PC) offset
#define MAX_LOCAL_TARGETS       (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES      (BLOCK_INSN_LIMIT / 2)

// debug stuff
// 01 - warnings/errors
// 02 - block info/smc
// 04 - asm
// 08 - runtime block entry log
// 10 - smc self-check
// 20 - runtime block entry counter
// 40 - rcache checking
// 80 - branch cache statistics
// 100 - write trace
// 200 - compare trace
// 400 - block entry backtrace on exit
// 800 - state dump on exit
// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0//x847
#endif

#if DRC_DEBUG
#define dbg(l,...) { \
  if ((l) & DRC_DEBUG) \
    elprintf(EL_STATUS, ##__VA_ARGS__); \
}
#include "mame/sh2dasm.h"
#include <platform/libpicofe/linux/host_dasm.h>
static int insns_compiled, hash_collisions, host_insn_count;
#define COUNT_OP \
  host_insn_count++
#else // !DRC_DEBUG
#define COUNT_OP
#define dbg(...)
#endif

///
#define FETCH_OP(pc) \
  dr_pc_base[(pc) / 2]

#define FETCH32(a) \
  ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
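// a minimal worked example, assuming dr_pc_base points at the block's code as
// big-endian 16-bit words: for a literal pool entry holding the words 0x1234
// 0x5678 at address a, FETCH32(a) == (0x1234 << 16) | 0x5678 == 0x12345678.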
#define CHECK_UNHANDLED_BITS(mask, label) { \
  if ((op & (mask)) != 0) \
    goto label; \
}

#define GET_Fx() \
  ((op >> 4) & 0x0f)
#define GET_Rm GET_Fx

#define GET_Rn() \
  ((op >> 8) & 0x0f)

#define T      0x00000001
#define S      0x00000002
#define I      0x000000f0
#define Q      0x00000100
#define M      0x00000200
#define T_save 0x00000800

#define I_SHIFT 4
#define Q_SHIFT 8
#define M_SHIFT 9
#define T_SHIFT 11
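// resulting SR bit layout, derived from the masks and shifts above:
//   bit 11   bit 9  bit 8  bits 7..4  bit 1  bit 0
//   T_save   M      Q      I3..I0     S      T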
static struct op_data {
  u8 op;
  u8 cycles;
  u8 size;     // 0, 1, 2 - byte, word, long
  s8 rm;       // branch or load/store data reg
  u32 source;  // bitmask of src regs
  u32 dest;    // bitmask of dest regs
  u32 imm;     // immediate/io address/branch target
               // (for literal - address, not value)
} ops[BLOCK_INSN_LIMIT];

enum op_types {
  OP_UNHANDLED = 0,
  OP_BRANCH,
  OP_BRANCH_N,  // conditional known not to be taken
  OP_BRANCH_CT, // conditional, branch if T set
  OP_BRANCH_CF, // conditional, branch if T clear
  OP_BRANCH_R,  // indirect
  OP_BRANCH_RF, // indirect far (PC + Rm)
  OP_SETCLRT,   // T flag set/clear
  OP_MOVE,      // register move
  OP_LOAD_CONST,// load const to register
  OP_LOAD_POOL, // literal pool load, imm is address
  OP_MOVA,      // MOVA instruction
  OP_SLEEP,     // SLEEP instruction
  OP_RTE,       // RTE instruction
  OP_TRAPA,     // TRAPA instruction
  OP_LDC,       // LDC instruction
  OP_DIV0,      // DIV0[US] instruction
  OP_UNDEFINED,
};

struct div {
  u32 state:1;          // 0: expect DIV1/ROTCL, 1: expect DIV1
  u32 rn:5, rm:5, ro:5; // rn and rm for DIV1, ro for ROTCL
  u32 div1:8, rotcl:8;  // DIV1 count, ROTCL count
};
union _div { u32 imm; struct div div; }; // XXX tut-tut type punning...
#define div(opd) ((union _div *)&((opd)->imm))->div
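// a minimal usage sketch (hypothetical decoder code): the punning lets the
// decoder stash division-sequence state in the otherwise unused imm field of
// an OP_DIV0 op and read it back later, e.g.
//   div(&ops[i]).rn = 1;
//   div(&ops[i]).div1++;
// both aliases access the same 32 bits as ops[i].imm.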
// XXX consider trap insns: OP_TRAPA, OP_UNDEFINED?
#define OP_ISBRANCH(op) ((BITRANGE(OP_BRANCH, OP_BRANCH_RF)| BITMASK1(OP_RTE)) \
                                & BITMASK1(op))
#define OP_ISBRAUC(op) (BITMASK4(OP_BRANCH, OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
#define OP_ISBRACND(op) (BITMASK2(OP_BRANCH_CT, OP_BRANCH_CF) \
                                & BITMASK1(op))
#define OP_ISBRAIMM(op) (BITMASK3(OP_BRANCH, OP_BRANCH_CT, OP_BRANCH_CF) \
                                & BITMASK1(op))
#define OP_ISBRAIND(op) (BITMASK3(OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
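// classification is set membership: each macro builds a bitmask over op_types
// for the class and tests the bit of the given op, so e.g.
// OP_ISBRACND(OP_BRANCH_CT) is nonzero while OP_ISBRACND(OP_BRANCH) is 0.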
#ifdef DRC_SH2

#if (DRC_DEBUG & 4)
static u8 *tcache_dsm_ptrs[3];
static char sh2dasm_buff[64];
#define do_host_disasm(tcid) \
  host_dasm(tcache_dsm_ptrs[tcid], emith_insn_ptr() - tcache_dsm_ptrs[tcid]); \
  tcache_dsm_ptrs[tcid] = emith_insn_ptr()
#else
#define do_host_disasm(x)
#endif

#define SH2_DUMP(sh2, reason) { \
  char ms = (sh2)->is_slave ? 's' : 'm'; \
  printf("%csh2 %s %08x\n", ms, reason, (sh2)->pc); \
  printf("%csh2 r0-7  %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[0], (sh2)->r[1], (sh2)->r[2], (sh2)->r[3], \
    (sh2)->r[4], (sh2)->r[5], (sh2)->r[6], (sh2)->r[7]); \
  printf("%csh2 r8-15 %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[8], (sh2)->r[9], (sh2)->r[10], (sh2)->r[11], \
    (sh2)->r[12], (sh2)->r[13], (sh2)->r[14], (sh2)->r[15]); \
  printf("%csh2 pc-ml %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->pc, (sh2)->ppc, (sh2)->pr, (sh2)->sr&0xfff, \
    (sh2)->gbr, (sh2)->vbr, (sh2)->mach, (sh2)->macl); \
  printf("%csh2 tmp-p %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->drc_tmp, (sh2)->irq_cycles, \
    (sh2)->pdb_io_csum[0], (sh2)->pdb_io_csum[1], (sh2)->state, \
    (sh2)->poll_addr, (sh2)->poll_cycles, (sh2)->poll_cnt); \
}

#if (DRC_DEBUG & (8|256|512|1024)) || defined(PDB)
#if (DRC_DEBUG & (256|512|1024))
static SH2 csh2[2][8];
static FILE *trace[2];
#endif
static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
{
  if (block != NULL) {
    dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
      sh2->pc, block, (signed int)sr >> 12);
#if defined PDB
    pdb_step(sh2, sh2->pc);
#elif (DRC_DEBUG & 256)
    {
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "wb");
        trace[1] = fopen("pico.trace1", "wb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        fwrite(sh2, offsetof(SH2, read8_map), 1, trace[idx]);
        fwrite(&sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx]);
        memcpy(&csh2[idx][0], sh2, offsetof(SH2, poll_cnt)+4);
        csh2[idx][0].is_slave = idx;
      }
    }
#elif (DRC_DEBUG & 512)
    {
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "rb");
        trace[1] = fopen("pico.trace1", "rb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        if (!fread(&fsh2, offsetof(SH2, read8_map), 1, trace[idx]) ||
            !fread(&fsh2.pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx])) {
          printf("trace eof at %08lx\n", ftell(trace[idx]));
          exit(1);
        }
        fsh2.sr = (fsh2.sr & 0xfff) | (sh2->sr & ~0xfff);
        fsh2.is_slave = idx;
        if (memcmp(&fsh2, sh2, offsetof(SH2, read8_map)) ||
            0)//memcmp(&fsh2.pdb_io_csum, &sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum)))
        {
          printf("difference at %08lx!\n", ftell(trace[idx]));
          SH2_DUMP(&fsh2, "file");
          SH2_DUMP(sh2, "current");
          SH2_DUMP(&csh2[idx][0], "previous");
          char *ps = (char *)sh2, *pf = (char *)&fsh2;
          for (idx = 0; idx < offsetof(SH2, read8_map); idx += sizeof(u32))
            if (*(u32 *)(ps+idx) != *(u32 *)(pf+idx))
              printf("diff reg %ld\n", idx/sizeof(u32));
          exit(1);
        }
        csh2[idx][0] = fsh2;
      }
    }
#elif (DRC_DEBUG & 1024)
    {
      int x = sh2->is_slave, i;
      for (i = 0; i < ARRAY_SIZE(csh2[x])-1; i++)
        memcpy(&csh2[x][i], &csh2[x][i+1], offsetof(SH2, poll_cnt)+4);
      memcpy(&csh2[x][ARRAY_SIZE(csh2[x])-1], sh2, offsetof(SH2, poll_cnt)+4);
      csh2[x][0].is_slave = x;
    }
#endif
  }
  return block;
}
#endif
// we have 3 translation cache buffers, split from one drc/cmn buffer.
// BIOS shares tcache with data array because it's only used for init
// and can be discarded early
#define TCACHE_BUFFERS 3

struct ring_buffer {
  u8 *base;          // ring buffer memory
  unsigned item_sz;  // size of one buffer item
  unsigned size;     // number of items in ring
  int first, next;   // read and write pointers
  int used;          // number of used items in ring
};

enum { BL_JMP=1, BL_LDJMP, BL_JCCBLX };
struct block_link {
  short tcache_id;
  short type;                // BL_JMP et al
  u32 target_pc;
  void *jump;                // insn address
  void *blx;                 // block link/exit area if any
  u8 jdisp[12];              // jump backup buffer
  struct block_link *next;   // either in block_entry->links or unresolved
  struct block_link *o_next; // ...in block_entry->o_links
  struct block_link *prev;
  struct block_link *o_prev;
  struct block_entry *target;// target block this is linked in (be->links)
};

struct block_entry {
  u32 pc;
  u8 *tcache_ptr;            // translated block for above PC
  struct block_entry *next;  // chain in hash_table with same pc hash
  struct block_entry *prev;
  struct block_link *links;  // incoming links to this entry
  struct block_link *o_links;// outgoing links from this entry
#if (DRC_DEBUG & 2)
  struct block_desc *block;
#endif
#if (DRC_DEBUG & 32)
  int entry_count;
#endif
};

struct block_desc {
  u32 addr;                  // block start SH2 PC address
  u32 addr_lit;              // block start SH2 literal pool addr
  int size;                  // ..of recompiled insns
  int size_lit;              // ..of (insns+)literal pool
  u8 *tcache_ptr;            // start address of block in cache
  u16 crc;                   // crc of insns and literals
  u16 active;                // actively used or deactivated?
  struct block_list *list;
#if (DRC_DEBUG & 2)
  int refcount;
#endif
  int entry_count;
  struct block_entry *entryp;
};

struct block_list {
  struct block_desc *block;  // block reference
  struct block_list *next;   // pointers for doubly linked list
  struct block_list *prev;
  struct block_list **head;  // list head (for removing from list)
  struct block_list *l_next;
};

static u8 *tcache_ptr;       // ptr for code emitters

// XXX: need to tune sizes
static struct ring_buffer tcache_ring[TCACHE_BUFFERS];
static const int tcache_sizes[TCACHE_BUFFERS] = {
  DRC_TCACHE_SIZE * 30 / 32, // ROM (rarely used), DRAM
  DRC_TCACHE_SIZE / 32,      // BIOS, data array in master sh2
  DRC_TCACHE_SIZE / 32,      // ... slave
};

#define BLOCK_MAX_COUNT(tcid)      ((tcid) ? 256 : 32*256)
static struct ring_buffer block_ring[TCACHE_BUFFERS];
static struct block_desc *block_tables[TCACHE_BUFFERS];

#define ENTRY_MAX_COUNT(tcid)      ((tcid) ? 8*512 : 256*512)
static struct ring_buffer entry_ring[TCACHE_BUFFERS];
static struct block_entry *entry_tables[TCACHE_BUFFERS];

// we have block_link_pool to avoid using mallocs
#define BLOCK_LINK_MAX_COUNT(tcid) ((tcid) ? 512 : 32*512)
static struct block_link *block_link_pool[TCACHE_BUFFERS];
static int block_link_pool_counts[TCACHE_BUFFERS];
static struct block_link **unresolved_links[TCACHE_BUFFERS];
static struct block_link *blink_free[TCACHE_BUFFERS];

// used for invalidation
#define RAM_SIZE(tcid) ((tcid) ? 0x1000 : 0x40000)
#define INVAL_PAGE_SIZE 0x100
static struct block_list *inactive_blocks[TCACHE_BUFFERS];

// array of pointers to block_lists for RAM and 2 data arrays
// each array has len: sizeof(mem) / INVAL_PAGE_SIZE
static struct block_list **inval_lookup[TCACHE_BUFFERS];

#define HASH_TABLE_SIZE(tcid) ((tcid) ? 512 : 64*512)
static struct block_entry **hash_tables[TCACHE_BUFFERS];

#define HASH_FUNC(hash_tab, addr, mask) \
  (hash_tab)[((addr) >> 1) & (mask)]
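// example, derived from the macros above: in the shared tcache (tcid 0) a
// lookup for pc=0x06000346 indexes
//   hash_tables[0][(0x06000346 >> 1) & (64*512 - 1)] == hash_tables[0][0x01a3]
// SH2 insns are 16-bit aligned, so the low pc bit carries no information and
// is shifted out before masking.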
#define BLOCK_LIST_MAX_COUNT    (64*1024)
static struct block_list *block_list_pool;
static int block_list_pool_count;
static struct block_list *blist_free;

#if (DRC_DEBUG & 128)
#if BRANCH_CACHE
int bchit, bcmiss;
#endif
#if CALL_STACK
int rchit, rcmiss;
#endif
#endif

// host register tracking
enum cache_reg_htype {
  HRT_TEMP   = 1, // is for temps and args
  HRT_REG    = 2, // is for sh2 regs
};

enum cache_reg_flags {
  HRF_DIRTY  = 1 << 0, // has "dirty" value to be written to ctx
  HRF_PINNED = 1 << 1, // has a pinned mapping
  HRF_S16    = 1 << 2, // has a sign extended 16 bit value
  HRF_U16    = 1 << 3, // has a zero extended 16 bit value
};

enum cache_reg_type {
  HR_FREE,
  HR_CACHED, // vreg has sh2_reg_e
  HR_TEMP,   // reg used for temp storage
};

typedef struct {
  u8 hreg:6;    // "host" reg
  u8 htype:2;   // TEMP or REG?
  u8 flags:4;   // DIRTY, PINNED?
  u8 type:2;    // CACHED or TEMP?
  u8 locked:2;  // LOCKED reference counter
  u16 stamp;    // kind of a timestamp
  u32 gregs;    // "guest" reg mask
} cache_reg_t;

// guest register tracking
enum guest_reg_flags {
  GRF_DIRTY  = 1 << 0, // reg has "dirty" value to be written to ctx
  GRF_CONST  = 1 << 1, // reg has a constant
  GRF_CDIRTY = 1 << 2, // constant not yet written to ctx
  GRF_STATIC = 1 << 3, // reg has static mapping to vreg
  GRF_PINNED = 1 << 4, // reg has pinned mapping to vreg
};

typedef struct {
  u8 flags;  // guest flags: is constant, is dirty?
  s8 sreg;   // cache reg for static mapping
  s8 vreg;   // cache_reg this is currently mapped to, -1 if not mapped
  s8 cnst;   // const index if this is constant
} guest_reg_t;

// possibly needed in code emitter
static int rcache_get_tmp(void);
static void rcache_free_tmp(int hr);

// Note: Register assignment goes by ABI convention. Caller save registers are
// TEMPORARY, callee save registers are PRESERVED. Unusable regs are omitted.
// there must be at least the free (not context or statically mapped) amount of
// PRESERVED/TEMPORARY registers used by handlers in worst case (currently 4).
// there must be at least 3 PARAM, and PARAM+TEMPORARY must be at least 4.
// SR must and R0 should by all means be statically mapped.
// XXX the static definition of SR MUST match that in compiler.h
#if defined(__arm__) || defined(_M_ARM)
#include "../drc/emit_arm.c"
#elif defined(__aarch64__) || defined(_M_ARM64)
#include "../drc/emit_arm64.c"
#elif defined(__mips__)
#include "../drc/emit_mips.c"
#elif defined(__riscv__) || defined(__riscv)
#include "../drc/emit_riscv.c"
#elif defined(__powerpc__)
#include "../drc/emit_ppc.c"
#elif defined(__i386__) || defined(_M_X86)
#include "../drc/emit_x86.c"
#elif defined(__x86_64__) || defined(_M_X64)
#include "../drc/emit_x86.c"
#else
#error unsupported arch
#endif

static const signed char hregs_param[] = PARAM_REGS;
static const signed char hregs_temp [] = TEMPORARY_REGS;
static const signed char hregs_saved[] = PRESERVED_REGS;
static const signed char regs_static[] = STATIC_SH2_REGS;

#define CACHE_REGS \
  (ARRAY_SIZE(hregs_param)+ARRAY_SIZE(hregs_temp)+ARRAY_SIZE(hregs_saved)-1)
static cache_reg_t cache_regs[CACHE_REGS];

static signed char reg_map_host[HOST_REGS];

static guest_reg_t guest_regs[SH2_REGS];

static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void REGPARM(1) (*sh2_drc_dispatcher)(u32 pc);
#if CALL_STACK
static u32  REGPARM(2) (*sh2_drc_dispatcher_call)(u32 pc);
static void REGPARM(1) (*sh2_drc_dispatcher_return)(u32 pc);
#endif
static void REGPARM(1) (*sh2_drc_exit)(u32 pc);
static void (*sh2_drc_test_irq)(void);

static u32  REGPARM(1) (*sh2_drc_read8)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read8_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32_poll)(u32 a);
static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write32)(u32 a, u32 d);

#ifdef DRC_SR_REG
void REGPARM(1) (*sh2_drc_save_sr)(SH2 *sh2);
void REGPARM(1) (*sh2_drc_restore_sr)(SH2 *sh2);
#endif

// flags for memory access
#define MF_SIZEMASK 0x03        // size of access
#define MF_POSTINCR 0x10        // post increment (for read_rr)
#define MF_PREDECR  MF_POSTINCR // pre decrement (for write_rr)
#define MF_POLLING  0x20        // include polling check in read
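// the flags compose by OR-ing, e.g. (2 | MF_POSTINCR | MF_POLLING) would
// describe a long (size 2) read with post increment and a polling check.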
// address space stuff
static int dr_is_rom(u32 a)
{
  // tweak for WWF Raw which writes data to some high ROM addresses
  return (a & 0xc6000000) == 0x02000000 && (a & 0x3f0000) < 0x3e0000;
}
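// e.g. a=0x02000100 (in the ROM window) passes both tests, while a=0x023e0000
// fails the second one, so the top 128 KB of the 4 MB ROM window is treated
// as writable for the sake of that game.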
static int dr_ctx_get_mem_ptr(SH2 *sh2, u32 a, u32 *mask)
{
  void *memptr;
  int poffs = -1;

  // check if region is mapped memory
  memptr = p32x_sh2_get_mem_ptr(a, mask, sh2);
  if (memptr == NULL)
    return poffs;

  if (memptr == sh2->p_bios)        // BIOS
    poffs = offsetof(SH2, p_bios);
  else if (memptr == sh2->p_da)     // data array
    poffs = offsetof(SH2, p_da);
  else if (memptr == sh2->p_sdram)  // SDRAM
    poffs = offsetof(SH2, p_sdram);
  else if (memptr == sh2->p_rom)    // ROM
    poffs = offsetof(SH2, p_rom);

  return poffs;
}

static int dr_get_tcache_id(u32 pc, int is_slave)
{
  u32 tcid = 0;

  if ((pc & 0xe0000000) == 0xc0000000)
    tcid = 1 + is_slave; // data array
  if ((pc & ~0xfff) == 0)
    tcid = 1 + is_slave; // BIOS

  return tcid;
}
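// example: code in the data array (pc=0xc0000100) or BIOS (pc=0x00000200)
// selects the CPU-local tcache, 1 for the master sh2 and 2 for the slave;
// everything else (ROM, SDRAM) compiles into the shared tcache 0.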
static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
{
  struct block_entry *be;

  *tcache_id = dr_get_tcache_id(pc, is_slave);

  be = HASH_FUNC(hash_tables[*tcache_id], pc, HASH_TABLE_SIZE(*tcache_id) - 1);
  if (be != NULL) // don't ask... gcc code generation hint
    for (; be != NULL; be = be->next)
      if (be->pc == pc)
        return be;

  return NULL;
}

// ---------------------------------------------------------------

// ring buffer management
#define RING_INIT(r,m,n)    *(r) = (struct ring_buffer) { .base = (u8 *)m, \
                                        .item_sz = sizeof(*(m)), .size = n };

static void *ring_alloc(struct ring_buffer *rb, int count)
{
  // allocate space in ring buffer
  void *p;

  p = rb->base + rb->next * rb->item_sz;
  if (rb->next+count > rb->size) {
    rb->used += rb->size - rb->next;
    p = rb->base; // wrap if overflow at end
    rb->next = count;
  } else {
    rb->next += count;
    if (rb->next == rb->size) rb->next = 0;
  }
  rb->used += count;
  return p;
}

static void ring_wrap(struct ring_buffer *rb)
{
  // insufficient space at end of buffer memory, wrap around
  rb->used += rb->size - rb->next;
  rb->next = 0;
}

static void ring_free(struct ring_buffer *rb, int count)
{
  // free oldest space in ring buffer
  rb->first += count;
  if (rb->first >= rb->size) rb->first -= rb->size;
  rb->used -= count;
}

static void ring_free_p(struct ring_buffer *rb, void *p)
{
  // free ring buffer space up to given pointer
  rb->first = ((u8 *)p - rb->base) / rb->item_sz;
  rb->used = rb->next - rb->first;
  if (rb->used < 0) rb->used += rb->size;
}

static void *ring_reset(struct ring_buffer *rb)
{
  // reset to initial state
  rb->first = rb->next = rb->used = 0;
  return rb->base + rb->next * rb->item_sz;
}

static void *ring_first(struct ring_buffer *rb)
{
  return rb->base + rb->first * rb->item_sz;
}

static void *ring_next(struct ring_buffer *rb)
{
  return rb->base + rb->next * rb->item_sz;
}
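// a minimal usage sketch (hypothetical local names): allocations come from
// the write end and frees from the read end, which is what makes the oldest
// blocks go first when space runs out:
//   struct block_desc descs[BLOCK_MAX_COUNT(0)];
//   struct ring_buffer rb;
//   RING_INIT(&rb, descs, BLOCK_MAX_COUNT(0));
//   struct block_desc *bd = ring_alloc(&rb, 1); // newest item
//   ring_free(&rb, 1);                          // discards the oldest item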
// block management
static void add_to_block_list(struct block_list **blist, struct block_desc *block)
{
  struct block_list *added;

  if (blist_free) {
    added = blist_free;
    blist_free = added->next;
  } else if (block_list_pool_count >= BLOCK_LIST_MAX_COUNT) {
    printf("block list overflow\n");
    exit(1);
  } else {
    added = block_list_pool + block_list_pool_count;
    block_list_pool_count++;
  }

  added->block = block;
  added->l_next = block->list;
  block->list = added;
  added->head = blist;

  added->prev = NULL;
  if (*blist)
    (*blist)->prev = added;
  added->next = *blist;
  *blist = added;
}

static void rm_from_block_lists(struct block_desc *block)
{
  struct block_list *entry;

  entry = block->list;
  while (entry != NULL) {
    if (entry->prev != NULL)
      entry->prev->next = entry->next;
    else
      *(entry->head) = entry->next;
    if (entry->next != NULL)
      entry->next->prev = entry->prev;

    entry->next = blist_free;
    blist_free = entry;

    entry = entry->l_next;
  }
  block->list = NULL;
}

static void discard_block_list(struct block_list **blist)
{
  struct block_list *next, *current = *blist;
  while (current != NULL) {
    next = current->next;
    current->next = blist_free;
    blist_free = current;
    current = next;
  }
  *blist = NULL;
}
static void add_to_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

  be->prev = NULL;
  if (*head)
    (*head)->prev = be;
  be->next = *head;
  *head = be;

#if (DRC_DEBUG & 2)
  if (be->next != NULL) {
    printf(" %08x@%p: entry hash collision with %08x@%p\n",
      be->pc, be->tcache_ptr, be->next->pc, be->next->tcache_ptr);
    hash_collisions++;
  }
#endif
}

static void rm_from_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

#if DRC_DEBUG & 1
  struct block_entry *current = be;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist @%p: be %p %08x missing?", head, be, be->pc);
#endif

  if (be->prev != NULL)
    be->prev->next = be->next;
  else
    *head = be->next;
  if (be->next != NULL)
    be->next->prev = be->prev;
}

static void add_to_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = *head;
  while (current != NULL && current != bl)
    current = current->next;
  if (current == bl)
    dbg(1, "add_to_hashlist_unresolved @%p: bl %p %p %08x already in?", head, bl, bl->target, bl->target_pc);
#endif

  bl->target = NULL; // marker for not resolved
  bl->prev = NULL;
  if (*head)
    (*head)->prev = bl;
  bl->next = *head;
  *head = bl;
}

static void rm_from_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = bl;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist_unresolved @%p: bl %p %p %08x missing?", head, bl, bl->target, bl->target_pc);
#endif

  if (bl->prev != NULL)
    bl->prev->next = bl->next;
  else
    *head = bl->next;
  if (bl->next != NULL)
    bl->next->prev = bl->prev;
}
#if LINK_BRANCHES
static void dr_block_link(struct block_entry *be, struct block_link *bl, int emit_jump)
{
  dbg(2, "- %slink from %p to pc %08x entry %p", emit_jump ? "":"early ",
    bl->jump, bl->target_pc, be->tcache_ptr);

  if (emit_jump) {
    u8 *jump = bl->jump;
    int jsz = emith_jump_patch_size();
    if (bl->type == BL_JMP) { // patch: jump @entry
      // inlined: @jump far jump to target
      emith_jump_patch(jump, be->tcache_ptr, &jump);
    } else if (bl->type == BL_LDJMP) { // write: jump @entry
      // inlined: @jump far jump to target
      emith_jump_at(jump, be->tcache_ptr);
      jsz = emith_jump_at_size();
    } else if (bl->type == BL_JCCBLX) { // patch: jump cond -> jump @entry
      if (emith_jump_patch_inrange(bl->jump, be->tcache_ptr)) {
        // inlined: @jump near jumpcc to target
        emith_jump_patch(jump, be->tcache_ptr, &jump);
      } else { // dispatcher cond immediate
        // via blx: @jump near jumpcc to blx; @blx far jump
        emith_jump_patch(jump, bl->blx, &jump);
        emith_jump_at(bl->blx, be->tcache_ptr);
        host_instructions_updated(bl->blx, (char *)bl->blx + emith_jump_at_size(),
            ((uintptr_t)bl->blx & 0x1f) + emith_jump_at_size()-1 > 0x1f);
      }
    } else {
      printf("unknown BL type %d\n", bl->type);
      exit(1);
    }
    host_instructions_updated(jump, jump + jsz, ((uintptr_t)jump & 0x1f) + jsz-1 > 0x1f);
  }

  // move bl to block_entry
  bl->target = be;
  bl->prev = NULL;
  if (be->links)
    be->links->prev = bl;
  bl->next = be->links;
  be->links = bl;
}

static void dr_block_unlink(struct block_link *bl, int emit_jump)
{
  dbg(2,"- unlink from %p to pc %08x", bl->jump, bl->target_pc);

  if (bl->target) {
    if (emit_jump) {
      u8 *jump = bl->jump;
      int jsz = emith_jump_patch_size();
      if (bl->type == BL_JMP) { // jump_patch @dispatcher
        // inlined: @jump far jump to dispatcher
        emith_jump_patch(jump, sh2_drc_dispatcher, &jump);
      } else if (bl->type == BL_LDJMP) { // restore: load pc, jump @dispatcher
        // inlined: @jump load target_pc, far jump to dispatcher
        memcpy(jump, bl->jdisp, emith_jump_at_size());
        jsz = emith_jump_at_size();
      } else if (bl->type == BL_JCCBLX) { // jump cond @blx; @blx: load pc, jump
        // via blx: @jump near jumpcc to blx; @blx load target_pc, far jump
        emith_jump_patch(bl->jump, bl->blx, &jump);
        memcpy(bl->blx, bl->jdisp, emith_jump_at_size());
        host_instructions_updated(bl->blx, (char *)bl->blx + emith_jump_at_size(), 1);
      } else {
        printf("unknown BL type %d\n", bl->type);
        exit(1);
      }
      // update cpu caches since the previous jump target doesn't exist anymore
      host_instructions_updated(jump, jump + jsz, 1);
    }

    if (bl->prev)
      bl->prev->next = bl->next;
    else
      bl->target->links = bl->next;
    if (bl->next)
      bl->next->prev = bl->prev;
    bl->target = NULL;
  }
}
#endif
static struct block_link *dr_prepare_ext_branch(struct block_entry *owner, u32 pc, int is_slave, int tcache_id)
{
#if LINK_BRANCHES
  struct block_link *bl = block_link_pool[tcache_id];
  int cnt = block_link_pool_counts[tcache_id];
  int target_tcache_id;

  // get the target block entry
  target_tcache_id = dr_get_tcache_id(pc, is_slave);
  if (target_tcache_id && target_tcache_id != tcache_id)
    return NULL;

  // get a block link
  if (blink_free[tcache_id] != NULL) {
    bl = blink_free[tcache_id];
    blink_free[tcache_id] = bl->next;
  } else if (cnt >= BLOCK_LINK_MAX_COUNT(tcache_id)) {
    dbg(1, "bl overflow for tcache %d", tcache_id);
    return NULL;
  } else {
    bl += cnt;
    block_link_pool_counts[tcache_id] = cnt+1;
  }

  // prepare link and add to outgoing list of owner
  bl->tcache_id = tcache_id;
  bl->target_pc = pc;
  bl->jump = tcache_ptr;
  bl->blx = NULL;
  bl->o_next = owner->o_links;
  owner->o_links = bl;

  add_to_hashlist_unresolved(bl, tcache_id);
  return bl;
#else
  return NULL;
#endif
}
static void dr_mark_memory(int mark, struct block_desc *block, int tcache_id, u32 nolit)
{
  u8 *drc_ram_blk = NULL, *lit_ram_blk = NULL;
  u32 addr, end, mask = 0, shift = 0, idx;

  // mark memory blocks as containing compiled code
  if ((block->addr & 0xc7fc0000) == 0x06000000
      || (block->addr & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      drc_ram_blk = Pico32xMem->drcblk_da[tcache_id-1];
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      drc_ram_blk = Pico32xMem->drcblk_ram;
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    // mark recompiled insns
    addr = block->addr & ~((1 << shift) - 1);
    end = block->addr + block->size;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark literal pool
    if (addr < (block->addr_lit & ~((1 << shift) - 1)))
      addr = block->addr_lit & ~((1 << shift) - 1);
    end = block->addr_lit + block->size_lit;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark for literals disabled
    if (nolit) {
      addr = nolit & ~((1 << shift) - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
        lit_ram_blk[idx++] = 1;
    }

    if (mark < 0)
      rm_from_block_lists(block);
    else {
      // add to invalidation lookup lists
      addr = block->addr & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr + block->size;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
      if (addr < (block->addr_lit & ~(INVAL_PAGE_SIZE - 1)))
        addr = block->addr_lit & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
    }
  }
}
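
/* Worked example for the marking arithmetic above (illustrative values only):
 * with shift = 6, one tracking byte covers a 64-byte granule. A block at
 * 0x06000110 with size 0x30 touches granules 0x06000100 and 0x06000140, so
 * two counters get += mark. Since mark is +1 on activation and -1 on removal
 * (see dr_activate_block/dr_rm_block_entry), each counter is effectively a
 * reference count of active blocks overlapping that granule. */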
static u32 dr_check_nolit(u32 start, u32 end, int tcache_id)
{
  u8 *lit_ram_blk = NULL;
  u32 mask = 0, shift = 0, addr, idx;

  if ((start & 0xc7fc0000) == 0x06000000
      || (start & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    addr = start & ~((1 << shift) - 1);
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      if (lit_ram_blk[idx++])
        break;

    return (addr < start ? start : addr > end ? end : addr);
  }

  return end;
}
static void dr_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit, int free)
{
  struct block_link *bl;
  u32 i;

  free = free || nolit; // block is invalid if literals are overwritten
  dbg(2," %sing block %08x-%08x,%08x-%08x, blkid %d,%d", free?"delet":"disabl",
    bd->addr, bd->addr + bd->size, bd->addr_lit, bd->addr_lit + bd->size_lit,
    tcache_id, bd - block_tables[tcache_id]);
  if (bd->addr == 0 || bd->entry_count == 0) {
    dbg(1, " killing dead block!? %08x", bd->addr);
    return;
  }

#if LINK_BRANCHES
  // remove from hash table, make incoming links unresolved
  if (bd->active) {
    for (i = 0; i < bd->entry_count; i++) {
      rm_from_hashlist(&bd->entryp[i], tcache_id);

      while ((bl = bd->entryp[i].links) != NULL) {
        dr_block_unlink(bl, 1);
        add_to_hashlist_unresolved(bl, tcache_id);
      }
    }

    dr_mark_memory(-1, bd, tcache_id, nolit);
    add_to_block_list(&inactive_blocks[tcache_id], bd);
  }
  bd->active = 0;
#endif

  if (free) {
#if LINK_BRANCHES
    // revoke outgoing links
    for (bl = bd->entryp[0].o_links; bl != NULL; bl = bl->o_next) {
      if (bl->target)
        dr_block_unlink(bl, 0);
      else
        rm_from_hashlist_unresolved(bl, tcache_id);
      bl->jump = NULL;
      bl->next = blink_free[bl->tcache_id];
      blink_free[bl->tcache_id] = bl;
    }
    bd->entryp[0].o_links = NULL;
#endif
    // invalidate block
    rm_from_block_lists(bd);
    bd->addr = bd->size = bd->addr_lit = bd->size_lit = 0;
    bd->entry_count = 0;
    bd->entryp = NULL;
  }
  emith_update_cache();
}
static struct block_desc *dr_find_inactive_block(int tcache_id, u16 crc,
  u32 addr, int size, u32 addr_lit, int size_lit)
{
  struct block_list **head = &inactive_blocks[tcache_id];
  struct block_list *current;

  for (current = *head; current != NULL; current = current->next) {
    struct block_desc *block = current->block;
    if (block->crc == crc && block->addr == addr && block->size == size &&
        block->addr_lit == addr_lit && block->size_lit == size_lit)
    {
      rm_from_block_lists(block);
      return block;
    }
  }
  return NULL;
}
static struct block_desc *dr_add_block(int entries, u32 addr, int size,
  u32 addr_lit, int size_lit, u16 crc, int is_slave, int *blk_id)
{
  struct block_entry *be;
  struct block_desc *bd;
  int tcache_id;

  // do a lookup to get tcache_id and override check
  be = dr_get_entry(addr, is_slave, &tcache_id);
  if (be != NULL)
    dbg(1, "block override for %08x", addr);

  if (block_ring[tcache_id].used + 1 > block_ring[tcache_id].size ||
      entry_ring[tcache_id].used + entries > entry_ring[tcache_id].size) {
    dbg(1, "bd overflow for tcache %d", tcache_id);
    return NULL;
  }

  *blk_id = block_ring[tcache_id].next;
  bd = ring_alloc(&block_ring[tcache_id], 1);
  bd->entryp = ring_alloc(&entry_ring[tcache_id], entries);

  bd->addr = addr;
  bd->size = size;
  bd->addr_lit = addr_lit;
  bd->size_lit = size_lit;
  bd->tcache_ptr = tcache_ptr;
  bd->crc = crc;
  bd->active = 0;
  bd->list = NULL;
  bd->entry_count = 0;
#if (DRC_DEBUG & 2)
  bd->refcount = 0;
#endif

  return bd;
}
static void dr_link_blocks(struct block_entry *be, int tcache_id)
{
#if LINK_BRANCHES
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  u32 pc = be->pc;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], pc, tcmask);
  struct block_link *bl = *head, *next;

  while (bl != NULL) {
    next = bl->next;
    if (bl->target_pc == pc && (!bl->tcache_id || bl->tcache_id == tcache_id)) {
      rm_from_hashlist_unresolved(bl, bl->tcache_id);
      dr_block_link(be, bl, 1);
    }
    bl = next;
  }
#endif
}
static void dr_link_outgoing(struct block_entry *be, int tcache_id, int is_slave)
{
#if LINK_BRANCHES
  struct block_link *bl;
  int target_tcache_id;

  for (bl = be->o_links; bl; bl = bl->o_next) {
    if (bl->target == NULL) {
      be = dr_get_entry(bl->target_pc, is_slave, &target_tcache_id);
      if (be != NULL && (!target_tcache_id || target_tcache_id == tcache_id)) {
        // remove bl from unresolved_links (it must be there, since target was NULL)
        rm_from_hashlist_unresolved(bl, bl->tcache_id);
        dr_block_link(be, bl, 1);
      }
    }
  }
#endif
}
static void dr_activate_block(struct block_desc *bd, int tcache_id, int is_slave)
{
  int i;

  // connect branches
  for (i = 0; i < bd->entry_count; i++) {
    struct block_entry *entry = &bd->entryp[i];
    add_to_hashlist(entry, tcache_id);
    // incoming branches
    dr_link_blocks(entry, tcache_id);
    if (!tcache_id)
      dr_link_blocks(entry, is_slave?2:1);
    // outgoing branches
    dr_link_outgoing(entry, tcache_id, is_slave);
  }

  // mark memory for overwrite detection
  dr_mark_memory(1, bd, tcache_id, 0);
  bd->active = 1;
}
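
/* Activation thus does three things: each entry point becomes findable via
 * the hash table, pending branch links in both directions are resolved
 * against it (for tcache 0 also against the corresponding data-array
 * tcache), and the guest memory the block was compiled from is marked so
 * self-modifying code can invalidate it later. */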
static void REGPARM(3) *dr_lookup_block(u32 pc, SH2 *sh2, int *tcache_id)
{
  struct block_entry *be = NULL;
  void *block = NULL;

  be = dr_get_entry(pc, sh2->is_slave, tcache_id);
  if (be != NULL)
    block = be->tcache_ptr;

#if (DRC_DEBUG & 2)
  if (be != NULL)
    be->block->refcount++;
#endif
  return block;
}
static void dr_free_oldest_block(int tcache_id)
{
  struct block_desc *bf;

  bf = ring_first(&block_ring[tcache_id]);
  if (bf->addr && bf->entry_count)
    dr_rm_block_entry(bf, tcache_id, 0, 1);
  ring_free(&block_ring[tcache_id], 1);

  if (block_ring[tcache_id].used) {
    bf = ring_first(&block_ring[tcache_id]);
    ring_free_p(&entry_ring[tcache_id], bf->entryp);
    ring_free_p(&tcache_ring[tcache_id], bf->tcache_ptr);
  } else {
    // reset since size of code block isn't known if no successor block exists
    ring_reset(&block_ring[tcache_id]);
    ring_reset(&entry_ring[tcache_id]);
    ring_reset(&tcache_ring[tcache_id]);
  }
}
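
/* Note the eviction order: block descriptors, entries and generated code all
 * live in ring buffers filled in compile order, so freeing the first ring
 * slot always evicts the oldest surviving block. The entry and tcache rings
 * are then trimmed up to the start of the new oldest block, since a block's
 * own code size isn't stored - it is implied by its successor's start. */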
static inline void dr_reserve_cache(int tcache_id, struct ring_buffer *rb, int count)
{
  // while not enough space available
  if (rb->next + count >= rb->size) {
    // not enough space in rest of buffer -> wrap around
    while (rb->first >= rb->next && rb->used)
      dr_free_oldest_block(tcache_id);
    if (rb->first == 0 && rb->used)
      dr_free_oldest_block(tcache_id);
    ring_wrap(rb);
  }
  while (rb->first >= rb->next && rb->next + count > rb->first && rb->used)
    dr_free_oldest_block(tcache_id);
}
static u8 *dr_prepare_cache(int tcache_id, int insn_count, int entry_count)
{
  int bf = block_ring[tcache_id].first;

  // reserve one block desc
  if (block_ring[tcache_id].used >= block_ring[tcache_id].size)
    dr_free_oldest_block(tcache_id);
  // reserve block entries
  dr_reserve_cache(tcache_id, &entry_ring[tcache_id], entry_count);
  // reserve cache space
  dr_reserve_cache(tcache_id, &tcache_ring[tcache_id], insn_count*128);

  if (bf != block_ring[tcache_id].first) {
    // deleted some block(s), clear branch cache and return stack
#if BRANCH_CACHE
    if (tcache_id)
      memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    else {
      memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
      memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
    }
#endif
#if CALL_STACK
    if (tcache_id) {
      memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      sh2s[tcache_id-1].rts_cache_idx = 0;
    } else {
      memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
      sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
    }
#endif
  }

  return ring_next(&tcache_ring[tcache_id]);
}
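
/* The branch cache and RTS cache hold raw host pointers into the translation
 * cache, so once any block may have been deleted above, every cached pointer
 * is potentially stale and the caches must be cleared wholesale - both CPUs
 * for the shared ROM/RAM tcache, only the owning CPU for a data array. */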
static void dr_flush_tcache(int tcid)
{
  int i;
#if (DRC_DEBUG & 1)
  elprintf(EL_STATUS, "tcache #%d flush! (%d/%d, bds %d/%d bes %d/%d)", tcid,
    tcache_ring[tcid].used, tcache_ring[tcid].size, block_ring[tcid].used,
    block_ring[tcid].size, entry_ring[tcid].used, entry_ring[tcid].size);
#endif

  ring_reset(&tcache_ring[tcid]);
  ring_reset(&block_ring[tcid]);
  ring_reset(&entry_ring[tcid]);

  block_link_pool_counts[tcid] = 0;
  blink_free[tcid] = NULL;
  memset(unresolved_links[tcid], 0, sizeof(*unresolved_links[0]) * HASH_TABLE_SIZE(tcid));
  memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * HASH_TABLE_SIZE(tcid));

  if (tcid == 0) { // ROM, RAM
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache));
    memset(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    memset(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache));
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  } else {
    memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
    memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
    memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[tcid - 1]));
    memset(Pico32xMem->drclit_da[tcid - 1], 0, sizeof(Pico32xMem->drclit_da[tcid - 1]));
    memset(sh2s[tcid - 1].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    memset(sh2s[tcid - 1].rts_cache, -1, sizeof(sh2s[0].rts_cache));
    sh2s[tcid - 1].rts_cache_idx = 0;
  }
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcid] = tcache_ring[tcid].base;
#endif

  for (i = 0; i < RAM_SIZE(tcid) / INVAL_PAGE_SIZE; i++)
    discard_block_list(&inval_lookup[tcid][i]);
  discard_block_list(&inactive_blocks[tcid]);
}
static void *dr_failure(void)
{
  printf("recompilation failed\n");
  exit(1);
}
// ---------------------------------------------------------------

// NB rcache allocation dependencies:
// - get_reg_arg/get_tmp_arg first (might evict other regs just allocated)
// - get_reg(..., NULL) before get_reg(..., &hr) if it might get the same reg
// - get_reg(..., RC_GR_READ/RMW, ...) before WRITE (might evict needed reg)
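/* Illustrative sketch (not from the original) of the first rule: an arg temp
 * allocated after a guest reg may steal the host reg just handed out:
 *   tmp = rcache_get_tmp_arg(0);                    // claim the arg reg first
 *   hr  = rcache_get_reg(SHR_R0, RC_GR_READ, NULL); // now safe from eviction
 * Reversing the two calls could leave hr pointing at a reclaimed register. */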
// register cache / constant propagation stuff
typedef enum {
  RC_GR_READ,
  RC_GR_WRITE,
  RC_GR_RMW,
} rc_gr_mode;

typedef struct {
  u32 gregs;
  u32 val;
} gconst_t;

gconst_t gconsts[ARRAY_SIZE(guest_regs)];
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr);
static inline int rcache_is_cached(sh2_reg_e r);
static void rcache_add_vreg_alias(int x, sh2_reg_e r);
static void rcache_remove_vreg_alias(int x, sh2_reg_e r);
static void rcache_evict_vreg(int x);
static void rcache_remap_vreg(int x);

static void rcache_set_x16(int hr, int s16_, int u16_)
{
  int x = reg_map_host[hr];
  if (x >= 0) {
    cache_regs[x].flags &= ~(HRF_S16|HRF_U16);
    if (s16_) cache_regs[x].flags |= HRF_S16;
    if (u16_) cache_regs[x].flags |= HRF_U16;
  }
}

static void rcache_copy_x16(int hr, int hr2)
{
  int x = reg_map_host[hr], y = reg_map_host[hr2];
  if (x >= 0 && y >= 0) {
    cache_regs[x].flags = (cache_regs[x].flags & ~(HRF_S16|HRF_U16)) |
      (cache_regs[y].flags & (HRF_S16|HRF_U16));
  }
}

static int rcache_is_s16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_S16 : 0);
}

static int rcache_is_u16(int hr)
{
  int x = reg_map_host[hr];
  return (x >= 0 ? cache_regs[x].flags & HRF_U16 : 0);
}
#define RCACHE_DUMP(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i; \
  printf("cache dump %s:\n",msg); \
  printf(" cache_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->type != HR_FREE || cp->gregs || cp->locked || cp->flags) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
  } \
  printf(" guest_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 || gp->sreg >= 0 || gp->flags) \
      printf(" %d: v=%d f=%x s=%d c=%d\n", i, gp->vreg, gp->flags, gp->sreg, gp->cnst); \
  } \
  printf(" gconsts:\n"); \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    if (gconsts[i].gregs) \
      printf(" %d: m=%x v=%x\n", i, gconsts[i].gregs, gconsts[i].val); \
  } \
}

#define RCACHE_CHECK(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i, x, m = 0, d = 0; \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->flags & HRF_PINNED) m |= (1 << i); \
    if (cp->type == HR_FREE || cp->type == HR_TEMP) continue; \
    /* check connectivity greg->vreg */ \
    FOR_ALL_BITS_SET_DO(cp->gregs, x, \
      if (guest_regs[x].vreg != i) \
        { d = 1; printf("cache check v=%d r=%d not connected?\n",i,x); } \
    ) \
  } \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 && !(cache_regs[gp->vreg].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d v=%d not connected?\n", i, gp->vreg); } \
    if (gp->vreg != -1 && cache_regs[gp->vreg].type != HR_CACHED) \
      { d = 1; printf("cache check r=%d v=%d wrong type?\n", i, gp->vreg); } \
    if ((gp->flags & GRF_CONST) && !(gconsts[gp->cnst].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d c=%d not connected?\n", i, gp->cnst); } \
    if ((gp->flags & GRF_CDIRTY) && (gp->vreg != -1 || !(gp->flags & GRF_CONST))) \
      { d = 1; printf("cache check r=%d CDIRTY?\n", i); } \
    if (gp->flags & (GRF_STATIC|GRF_PINNED)) { \
      if (gp->sreg == -1 || !(cache_regs[gp->sreg].flags & HRF_PINNED)) \
        { d = 1; printf("cache check r=%d v=%d not pinned?\n", i, gp->vreg); } \
      else m &= ~(1 << gp->sreg); \
    } \
  } \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    FOR_ALL_BITS_SET_DO(gconsts[i].gregs, x, \
      if (guest_regs[x].cnst != i || !(guest_regs[x].flags & GRF_CONST)) \
        { d = 1; printf("cache check c=%d v=%d not connected?\n",i,x); } \
    ) \
  } \
  if (m) \
    { d = 1; printf("cache check m=%x pinning wrong?\n",m); } \
  if (d) RCACHE_DUMP(msg) \
/* else { \
  printf("locked regs %s:\n",msg); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->locked) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
  } \
} */ \
}
#if PROPAGATE_CONSTANTS
static inline int gconst_alloc(sh2_reg_e r)
{
  int i, n = -1;

  for (i = 0; i < ARRAY_SIZE(gconsts); i++) {
    gconsts[i].gregs &= ~(1 << r);
    if (gconsts[i].gregs == 0 && n < 0)
      n = i;
  }
  if (n >= 0)
    gconsts[n].gregs = (1 << r);
  else {
    printf("all gconst buffers in use, aborting\n");
    exit(1); // cannot happen - more constants than guest regs?
  }
  return n;
}

static void gconst_set(sh2_reg_e r, u32 val)
{
  int i = gconst_alloc(r);

  guest_regs[r].flags |= GRF_CONST;
  guest_regs[r].cnst = i;
  gconsts[i].val = val;
}

static void gconst_new(sh2_reg_e r, u32 val)
{
  gconst_set(r, val);
  guest_regs[r].flags |= GRF_CDIRTY;

  // throw away old r that we might have cached
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
}
#endif
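
/* Constant propagation lifecycle, as implemented here: gconst_new() records a
 * known value and sets GRF_CDIRTY, meaning the value exists only in the
 * descriptor - no host reg holds it and the context copy is stale. A later
 * read materializes it via gconst_try_read() (CDIRTY becomes DIRTY), and
 * gconst_clean() forces materialization so the value can be written back. */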
static int gconst_get(sh2_reg_e r, u32 *val)
{
  if (guest_regs[r].flags & GRF_CONST) {
    *val = gconsts[guest_regs[r].cnst].val;
    return 1;
  }
  *val = 0;
  return 0;
}

static int gconst_check(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    return 1;
  return 0;
}
// update hr if dirty, else do nothing
static int gconst_try_read(int vreg, sh2_reg_e r)
{
  int i, x;
  u32 v;

  if (guest_regs[r].flags & GRF_CDIRTY) {
    x = guest_regs[r].cnst;
    v = gconsts[x].val;
    emith_move_r_imm(cache_regs[vreg].hreg, v);
    rcache_set_x16(cache_regs[vreg].hreg, v == (s16)v, v == (u16)v);
    FOR_ALL_BITS_SET_DO(gconsts[x].gregs, i,
      {
        if (guest_regs[i].vreg >= 0 && guest_regs[i].vreg != vreg)
          rcache_remove_vreg_alias(guest_regs[i].vreg, i);
        if (guest_regs[i].vreg < 0)
          rcache_add_vreg_alias(vreg, i);
        guest_regs[i].flags &= ~GRF_CDIRTY;
        guest_regs[i].flags |= GRF_DIRTY;
      });
    cache_regs[vreg].type = HR_CACHED;
    cache_regs[vreg].flags |= HRF_DIRTY;
    return 1;
  }
  return 0;
}
static u32 gconst_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY)
      mask |= (1 << i);
  return mask;
}

static void gconst_kill(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    gconsts[guest_regs[r].cnst].gregs &= ~(1 << r);
  guest_regs[r].flags &= ~(GRF_CONST|GRF_CDIRTY);
}

static void gconst_copy(sh2_reg_e rd, sh2_reg_e rs)
{
  gconst_kill(rd);
  if (guest_regs[rs].flags & GRF_CONST) {
    guest_regs[rd].flags |= GRF_CONST;
    if (guest_regs[rd].vreg < 0)
      guest_regs[rd].flags |= GRF_CDIRTY;
    guest_regs[rd].cnst = guest_regs[rs].cnst;
    gconsts[guest_regs[rd].cnst].gregs |= (1 << rd);
  }
}

static void gconst_clean(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY) {
      // using RC_GR_READ here: it will call gconst_try_read,
      // cache the reg and mark it dirty.
      rcache_get_reg_(i, RC_GR_READ, 0, NULL);
    }
}

static void gconst_invalidate(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_CONST|GRF_CDIRTY))
      gconsts[guest_regs[i].cnst].gregs &= ~(1 << i);
    guest_regs[i].flags &= ~(GRF_CONST|GRF_CDIRTY);
  }
}
static u16 rcache_counter;
// SH2 register usage bitmasks
static u32 rcache_vregs_reg;     // regs of type HRT_REG (for pinning)
static u32 rcache_regs_static;   // statically allocated regs
static u32 rcache_regs_pinned;   // pinned regs
static u32 rcache_regs_now;      // regs used in current insn
static u32 rcache_regs_soon;     // regs used in the next few insns
static u32 rcache_regs_late;     // regs used in later insns
static u32 rcache_regs_discard;  // regs overwritten without being used
static u32 rcache_regs_clean;    // regs needing cleaning
static void rcache_lock_vreg(int x)
{
  if (x >= 0) {
    cache_regs[x].locked++;
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("locking free vreg %x, aborting\n", x);
      exit(1);
    }
    if (!cache_regs[x].locked) {
      printf("locking overflow vreg %x, aborting\n", x);
      exit(1);
    }
#endif
  }
}

static void rcache_unlock_vreg(int x)
{
  if (x >= 0) {
#if DRC_DEBUG & 64
    if (cache_regs[x].type == HR_FREE) {
      printf("unlocking free vreg %x, aborting\n", x);
      exit(1);
    }
#endif
    if (cache_regs[x].locked)
      cache_regs[x].locked--;
  }
}

static void rcache_free_vreg(int x)
{
  cache_regs[x].type = cache_regs[x].locked ? HR_TEMP : HR_FREE;
  cache_regs[x].flags &= HRF_PINNED;
  cache_regs[x].gregs = 0;
}
static void rcache_unmap_vreg(int x)
{
  int i;

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, i,
    if (guest_regs[i].flags & GRF_DIRTY) {
      // if a dirty reg is unmapped save its value to context
      if ((~rcache_regs_discard | rcache_regs_now) & (1 << i))
        emith_ctx_write(cache_regs[x].hreg, i * 4);
      guest_regs[i].flags &= ~GRF_DIRTY;
    }
    guest_regs[i].vreg = -1);
  rcache_free_vreg(x);
}

static void rcache_move_vreg(int d, int x)
{
  int i;

  cache_regs[d].type = HR_CACHED;
  cache_regs[d].gregs = cache_regs[x].gregs;
  cache_regs[d].flags &= HRF_PINNED;
  cache_regs[d].flags |= cache_regs[x].flags & ~HRF_PINNED;
  cache_regs[d].locked = 0;
  cache_regs[d].stamp = cache_regs[x].stamp;
  emith_move_r_r(cache_regs[d].hreg, cache_regs[x].hreg);
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].vreg == x)
      guest_regs[i].vreg = d;
  rcache_free_vreg(x);
}
static void rcache_clean_vreg(int x)
{
  u32 rns = rcache_regs_now | rcache_regs_soon;
  int r;

  if (cache_regs[x].flags & HRF_DIRTY) { // writeback
    cache_regs[x].flags &= ~HRF_DIRTY;
    rcache_lock_vreg(x);
    FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, r,
      if (guest_regs[r].flags & GRF_DIRTY) {
        if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) {
          if (guest_regs[r].vreg != guest_regs[r].sreg &&
              !cache_regs[guest_regs[r].sreg].locked &&
              ((~rcache_regs_discard | rcache_regs_now) & (1 << r)) &&
              !(rns & cache_regs[guest_regs[r].sreg].gregs)) {
            // statically mapped reg not in its sreg. move back to sreg
            rcache_evict_vreg(guest_regs[r].sreg);
            emith_move_r_r(cache_regs[guest_regs[r].sreg].hreg,
                            cache_regs[guest_regs[r].vreg].hreg);
            rcache_copy_x16(cache_regs[guest_regs[r].sreg].hreg,
                            cache_regs[guest_regs[r].vreg].hreg);
            rcache_remove_vreg_alias(x, r);
            rcache_add_vreg_alias(guest_regs[r].sreg, r);
            cache_regs[guest_regs[r].sreg].flags |= HRF_DIRTY;
          } else
            // cannot remap. keep dirty for writeback in unmap
            cache_regs[x].flags |= HRF_DIRTY;
        } else {
          if ((~rcache_regs_discard | rcache_regs_now) & (1 << r))
            emith_ctx_write(cache_regs[x].hreg, r * 4);
          guest_regs[r].flags &= ~GRF_DIRTY;
        }
        rcache_regs_clean &= ~(1 << r);
      })
    rcache_unlock_vreg(x);
  }

#if DRC_DEBUG & 64
  RCACHE_CHECK("after clean");
#endif
}
static void rcache_add_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs |= (1 << r);
  guest_regs[r].vreg = x;
  cache_regs[x].type = HR_CACHED;
}

static void rcache_remove_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs &= ~(1 << r);
  if (!cache_regs[x].gregs) {
    // no reg mapped -> free vreg
    if (cache_regs[x].locked)
      cache_regs[x].type = HR_TEMP;
    else
      rcache_free_vreg(x);
  }
  guest_regs[r].vreg = -1;
}

static void rcache_evict_vreg(int x)
{
  rcache_remap_vreg(x);
  rcache_unmap_vreg(x);
}

static void rcache_evict_vreg_aliases(int x, sh2_reg_e r)
{
  rcache_remove_vreg_alias(x, r);
  rcache_evict_vreg(x);
  rcache_add_vreg_alias(x, r);
}
static int rcache_allocate(int what, int minprio)
{
  // evict reg with oldest stamp (only for HRT_REG, no temps)
  int i, i_prio, oldest = -1, prio = 0;
  u16 min_stamp = (u16)-1;

  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--) {
    // consider only non-static, unpinned, unlocked REG or TEMP
    if ((cache_regs[i].flags & HRF_PINNED) || cache_regs[i].locked)
      continue;
    if ((what > 0 && !(cache_regs[i].htype & HRT_REG)) ||   // get a REG
        (what == 0 && (cache_regs[i].htype & HRT_TEMP)) ||  // get a non-TEMP
        (what < 0 && !(cache_regs[i].htype & HRT_TEMP)))    // get a TEMP
      continue;
    if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP) {
      // REG is free
      prio = 10;
      oldest = i;
      break;
    }
    if (cache_regs[i].type == HR_CACHED) {
      if (rcache_regs_now & cache_regs[i].gregs)
        // REGs needed for the current insn
        i_prio = 0;
      else if (rcache_regs_soon & cache_regs[i].gregs)
        // REGs needed in the next insns
        i_prio = 2;
      else if (rcache_regs_late & cache_regs[i].gregs)
        // REGs needed in some future insn
        i_prio = 4;
      else if (~rcache_regs_discard & cache_regs[i].gregs)
        // REGs not needed in the foreseeable future
        i_prio = 6;
      else
        // REGs soon overwritten anyway
        i_prio = 8;
      if (!(cache_regs[i].flags & HRF_DIRTY)) i_prio++;

      if (prio < i_prio || (prio == i_prio && cache_regs[i].stamp < min_stamp)) {
        min_stamp = cache_regs[i].stamp;
        oldest = i;
        prio = i_prio;
      }
    }
  }

  if (prio < minprio || oldest == -1)
    return -1;

  if (cache_regs[oldest].type == HR_CACHED)
    rcache_evict_vreg(oldest);
  else
    rcache_free_vreg(oldest);

  return oldest;
}
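
/* Eviction priority as computed above, from worst to best candidate:
 *   0/1  needed by the current insn (dirty/clean)
 *   2/3  needed within the next few insns
 *   4/5  needed by some later insn
 *   6/7  not needed in the foreseeable future
 *   8/9  will be overwritten without being read
 *   10   free slot
 * Clean regs get +1 since they can be dropped without a writeback; ties are
 * broken by the oldest stamp (LRU). minprio lets callers refuse bad picks. */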
static int rcache_allocate_vreg(int needed)
{
  int x;

  x = rcache_allocate(1, needed ? 0 : 4);
  if (x < 0)
    x = rcache_allocate(-1, 0);
  return x;
}

static int rcache_allocate_nontemp(void)
{
  int x = rcache_allocate(0, 4);
  return x;
}

static int rcache_allocate_temp(void)
{
  int x = rcache_allocate(-1, 0);
  if (x < 0)
    x = rcache_allocate(0, 0);
  return x;
}
// maps a host register to a REG
static int rcache_map_reg(sh2_reg_e r, int hr)
{
#if REMAP_REGISTER
  int i;

  gconst_kill(r);

  // lookup the TEMP hr maps to
  i = reg_map_host[hr];
  if (i < 0) {
    // must not happen
    printf("invalid host register %d\n", hr);
    exit(1);
  }

  // remove old mappings of r and i if one exists
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);

  // set new mapping
  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 1 << r;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  cache_regs[i].flags |= HRF_DIRTY;
  rcache_lock_vreg(i);
  guest_regs[r].flags |= GRF_DIRTY;
  guest_regs[r].vreg = i;
#if DRC_DEBUG & 64
  RCACHE_CHECK("after map");
#endif
  return cache_regs[i].hreg;
#else
  return rcache_get_reg(r, RC_GR_WRITE, NULL);
#endif
}
// remap vreg from a TEMP to a REG if it will be used (upcoming TEMP invalidation)
static void rcache_remap_vreg(int x)
{
#if REMAP_REGISTER
  u32 rsl_d = rcache_regs_soon | rcache_regs_late;
  int d;

  // x must be a cached vreg
  if (cache_regs[x].type != HR_CACHED || cache_regs[x].locked)
    return;
  // don't do it if x isn't used
  if (!(rsl_d & cache_regs[x].gregs)) {
    // clean here to avoid data loss on invalidation
    rcache_clean_vreg(x);
    return;
  }

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, d,
    if ((guest_regs[d].flags & (GRF_STATIC|GRF_PINNED)) &&
        !cache_regs[guest_regs[d].sreg].locked &&
        !((rsl_d|rcache_regs_now) & cache_regs[guest_regs[d].sreg].gregs)) {
      // STATIC not in its sreg and sreg is available
      rcache_evict_vreg(guest_regs[d].sreg);
      rcache_move_vreg(guest_regs[d].sreg, x);
      return;
    }
  )

  // allocate a non-TEMP vreg
  rcache_lock_vreg(x); // lock to avoid evicting x
  d = rcache_allocate_nontemp();
  rcache_unlock_vreg(x);
  if (d < 0) {
    rcache_clean_vreg(x);
    return;
  }

  // move vreg to new location
  rcache_move_vreg(d, x);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after remap");
#endif
#else
  rcache_clean_vreg(x);
#endif
}
static void rcache_alias_vreg(sh2_reg_e rd, sh2_reg_e rs)
{
#if ALIAS_REGISTERS
  int x;

  // if s isn't constant, it must be in cache for aliasing
  if (!gconst_check(rs))
    rcache_get_reg_(rs, RC_GR_READ, 0, NULL);

  // if d and s are not already aliased
  x = guest_regs[rs].vreg;
  if (guest_regs[rd].vreg != x) {
    // remove possible old mapping of dst
    if (guest_regs[rd].vreg >= 0)
      rcache_remove_vreg_alias(guest_regs[rd].vreg, rd);
    // make dst an alias of src
    if (x >= 0)
      rcache_add_vreg_alias(x, rd);
    // if d is now in cache, it must be dirty
    if (guest_regs[rd].vreg >= 0) {
      x = guest_regs[rd].vreg;
      cache_regs[x].flags |= HRF_DIRTY;
      guest_regs[rd].flags |= GRF_DIRTY;
    }
  }

  gconst_copy(rd, rs);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after alias");
#endif
#else
  int hr_s = rcache_get_reg(rs, RC_GR_READ, NULL);
  int hr_d = rcache_get_reg(rd, RC_GR_WRITE, NULL);

  emith_move_r_r(hr_d, hr_s);
  gconst_copy(rd, rs);
#endif
}
// note: must not be called when doing conditional code
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr)
{
  int src, dst, ali;
  cache_reg_t *tr;
  u32 rsp_d = (rcache_regs_soon | rcache_regs_static | rcache_regs_pinned) &
               ~rcache_regs_discard;

  dst = src = guest_regs[r].vreg;

  rcache_lock_vreg(src); // lock to avoid evicting src
  // good opportunity to relocate a remapped STATIC?
  if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
      src != guest_regs[r].sreg && (src < 0 || mode != RC_GR_READ) &&
      !cache_regs[guest_regs[r].sreg].locked &&
      !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[r].sreg].gregs)) {
    dst = guest_regs[r].sreg;
    rcache_evict_vreg(dst);
  } else if (dst < 0) {
    // allocate a cache register
    if ((dst = rcache_allocate_vreg(rsp_d & (1 << r))) < 0) {
      printf("no registers to evict, aborting\n");
      exit(1);
    }
  }
  tr = &cache_regs[dst];
  tr->stamp = rcache_counter;

  // remove r from src
  if (src >= 0 && src != dst)
    rcache_remove_vreg_alias(src, r);
  rcache_unlock_vreg(src);

  // if r has a constant it may have aliases
  if (mode != RC_GR_WRITE && gconst_try_read(dst, r))
    src = dst;

  // if r will be modified, check for aliases being needed soon
  ali = tr->gregs & ~(1 << r);
  if (mode != RC_GR_READ && src == dst && ali) {
    int x = -1;
    if ((rsp_d|rcache_regs_now) & ali) {
      if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
          guest_regs[r].sreg == dst && !tr->locked) {
        // split aliases if r is STATIC in sreg and dst isn't already locked
        int t;
        FOR_ALL_BITS_SET_DO(ali, t,
          if ((guest_regs[t].flags & (GRF_STATIC|GRF_PINNED)) &&
              !(ali & ~(1 << t)) &&
              !cache_regs[guest_regs[t].sreg].locked &&
              !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[t].sreg].gregs)) {
            // alias is a single STATIC and its sreg is available
            x = guest_regs[t].sreg;
            rcache_evict_vreg(x);
          } else {
            rcache_lock_vreg(dst); // lock to avoid evicting dst
            x = rcache_allocate_vreg(rsp_d & ali);
            rcache_unlock_vreg(dst);
          }
          break;
        )
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          src = dst;
          rcache_move_vreg(x, dst);
        }
      } else {
        // split r
        rcache_lock_vreg(src); // lock to avoid evicting src
        x = rcache_allocate_vreg(rsp_d & (1 << r));
        rcache_unlock_vreg(src);
        if (x >= 0) {
          rcache_remove_vreg_alias(src, r);
          dst = x;
          tr = &cache_regs[dst];
          tr->stamp = rcache_counter;
        }
      }
    }
    if (x < 0)
      // aliases not needed or no vreg available, remove them
      rcache_evict_vreg_aliases(dst, r);
  }

  // assign r to dst
  rcache_add_vreg_alias(dst, r);

  // handle dst register transfer
  if (src < 0 && mode != RC_GR_WRITE)
    emith_ctx_read(tr->hreg, r * 4);
  if (hr) {
    *hr = (src >= 0 ? cache_regs[src].hreg : tr->hreg);
    rcache_lock_vreg(src >= 0 ? src : dst);
  } else if (src >= 0 && mode != RC_GR_WRITE && cache_regs[src].hreg != tr->hreg)
    emith_move_r_r(tr->hreg, cache_regs[src].hreg);

  // housekeeping
  if (do_locking)
    rcache_lock_vreg(dst);
  if (mode != RC_GR_READ) {
    tr->flags |= HRF_DIRTY;
    guest_regs[r].flags |= GRF_DIRTY;
    gconst_kill(r);
    rcache_set_x16(tr->hreg, 0, 0);
  } else if (src >= 0 && cache_regs[src].hreg != tr->hreg)
    rcache_copy_x16(tr->hreg, cache_regs[src].hreg);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getreg");
#endif
  return tr->hreg;
}
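
/* In short, rcache_get_reg_() guarantees r ends up alone in a host reg valid
 * for the requested mode: READ loads or materializes the value if needed,
 * WRITE only reserves the reg (old contents are irrelevant), RMW does both.
 * Aliases still needed soon are split off into their own vreg first;
 * otherwise they are flushed. With hr != NULL the caller additionally gets
 * the (possibly different) reg holding the old value, locked for its use. */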
static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr)
{
  return rcache_get_reg_(r, mode, 1, hr);
}

static void rcache_pin_reg(sh2_reg_e r)
{
  int hr, x;

  // don't pin if static or already pinned
  if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED))
    return;

  rcache_regs_soon |= (1 << r); // kludge to prevent allocation of a temp
  hr = rcache_get_reg_(r, RC_GR_RMW, 0, NULL);
  x = reg_map_host[hr];

  // can only pin non-TEMPs
  if (!(cache_regs[x].htype & HRT_TEMP)) {
    guest_regs[r].flags |= GRF_PINNED;
    cache_regs[x].flags |= HRF_PINNED;
    guest_regs[r].sreg = x;
    rcache_regs_pinned |= (1 << r);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after pin");
#endif
}
static int rcache_get_tmp(void)
{
  int i;

  i = rcache_allocate_temp();
  if (i < 0) {
    printf("cannot allocate temp\n");
    exit(1);
  }

  cache_regs[i].type = HR_TEMP;
  rcache_lock_vreg(i);

  return cache_regs[i].hreg;
}

static int rcache_get_vreg_hr(int hr)
{
  int i;

  i = reg_map_host[hr];
  if (i < 0 || cache_regs[i].locked) {
    printf("host register %d is locked\n", hr);
    exit(1);
  }

  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);
  else if (cache_regs[i].type == HR_TEMP && cache_regs[i].locked) {
    printf("host reg %d already used, aborting\n", hr);
    exit(1);
  }

  return i;
}

static int rcache_get_vreg_arg(int arg)
{
  int hr = 0;

  host_arg2reg(hr, arg);
  return rcache_get_vreg_hr(hr);
}

// get a reg to be used as function arg
static int rcache_get_tmp_arg(int arg)
{
  int x = rcache_get_vreg_arg(arg);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}

// ... as return value after a call
static int rcache_get_tmp_ret(void)
{
  int x = rcache_get_vreg_hr(RET_REG);
  cache_regs[x].type = HR_TEMP;
  rcache_lock_vreg(x);

  return cache_regs[x].hreg;
}
// same but caches a reg if access is readonly (announced by hr being NULL)
static int rcache_get_reg_arg(int arg, sh2_reg_e r, int *hr)
{
  int i, srcr, dstr, dstid, keep;
  u32 val;
  host_arg2reg(dstr, arg);

  i = guest_regs[r].vreg;
  if (i >= 0 && cache_regs[i].type == HR_CACHED && cache_regs[i].hreg == dstr)
    // r is already in arg, avoid evicting
    dstid = i;
  else
    dstid = rcache_get_vreg_arg(arg);
  dstr = cache_regs[dstid].hreg;

  if (rcache_is_cached(r)) {
    // r is needed later on anyway
    srcr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
    keep = 1;
  } else if ((guest_regs[r].flags & GRF_CDIRTY) && gconst_get(r, &val)) {
    // r has an uncommitted const - load into arg, but keep constant uncommitted
    srcr = dstr;
    emith_move_r_imm(srcr, val);
    keep = 0;
  } else {
    // must read from ctx
    srcr = dstr;
    emith_ctx_read(srcr, r * 4);
    keep = 1;
  }

  if (cache_regs[dstid].type == HR_CACHED)
    rcache_evict_vreg(dstid);
  cache_regs[dstid].type = HR_TEMP;

  if (hr == NULL) {
    if (dstr != srcr)
      // arg is a copy of cached r
      emith_move_r_r(dstr, srcr);
    else if (keep && guest_regs[r].vreg < 0)
      // keep arg as vreg for r
      rcache_add_vreg_alias(dstid, r);
  } else {
    *hr = srcr;
    if (dstr != srcr) // must lock srcr if not copied here
      rcache_lock_vreg(reg_map_host[srcr]);
  }

  cache_regs[dstid].stamp = ++rcache_counter;
  rcache_lock_vreg(dstid);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getarg");
#endif
  return dstr;
}
static void rcache_free_tmp(int hr)
{
  int i = reg_map_host[hr];

  if (i < 0 || cache_regs[i].type != HR_TEMP) {
    printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, cache_regs[i].type);
    exit(1);
  }

  rcache_unlock_vreg(i);
}
// saves temporary result either in REG or in drctmp
static int rcache_save_tmp(int hr)
{
  int i;

  // find REG, either free or unlocked temp or oldest non-hinted cached
  i = rcache_allocate_nontemp();
  if (i < 0) {
    // if none is available, store in drctmp
    emith_ctx_write(hr, offsetof(SH2, drc_tmp));
    rcache_free_tmp(hr);
    return -1;
  }

  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 0; // not storing any guest register
  cache_regs[i].flags &= HRF_PINNED;
  cache_regs[i].locked = 0;
  cache_regs[i].stamp = ++rcache_counter;
  rcache_lock_vreg(i);
  emith_move_r_r(cache_regs[i].hreg, hr);
  rcache_free_tmp(hr);
  return i;
}

static int rcache_restore_tmp(int x)
{
  int hr;

  // find REG with tmp store: cached but with no gregs
  if (x >= 0) {
    if (cache_regs[x].type != HR_CACHED || cache_regs[x].gregs) {
      printf("invalid tmp storage %d\n", x);
      exit(1);
    }
    // found, transform to a TEMP
    cache_regs[x].type = HR_TEMP;
    return cache_regs[x].hreg;
  }

  // if not available, create a TEMP store and fetch from drctmp
  hr = rcache_get_tmp();
  emith_ctx_read(hr, offsetof(SH2, drc_tmp));

  return hr;
}
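
/* Typical usage sketch (illustrative, not from the original): preserving a
 * temporary across emitted code that may clobber TEMP regs:
 *   int tmp = rcache_get_tmp();
 *   // ...compute into tmp...
 *   int slot = rcache_save_tmp(tmp);   // tmp is freed here
 *   // ...emit code that invalidates temps...
 *   tmp = rcache_restore_tmp(slot);    // slot == -1 reloads from drc_tmp
 * Either way the caller gets back a fresh TEMP holding the saved value. */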
static void rcache_free(int hr)
{
  int x = reg_map_host[hr];
  rcache_unlock_vreg(x);
}

static void rcache_unlock(int x)
{
  if (x >= 0)
    cache_regs[x].locked = 0;
}

static void rcache_unlock_all(void)
{
  int i;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    cache_regs[i].locked = 0;
}

static void rcache_unpin_all(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & GRF_PINNED) {
      guest_regs[i].flags &= ~GRF_PINNED;
      cache_regs[guest_regs[i].sreg].flags &= ~HRF_PINNED;
      guest_regs[i].sreg = -1;
      rcache_regs_pinned &= ~(1 << i);
    }
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after unpin");
#endif
}

static void rcache_save_pinned(void)
{
  int i;

  // save pinned regs to context
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if ((guest_regs[i].flags & GRF_PINNED) && guest_regs[i].vreg >= 0)
      emith_ctx_write(cache_regs[guest_regs[i].vreg].hreg, i * 4);
}
static inline void rcache_set_usage_now(u32 mask)
{
  rcache_regs_now = mask;
}

static inline void rcache_set_usage_soon(u32 mask)
{
  rcache_regs_soon = mask;
}

static inline void rcache_set_usage_late(u32 mask)
{
  rcache_regs_late = mask;
}

static inline void rcache_set_usage_discard(u32 mask)
{
  rcache_regs_discard = mask;
}

static inline int rcache_is_cached(sh2_reg_e r)
{
  // is r in cache or needed soon?
  u32 rsc = rcache_regs_soon | rcache_regs_clean;
  return (guest_regs[r].vreg >= 0 || (rsc & (1 << r)));
}

static inline int rcache_is_hreg_used(int hr)
{
  int x = reg_map_host[hr];
  // is hr in use?
  return cache_regs[x].type != HR_FREE &&
        (cache_regs[x].type != HR_TEMP || cache_regs[x].locked);
}
static inline u32 rcache_used_hregs_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if ((cache_regs[i].htype & HRT_TEMP) && cache_regs[i].type != HR_FREE &&
        (cache_regs[i].type != HR_TEMP || cache_regs[i].locked))
      mask |= 1 << cache_regs[i].hreg;

  return mask;
}

static inline u32 rcache_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_DIRTY)
      mask |= 1 << i;
  mask |= gconst_dirty_mask();

  return mask;
}

static inline u32 rcache_cached_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED)
      mask |= cache_regs[i].gregs;

  return mask;
}
static void rcache_clean_tmp(void)
{
  int i;

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED && (cache_regs[i].htype & HRT_TEMP)) {
      rcache_unlock(i);
      rcache_remap_vreg(i);
    }
  rcache_regs_clean = 0;
}

static void rcache_clean_masked(u32 mask)
{
  int i, r, hr;
  u32 m;

  rcache_regs_clean |= mask;
  mask = rcache_regs_clean;

  // clean constants where all aliases are covered by the mask, exempt statics
  // to avoid flushing them to context if sreg isn't available
  m = mask & ~(rcache_regs_static | rcache_regs_pinned);
  for (i = 0; i < ARRAY_SIZE(gconsts); i++)
    if ((gconsts[i].gregs & m) && !(gconsts[i].gregs & ~mask)) {
      FOR_ALL_BITS_SET_DO(gconsts[i].gregs, r,
        if (guest_regs[r].flags & GRF_CDIRTY) {
          hr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
          rcache_clean_vreg(reg_map_host[hr]);
          break;
        });
    }

  // clean vregs where all aliases are covered by the mask
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED &&
        (cache_regs[i].gregs & mask) && !(cache_regs[i].gregs & ~mask))
      rcache_clean_vreg(i);
}
static void rcache_clean(void)
{
  int i;
  gconst_clean();

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--)
    if (cache_regs[i].type == HR_CACHED)
      rcache_clean_vreg(i);

  // relocate statics to their sregs (necessary before conditional jumps)
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if ((guest_regs[i].flags & (GRF_STATIC|GRF_PINNED)) &&
        guest_regs[i].vreg != guest_regs[i].sreg) {
      rcache_lock_vreg(guest_regs[i].vreg);
      rcache_evict_vreg(guest_regs[i].sreg);
      rcache_unlock_vreg(guest_regs[i].vreg);

      if (guest_regs[i].vreg < 0)
        emith_ctx_read(cache_regs[guest_regs[i].sreg].hreg, i*4);
      else {
        emith_move_r_r(cache_regs[guest_regs[i].sreg].hreg,
                        cache_regs[guest_regs[i].vreg].hreg);
        rcache_copy_x16(cache_regs[guest_regs[i].sreg].hreg,
                        cache_regs[guest_regs[i].vreg].hreg);
        rcache_remove_vreg_alias(guest_regs[i].vreg, i);
      }
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }
  rcache_regs_clean = 0;
}
static void rcache_invalidate_tmp(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype & HRT_TEMP) {
      rcache_unlock(i);
      if (cache_regs[i].type == HR_CACHED)
        rcache_evict_vreg(i);
      else
        rcache_free_vreg(i);
    }
  }
}
static void rcache_invalidate(void)
{
  int i;

  gconst_invalidate();
  rcache_unlock_all();

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    rcache_free_vreg(i);

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i].flags &= GRF_STATIC;
    if (!(guest_regs[i].flags & GRF_STATIC))
      guest_regs[i].vreg = -1;
    else {
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].type = HR_CACHED;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY|HRF_PINNED;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_counter = 0;
  rcache_regs_now = rcache_regs_soon = rcache_regs_late = 0;
  rcache_regs_discard = rcache_regs_clean = 0;
}

static void rcache_flush(void)
{
  rcache_clean();
  rcache_invalidate();
}
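
/* rcache_clean() writes every dirty value back (to its sreg or the context)
 * while keeping cache contents valid; rcache_invalidate() drops all mappings
 * without writing anything back. rcache_flush() combines the two and is
 * presumably what block-boundary code uses to reach a state where the
 * context holds all guest registers. */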
static void rcache_create(void)
{
  int x = 0, i;

  // create cache_regs as host register representation
  // RET_REG/params should be first TEMPs to avoid allocation conflicts in calls
  cache_regs[x++] = (cache_reg_t) {.hreg = RET_REG, .htype = HRT_TEMP};
  for (i = 0; i < ARRAY_SIZE(hregs_param); i++)
    if (hregs_param[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_param[i], .htype = HRT_TEMP};

  for (i = 0; i < ARRAY_SIZE(hregs_temp); i++)
    if (hregs_temp[i] != RET_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_temp[i], .htype = HRT_TEMP};

  for (i = ARRAY_SIZE(hregs_saved)-1; i >= 0; i--)
    if (hregs_saved[i] != CONTEXT_REG)
      cache_regs[x++] = (cache_reg_t){.hreg = hregs_saved[i], .htype = HRT_REG};

  if (x != ARRAY_SIZE(cache_regs)) {
    printf("rcache_create failed (conflicting register count)\n");
    exit(1);
  }

  // mapping from host_register to cache regs index
  memset(reg_map_host, -1, sizeof(reg_map_host));
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].htype)
      reg_map_host[cache_regs[i].hreg] = i;
    if (cache_regs[i].htype == HRT_REG)
      rcache_vregs_reg |= (1 << i);
  }

  // create static host register mapping for SH2 regs
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i] = (guest_reg_t){.sreg = -1};
  }
  for (i = 0; i < ARRAY_SIZE(regs_static); i += 2) {
    for (x = ARRAY_SIZE(cache_regs)-1; x >= 0; x--)
      if (cache_regs[x].hreg == regs_static[i+1]) break;
    if (x >= 0) {
      guest_regs[regs_static[i]] = (guest_reg_t){.flags = GRF_STATIC, .sreg = x};
      rcache_regs_static |= (1 << regs_static[i]);
      rcache_vregs_reg &= ~(1 << x);
    }
  }

  printf("DRC registers created, %ld host regs (%d REG, %d STATIC, 1 CTX)\n",
    CACHE_REGS+1L, count_bits(rcache_vregs_reg), count_bits(rcache_regs_static));
}
static void rcache_init(void)
{
  // create DRC data structures
  rcache_create();

  rcache_invalidate();
#if DRC_DEBUG & 64
  RCACHE_CHECK("after init");
#endif
}
// ---------------------------------------------------------------

// NB may return either REG or TEMP
static int emit_get_rbase_and_offs(SH2 *sh2, sh2_reg_e r, int rmode, u32 *offs)
{
  uptr omask = emith_rw_offs_max(); // offset mask
  u32 mask = 0;
  u32 a;
  int poffs;
  int hr, hr2;
  uptr la;

  // is r constant and points to a memory region?
  if (! gconst_get(r, &a))
    return -1;
  poffs = dr_ctx_get_mem_ptr(sh2, a, &mask);
  if (poffs == -1)
    return -1;

  if (mask < 0x20000) {
    // data array, BIOS, DRAM, can't safely access directly since host addr may
    // change (BIOS,da code may run on either core, DRAM may be switched)
    hr = rcache_get_tmp();
    a = (a + *offs) & mask;
    if (poffs == offsetof(SH2, p_da)) {
      // access sh2->data_array directly
      a += offsetof(SH2, data_array);
      emith_add_r_r_ptr_imm(hr, CONTEXT_REG, a & ~omask);
    } else {
      emith_ctx_read_ptr(hr, poffs);
      if (a & ~omask)
        emith_add_r_r_ptr_imm(hr, hr, a & ~omask);
    }
    *offs = a & omask;
    return hr;
  }

  // ROM, SDRAM. Host address should be mmapped to be equal to SH2 address.
  la = (uptr)*(void **)((char *)sh2 + poffs);

  // if r is in rcache or needed soon anyway, and offs is relative to region,
  // and address translation fits in add_ptr_imm (s32), then use rcached const
  if (la == (s32)la && !(*offs & ~mask) && rcache_is_cached(r)) {
    u32 odd = a & 1; // need to fix odd address for correct byte addressing
    la -= (s32)((a & ~mask) - *offs - odd); // diff between reg and memory
    hr = hr2 = rcache_get_reg(r, rmode, NULL);
    if ((s32)a < 0) emith_uext_ptr(hr2);
    if ((la & ~omask) - odd) {
      hr = rcache_get_tmp();
      emith_add_r_r_ptr_imm(hr, hr2, (la & ~omask) - odd);
      rcache_free(hr2);
    }
    *offs = (la & omask);
  } else {
    // known fixed host address
    la += (a + *offs) & mask;
    hr = rcache_get_tmp();
    emith_move_r_ptr_imm(hr, la & ~omask);
    *offs = la & omask;
  }
  return hr;
}
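
/* Worked example (illustrative numbers, not from the original): if a guest
 * reg is known to be constant 0x06000104 (SDRAM) and the insn accesses
 * @(8,Rn), the split above returns a host reg holding the part of the
 * translated host address that exceeds the encodable offset range and leaves
 * the low bits in *offs, so the access can be emitted as one load/store with
 * an immediate offset permitted by emith_rw_offs_max(). */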
// read const data from const ROM address
static int emit_get_rom_data(SH2 *sh2, sh2_reg_e r, u32 offs, int size, u32 *val)
{
  u32 a, mask;

  *val = 0;
  if (gconst_get(r, &a)) {
    a += offs;
    // check if rom is memory mapped (not bank switched), and address is in rom
    if (dr_is_rom(a) && p32x_sh2_get_mem_ptr(a, &mask, sh2) == sh2->p_rom) {
      switch (size & MF_SIZEMASK) {
      case 0:  *val = (s8)p32x_sh2_read8(a, sh2s);    break;  // 8
      case 1:  *val = (s16)p32x_sh2_read16(a, sh2s);  break;  // 16
      case 2:  *val = p32x_sh2_read32(a, sh2s);       break;  // 32
      }
      return 1;
    }
  }
  return 0;
}
  2314. static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
  2315. {
  2316. #if PROPAGATE_CONSTANTS
  2317. gconst_new(dst, imm);
  2318. #else
  2319. int hr = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  2320. emith_move_r_imm(hr, imm);
  2321. #endif
  2322. }
  2323. static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
  2324. {
  2325. if (gconst_check(src) || rcache_is_cached(src))
  2326. rcache_alias_vreg(dst, src);
  2327. else {
  2328. int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  2329. emith_ctx_read(hr_d, src * 4);
  2330. }
  2331. }
  2332. static void emit_add_r_imm(sh2_reg_e r, u32 imm)
  2333. {
  2334. u32 val;
  2335. int isgc = gconst_get(r, &val);
  2336. int hr, hr2;
  2337. if (!isgc || rcache_is_cached(r)) {
  2338. // not constant, or r is already in cache
  2339. hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
  2340. emith_add_r_r_imm(hr, hr2, imm);
  2341. rcache_free(hr2);
  2342. if (isgc)
  2343. gconst_set(r, val + imm);
  2344. } else
  2345. gconst_new(r, val + imm);
  2346. }
  2347. static void emit_sub_r_imm(sh2_reg_e r, u32 imm)
  2348. {
  2349. u32 val;
  2350. int isgc = gconst_get(r, &val);
  2351. int hr, hr2;
  2352. if (!isgc || rcache_is_cached(r)) {
  2353. // not constant, or r is already in cache
  2354. hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
  2355. emith_sub_r_r_imm(hr, hr2, imm);
  2356. rcache_free(hr2);
  2357. if (isgc)
  2358. gconst_set(r, val - imm);
  2359. } else
  2360. gconst_new(r, val - imm);
  2361. }
  2362. static void emit_sync_t_to_sr(void)
  2363. {
  2364. // avoid reloading SR from context if there's nothing to do
  2365. if (emith_get_t_cond() >= 0) {
  2366. int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2367. emith_sync_t(sr);
  2368. }
  2369. }
  2370. // rd = @(arg0)
  2371. static int emit_memhandler_read(int size)
  2372. {
  2373. int hr;
  2374. emit_sync_t_to_sr();
  2375. rcache_clean_tmp();
  2376. #ifndef DRC_SR_REG
  2377. // must writeback cycles for poll detection stuff
  2378. if (guest_regs[SHR_SR].vreg != -1)
  2379. rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
  2380. #endif
  2381. rcache_invalidate_tmp();
  2382. if (size & MF_POLLING)
  2383. switch (size & MF_SIZEMASK) {
  2384. case 0: emith_call(sh2_drc_read8_poll); break; // 8
  2385. case 1: emith_call(sh2_drc_read16_poll); break; // 16
  2386. case 2: emith_call(sh2_drc_read32_poll); break; // 32
  2387. }
  2388. else
  2389. switch (size & MF_SIZEMASK) {
  2390. case 0: emith_call(sh2_drc_read8); break; // 8
  2391. case 1: emith_call(sh2_drc_read16); break; // 16
  2392. case 2: emith_call(sh2_drc_read32); break; // 32
  2393. }
  2394. hr = rcache_get_tmp_ret();
  2395. rcache_set_x16(hr, (size & MF_SIZEMASK) < 2, 0);
  2396. return hr;
  2397. }
  2398. // @(arg0) = arg1
  2399. static void emit_memhandler_write(int size)
  2400. {
  2401. emit_sync_t_to_sr();
  2402. rcache_clean_tmp();
  2403. #ifndef DRC_SR_REG
  2404. if (guest_regs[SHR_SR].vreg != -1)
  2405. rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
  2406. #endif
  2407. rcache_invalidate_tmp();
  2408. switch (size & MF_SIZEMASK) {
  2409. case 0: emith_call(sh2_drc_write8); break; // 8
  2410. case 1: emith_call(sh2_drc_write16); break; // 16
  2411. case 2: emith_call(sh2_drc_write32); break; // 32
  2412. }
  2413. }
  2414. // rd = @(Rs,#offs); rd < 0 -> return a temp
  2415. static int emit_memhandler_read_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
  2416. {
  2417. int hr, hr2;
  2418. u32 val;
  2419. #if PROPAGATE_CONSTANTS
  2420. if (emit_get_rom_data(sh2, rs, offs, size, &val)) {
  2421. if (rd == SHR_TMP) {
  2422. hr2 = rcache_get_tmp();
  2423. emith_move_r_imm(hr2, val);
  2424. } else {
  2425. emit_move_r_imm32(rd, val);
  2426. hr2 = rcache_get_reg(rd, RC_GR_RMW, NULL);
  2427. }
  2428. rcache_set_x16(hr2, val == (s16)val, val == (u16)val);
  2429. if (size & MF_POSTINCR)
  2430. emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2431. return hr2;
  2432. }
  2433. val = size & MF_POSTINCR;
  2434. hr = emit_get_rbase_and_offs(sh2, rs, val ? RC_GR_RMW : RC_GR_READ, &offs);
  2435. if (hr != -1) {
  2436. if (rd == SHR_TMP)
  2437. hr2 = rcache_get_tmp();
  2438. else
  2439. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  2440. switch (size & MF_SIZEMASK) {
  2441. case 0: emith_read8s_r_r_offs(hr2, hr, offs ^ 1); break; // 8
  2442. case 1: emith_read16s_r_r_offs(hr2, hr, offs); break; // 16
  2443. case 2: emith_read_r_r_offs(hr2, hr, offs); emith_ror(hr2, hr2, 16); break;
  2444. }
  2445. rcache_free(hr);
  2446. if (size & MF_POSTINCR)
  2447. emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2448. return hr2;
  2449. }
  2450. #endif
  2451. if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
  2452. hr = rcache_get_tmp_arg(0);
  2453. emith_move_r_imm(hr, val + offs);
  2454. if (size & MF_POSTINCR)
  2455. gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  2456. } else if (size & MF_POSTINCR) {
  2457. hr = rcache_get_tmp_arg(0);
  2458. hr2 = rcache_get_reg(rs, RC_GR_RMW, NULL);
  2459. emith_add_r_r_imm(hr, hr2, offs);
  2460. emith_add_r_imm(hr2, 1 << (size & MF_SIZEMASK));
  2461. if (gconst_get(rs, &val))
  2462. gconst_set(rs, val + (1 << (size & MF_SIZEMASK)));
  2463. } else {
  2464. hr = rcache_get_reg_arg(0, rs, &hr2);
  2465. if (offs || hr != hr2)
  2466. emith_add_r_r_imm(hr, hr2, offs);
  2467. }
  2468. hr = emit_memhandler_read(size);
  2469. if (rd == SHR_TMP)
  2470. hr2 = hr;
  2471. else
  2472. hr2 = rcache_map_reg(rd, hr);
  2473. if (hr != hr2) {
  2474. emith_move_r_r(hr2, hr);
  2475. rcache_free_tmp(hr);
  2476. }
  2477. return hr2;
  2478. }
  2479. // @(Rs,#offs) = rd; rd < 0 -> write arg1
  2480. static void emit_memhandler_write_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
  2481. {
  2482. int hr, hr2;
  2483. u32 val;
  2484. if (rd == SHR_TMP) {
  2485. host_arg2reg(hr2, 1); // already locked and prepared by caller
  2486. } else if ((size & MF_PREDECR) && rd == rs) { // must avoid caching rd in arg1
  2487. hr2 = rcache_get_reg_arg(1, rd, &hr);
  2488. if (hr != hr2) {
  2489. emith_move_r_r(hr2, hr);
  2490. rcache_free(hr2);
  2491. }
  2492. } else
  2493. hr2 = rcache_get_reg_arg(1, rd, NULL);
  2494. if (rd != SHR_TMP)
  2495. rcache_unlock(guest_regs[rd].vreg); // unlock in case rd is in arg0
  2496. if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
  2497. hr = rcache_get_tmp_arg(0);
  2498. if (size & MF_PREDECR) {
  2499. val -= 1 << (size & MF_SIZEMASK);
  2500. gconst_new(rs, val);
  2501. }
  2502. emith_move_r_imm(hr, val + offs);
  2503. } else if (offs || (size & MF_PREDECR)) {
  2504. if (size & MF_PREDECR)
  2505. emit_sub_r_imm(rs, 1 << (size & MF_SIZEMASK));
  2506. rcache_unlock(guest_regs[rs].vreg); // unlock in case rs is in arg0
  2507. hr = rcache_get_reg_arg(0, rs, &hr2);
  2508. if (offs || hr != hr2)
  2509. emith_add_r_r_imm(hr, hr2, offs);
  2510. } else
  2511. hr = rcache_get_reg_arg(0, rs, NULL);
  2512. emit_memhandler_write(size);
  2513. }
  2514. // rd = @(Rx,Ry); rd < 0 -> return a temp
  2515. static int emit_indirect_indexed_read(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
  2516. {
  2517. int hr, hr2;
  2518. int tx, ty;
  2519. #if PROPAGATE_CONSTANTS
  2520. u32 offs;
  2521. // if offs is larger than 0x01000000, it's most probably the base address part
  2522. if (gconst_get(ry, &offs) && offs < 0x01000000)
  2523. return emit_memhandler_read_rr(sh2, rd, rx, offs, size);
  2524. if (gconst_get(rx, &offs) && offs < 0x01000000)
  2525. return emit_memhandler_read_rr(sh2, rd, ry, offs, size);
  2526. #endif
  2527. hr = rcache_get_reg_arg(0, rx, &tx);
  2528. ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  2529. emith_add_r_r_r(hr, tx, ty);
  2530. hr = emit_memhandler_read(size);
  2531. if (rd == SHR_TMP)
  2532. hr2 = hr;
  2533. else
  2534. hr2 = rcache_map_reg(rd, hr);
  2535. if (hr != hr2) {
  2536. emith_move_r_r(hr2, hr);
  2537. rcache_free_tmp(hr);
  2538. }
  2539. return hr2;
  2540. }
  2541. // @(Rx,Ry) = rd; rd < 0 -> write arg1
  2542. static void emit_indirect_indexed_write(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
  2543. {
  2544. int hr, tx, ty;
  2545. #if PROPAGATE_CONSTANTS
  2546. u32 offs;
  2547. // if offs is larger than 0x01000000, it's most probably the base address part
  2548. if (gconst_get(ry, &offs) && offs < 0x01000000)
  2549. return emit_memhandler_write_rr(sh2, rd, rx, offs, size);
  2550. if (gconst_get(rx, &offs) && offs < 0x01000000)
  2551. return emit_memhandler_write_rr(sh2, rd, ry, offs, size);
  2552. #endif
  2553. if (rd != SHR_TMP)
  2554. rcache_get_reg_arg(1, rd, NULL);
  2555. hr = rcache_get_reg_arg(0, rx, &tx);
  2556. ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  2557. emith_add_r_r_r(hr, tx, ty);
  2558. emit_memhandler_write(size);
  2559. }
  2560. // @Rn+,@Rm+
  2561. static void emit_indirect_read_double(SH2 *sh2, int *rnr, int *rmr, sh2_reg_e rn, sh2_reg_e rm, int size)
  2562. {
  2563. int tmp;
  2564. // unlock rn, rm here to avoid REG shortage in MAC operation
  2565. tmp = emit_memhandler_read_rr(sh2, SHR_TMP, rn, 0, size | MF_POSTINCR);
  2566. rcache_unlock(guest_regs[rn].vreg);
  2567. tmp = rcache_save_tmp(tmp);
  2568. *rmr = emit_memhandler_read_rr(sh2, SHR_TMP, rm, 0, size | MF_POSTINCR);
  2569. rcache_unlock(guest_regs[rm].vreg);
  2570. *rnr = rcache_restore_tmp(tmp);
  2571. }
  2572. static void emit_do_static_regs(int is_write, int tmpr)
  2573. {
  2574. int i, r, count;
  2575. for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
  2576. if (guest_regs[i].flags & (GRF_STATIC|GRF_PINNED))
  2577. r = cache_regs[guest_regs[i].vreg].hreg;
  2578. else
  2579. continue;
  2580. for (count = 1; i < ARRAY_SIZE(guest_regs) - 1; i++, r++) {
  2581. if ((guest_regs[i + 1].flags & (GRF_STATIC|GRF_PINNED)) &&
  2582. cache_regs[guest_regs[i + 1].vreg].hreg == r + 1)
  2583. count++;
  2584. else
  2585. break;
  2586. }
  2587. if (count > 1) {
  2588. // i, r point to last item
  2589. if (is_write)
  2590. emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
  2591. else
  2592. emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
  2593. } else {
  2594. if (is_write)
  2595. emith_ctx_write(r, i * 4);
  2596. else
  2597. emith_ctx_read(r, i * 4);
  2598. }
  2599. }
  2600. }
  2601. #if DIV_OPTIMIZER
  2602. // divide operation replacement functions, called by compiled code. Only the
  2603. // 32:16 cases and the 64:32 cases described in the SH2 prog man are replaced.
  2604. static uint32_t REGPARM(2) sh2_drc_divu32(uint32_t dv, uint32_t ds)
  2605. {
  2606. if (ds && ds >= dv) {
  2607. // good case: no divide by 0, and no result overflow
  2608. uint32_t quot = dv / (ds>>16), rem = dv - (quot * (ds>>16));
  2609. if (~quot&1) rem -= ds>>16;
  2610. return (uint16_t)quot | ((2*rem + (quot>>31)) << 16);
  2611. } else {
  2612. // bad case: use the sh2 algo to get the right result
  2613. int q = 0, t = 0, s = 16;
  2614. while (s--) {
  2615. uint32_t v = dv>>31;
  2616. dv = (dv<<1) | t;
  2617. t = v;
  2618. v = dv;
  2619. if (q) dv += ds, q = dv < v;
  2620. else dv -= ds, q = !(dv < v);
  2621. q ^= t, t = !q;
  2622. }
  2623. return (dv<<1) | t;
  2624. }
  2625. }
  2626. static uint32_t REGPARM(3) sh2_drc_divu64(uint32_t dh, uint32_t *dl, uint32_t ds)
  2627. {
  2628. if (ds > 1 && ds >= dh) {
  2629. // good case: no divide by 0, and no result overflow
  2630. uint64_t dv = *dl | ((uint64_t)dh << 32);
  2631. uint32_t quot = dv / ds, rem = dv - (quot * ds);
  2632. if (~quot&1) rem -= ds;
  2633. *dl = quot;
  2634. return rem;
  2635. } else {
  2636. // bad case: use the sh2 algo to get the right result
  2637. uint64_t dv = *dl | ((uint64_t)dh << 32);
  2638. int q = 0, t = 0, s = 32;
  2639. while (s--) {
  2640. uint64_t v = dv>>63;
  2641. dv = (dv<<1) | t;
  2642. t = v;
  2643. v = dv;
  2644. if (q) dv += ((uint64_t)ds << 32), q = dv < v;
  2645. else dv -= ((uint64_t)ds << 32), q = !(dv < v);
  2646. q ^= t, t = !q;
  2647. }
  2648. *dl = (dv<<1) | t;
  2649. return (dv>>32);
  2650. }
  2651. }
  2652. static uint32_t REGPARM(2) sh2_drc_divs32(int32_t dv, int32_t ds)
  2653. {
  2654. uint32_t adv = abs(dv), ads = abs(ds)>>16;
  2655. if (ads > 1 && ads > adv>>16 && (int32_t)ads > 0 && !(uint16_t)ds) {
  2656. // good case: no divide by 0, and no result overflow
  2657. uint32_t quot = adv / ads, rem = adv - (quot * ads);
  2658. int m1 = (rem ? dv^ds : ds) < 0;
  2659. if (rem && dv < 0) rem = (quot&1 ? -rem : +ads-rem);
  2660. else rem = (quot&1 ? +rem : -ads+rem);
  2661. quot = ((dv^ds)<0 ? -quot : +quot) - m1;
  2662. return (uint16_t)quot | ((2*rem + (quot>>31)) << 16);
  2663. } else {
  2664. // bad case: use the sh2 algo to get the right result
  2665. int m = (uint32_t)ds>>31, q = (uint32_t)dv>>31, t = m^q, s = 16;
  2666. while (s--) {
  2667. uint32_t v = (uint32_t)dv>>31;
  2668. dv = (dv<<1) | t;
  2669. t = v;
  2670. v = dv;
  2671. if (m^q) dv += ds, q = (uint32_t)dv < v;
  2672. else dv -= ds, q = !((uint32_t)dv < v);
  2673. q ^= m^t, t = !(m^q);
  2674. }
  2675. return (dv<<1) | t;
  2676. }
  2677. }
  2678. static uint32_t REGPARM(3) sh2_drc_divs64(int32_t dh, uint32_t *dl, int32_t ds)
  2679. {
  2680. int64_t _dv = *dl | ((int64_t)dh << 32);
  2681. uint64_t adv = (_dv < 0 ? -_dv : _dv); // llabs isn't in older toolchains
  2682. uint32_t ads = abs(ds);
  2683. if (ads > 1 && ads > adv>>32 && (int64_t)adv > 0) {
  2684. // good case: no divide by 0, and no result overflow
  2685. uint32_t quot = adv / ads, rem = adv - ((uint64_t)quot * ads);
  2686. int m1 = (rem ? dh^ds : ds) < 0;
  2687. if (rem && dh < 0) rem = (quot&1 ? -rem : +ads-rem);
  2688. else rem = (quot&1 ? +rem : -ads+rem);
  2689. quot = ((dh^ds)<0 ? -quot : +quot) - m1;
  2690. *dl = quot;
  2691. return rem;
  2692. } else {
  2693. // bad case: use the sh2 algo to get the right result
  2694. uint64_t dv = *dl | ((uint64_t)dh << 32);
  2695. int m = (uint32_t)ds>>31, q = (uint64_t)dv>>63, t = m^q, s = 32;
  2696. while (s--) {
  2697. int64_t v = (uint64_t)dv>>63;
  2698. dv = (dv<<1) | t;
  2699. t = v;
  2700. v = dv;
  2701. if (m^q) dv += ((uint64_t)ds << 32), q = dv < v;
  2702. else dv -= ((uint64_t)ds << 32), q = !(dv < v);
  2703. q ^= m^t, t = !(m^q);
  2704. }
  2705. *dl = (dv<<1) | t;
  2706. return (dv>>32);
  2707. }
  2708. }
  2709. #endif
  2710. // block local link stuff
  2711. struct linkage {
  2712. u32 pc;
  2713. void *ptr;
  2714. struct block_link *bl;
  2715. u32 mask;
  2716. };
  2717. static inline int find_in_linkage(const struct linkage *array, int size, u32 pc)
  2718. {
  2719. size_t i;
  2720. for (i = 0; i < size; i++)
  2721. if (pc == array[i].pc)
  2722. return i;
  2723. return -1;
  2724. }
  2725. static int find_in_sorted_linkage(const struct linkage *array, int size, u32 pc)
  2726. {
  2727. // binary search in sorted array
  2728. int left = 0, right = size-1;
  2729. while (left <= right)
  2730. {
  2731. int middle = (left + right) / 2;
  2732. if (array[middle].pc == pc)
  2733. return middle;
  2734. else if (array[middle].pc < pc)
  2735. left = middle + 1;
  2736. else
  2737. right = middle - 1;
  2738. }
  2739. return -1;
  2740. }
  2741. static void emit_branch_linkage_code(SH2 *sh2, struct block_desc *block, int tcache_id,
  2742. const struct linkage *targets, int target_count,
  2743. const struct linkage *links, int link_count)
  2744. {
  2745. struct block_link *bl;
  2746. int u, v, tmp;
  2747. emith_flush();
  2748. for (u = 0; u < link_count; u++) {
  2749. emith_pool_check();
  2750. // look up local branch targets
  2751. if (links[u].mask & 0x2) {
  2752. v = find_in_sorted_linkage(targets, target_count, links[u].pc);
  2753. if (v < 0 || ! targets[v].ptr) {
  2754. // forward branch not yet resolved, prepare external linking
  2755. emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
  2756. bl = dr_prepare_ext_branch(block->entryp, links[u].pc, sh2->is_slave, tcache_id);
  2757. if (bl)
  2758. bl->type = BL_LDJMP;
  2759. tmp = rcache_get_tmp_arg(0);
  2760. emith_move_r_imm(tmp, links[u].pc);
  2761. rcache_free_tmp(tmp);
  2762. emith_jump_patchable(sh2_drc_dispatcher);
  2763. } else if (emith_jump_patch_inrange(links[u].ptr, targets[v].ptr)) {
  2764. // inrange local branch
  2765. emith_jump_patch(links[u].ptr, targets[v].ptr, NULL);
  2766. } else {
  2767. // far local branch
  2768. emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
  2769. emith_jump(targets[v].ptr);
  2770. }
  2771. } else {
  2772. // external or exit, emit blx area entry
  2773. void *target = (links[u].mask & 0x1 ? sh2_drc_exit : sh2_drc_dispatcher);
  2774. if (links[u].bl)
  2775. links[u].bl->blx = tcache_ptr;
  2776. emith_jump_patch(links[u].ptr, tcache_ptr, NULL);
  2777. tmp = rcache_get_tmp_arg(0);
  2778. emith_move_r_imm(tmp, links[u].pc & ~1);
  2779. rcache_free_tmp(tmp);
  2780. emith_jump(target);
  2781. }
  2782. }
  2783. }
  2784. #define DELAY_SAVE_T(sr) { \
  2785. int t_ = rcache_get_tmp(); \
  2786. emith_bic_r_imm(sr, T_save); \
  2787. emith_and_r_r_imm(t_, sr, 1); \
  2788. emith_or_r_r_lsl(sr, t_, T_SHIFT); \
  2789. rcache_free_tmp(t_); \
  2790. }
  2791. #define FLUSH_CYCLES(sr) \
  2792. if (cycles > 0) { \
  2793. emith_sub_r_imm(sr, cycles << 12); \
  2794. cycles = 0; \
  2795. }
  2796. static void *dr_get_pc_base(u32 pc, SH2 *sh2);
  2797. static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
  2798. {
  2799. // branch targets in current block
  2800. static struct linkage branch_targets[MAX_LOCAL_TARGETS];
  2801. int branch_target_count = 0;
  2802. // unresolved local or external targets with block link/exit area if needed
  2803. static struct linkage blx_targets[MAX_LOCAL_BRANCHES];
  2804. int blx_target_count = 0;
  2805. static u8 op_flags[BLOCK_INSN_LIMIT];
  2806. enum flg_states { FLG_UNKNOWN, FLG_UNUSED, FLG_0, FLG_1 };
  2807. struct drcf {
  2808. int delay_reg:8;
  2809. u32 loop_type:8;
  2810. u32 polling:8;
  2811. u32 pinning:1;
  2812. u32 test_irq:1;
  2813. u32 pending_branch_direct:1;
  2814. u32 pending_branch_indirect:1;
  2815. u32 Tflag:2, Mflag:2;
  2816. } drcf = { 0, };
  2817. #if LOOP_OPTIMIZER
  2818. // loops with pinned registers for optimzation
  2819. // pinned regs are like statics and don't need saving/restoring inside a loop
  2820. static struct linkage pinned_loops[MAX_LOCAL_TARGETS/16];
  2821. int pinned_loop_count = 0;
  2822. #endif
  2823. // PC of current, first, last SH2 insn
  2824. u32 pc, base_pc, end_pc;
  2825. u32 base_literals, end_literals;
  2826. u8 *block_entry_ptr;
  2827. struct block_desc *block;
  2828. struct block_entry *entry;
  2829. struct block_link *bl;
  2830. u16 *dr_pc_base;
  2831. struct op_data *opd;
  2832. int blkid_main = 0;
  2833. int skip_op = 0;
  2834. int tmp, tmp2;
  2835. int cycles;
  2836. int i, v;
  2837. u32 u, m1, m2, m3, m4;
  2838. int op;
  2839. u16 crc;
  2840. base_pc = sh2->pc;
  2841. // get base/validate PC
  2842. dr_pc_base = dr_get_pc_base(base_pc, sh2);
  2843. if (dr_pc_base == (void *)-1) {
  2844. printf("invalid PC, aborting: %08lx\n", (long)base_pc);
  2845. // FIXME: be less destructive
  2846. exit(1);
  2847. }
  2848. // initial passes to disassemble and analyze the block
  2849. crc = scan_block(base_pc, sh2->is_slave, op_flags, &end_pc, &base_literals, &end_literals);
  2850. end_literals = dr_check_nolit(base_literals, end_literals, tcache_id);
  2851. if (base_literals == end_literals) // map empty lit section to end of code
  2852. base_literals = end_literals = end_pc;
  2853. // if there is already a translated but inactive block, reuse it
  2854. block = dr_find_inactive_block(tcache_id, crc, base_pc, end_pc - base_pc,
  2855. base_literals, end_literals - base_literals);
  2856. if (block) {
  2857. dbg(2, "== %csh2 reuse block %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
  2858. base_pc, end_pc, base_literals, end_literals, block->entryp->tcache_ptr);
  2859. dr_activate_block(block, tcache_id, sh2->is_slave);
  2860. emith_update_cache();
  2861. return block->entryp[0].tcache_ptr;
  2862. }
  2863. // collect branch_targets that don't land on delay slots
  2864. m1 = m2 = m3 = m4 = v = op = 0;
  2865. for (pc = base_pc, i = 0; pc < end_pc; i++, pc += 2) {
  2866. if (op_flags[i] & OF_DELAY_OP)
  2867. op_flags[i] &= ~OF_BTARGET;
  2868. if (op_flags[i] & OF_BTARGET) {
  2869. if (branch_target_count < ARRAY_SIZE(branch_targets))
  2870. branch_targets[branch_target_count++] = (struct linkage) { .pc = pc };
  2871. else {
  2872. printf("warning: linkage overflow\n");
  2873. end_pc = pc;
  2874. break;
  2875. }
  2876. }
  2877. if (ops[i].op == OP_LDC && (ops[i].dest & BITMASK1(SHR_SR)) && pc+2 < end_pc)
  2878. op_flags[i+1] |= OF_BTARGET; // RTE entrypoint in case of SR.IMASK change
  2879. // unify T and SR since rcache doesn't know about "virtual" guest regs
  2880. if (ops[i].source & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
  2881. if (ops[i].dest & BITMASK1(SHR_T)) ops[i].source |= BITMASK1(SHR_SR);
  2882. if (ops[i].dest & BITMASK1(SHR_T)) ops[i].dest |= BITMASK1(SHR_SR);
  2883. #if LOOP_DETECTION
  2884. // loop types detected:
  2885. // 1. target: ... BRA target -> idle loop
  2886. // 2. target: ... delay insn ... BF target -> delay loop
  2887. // 3. target: ... poll insn ... BF/BT target -> poll loop
  2888. // 4. target: ... poll insn ... BF/BT exit ... BRA target, exit: -> poll
  2889. // conditions:
  2890. // a. no further branch targets between target and back jump.
  2891. // b. no unconditional branch insn inside the loop.
  2892. // c. exactly one poll or delay insn is allowed inside a delay/poll loop
  2893. // (scan_block marks loops only if they meet conditions a through c)
  2894. // d. idle loops do not modify anything but PC,SR and contain no branches
  2895. // e. delay/poll loops do not modify anything but the concerned reg,PC,SR
  2896. // f. loading constants into registers inside the loop is allowed
  2897. // g. a delay/poll loop must have a conditional branch somewhere
  2898. // h. an idle loop must not have a conditional branch
  2899. if (op_flags[i] & OF_BTARGET) {
  2900. // possible loop entry point
  2901. drcf.loop_type = op_flags[i] & OF_LOOP;
  2902. drcf.pending_branch_direct = drcf.pending_branch_indirect = 0;
  2903. op = OF_IDLE_LOOP; // loop type
  2904. v = i;
  2905. m1 = m2 = m3 = m4 = 0;
  2906. if (!drcf.loop_type) // reset basic loop it it isn't recognized as loop
  2907. op_flags[i] &= ~OF_BASIC_LOOP;
  2908. }
  2909. if (drcf.loop_type) {
  2910. // calculate reg masks for loop pinning
  2911. m4 |= ops[i].source & ~m3;
  2912. m3 |= ops[i].dest;
  2913. // detect loop type, and store poll/delay register
  2914. if (op_flags[i] & OF_POLL_INSN) {
  2915. op = OF_POLL_LOOP;
  2916. m1 |= ops[i].dest; // loop poll/delay regs
  2917. } else if (op_flags[i] & OF_DELAY_INSN) {
  2918. op = OF_DELAY_LOOP;
  2919. m1 |= ops[i].dest;
  2920. } else if (ops[i].op != OP_LOAD_POOL && ops[i].op != OP_LOAD_CONST
  2921. && (ops[i].op != OP_MOVE || op != OF_POLL_LOOP)) {
  2922. // not (MOV @(PC) or MOV # or (MOV reg and poll)), condition f
  2923. m2 |= ops[i].dest; // regs modified by other insns
  2924. }
  2925. // branch detector
  2926. if (OP_ISBRAIMM(ops[i].op)) {
  2927. if (ops[i].imm == base_pc + 2*v)
  2928. drcf.pending_branch_direct = 1; // backward branch detected
  2929. else
  2930. op_flags[v] &= ~OF_BASIC_LOOP; // no basic loop
  2931. }
  2932. if (OP_ISBRACND(ops[i].op))
  2933. drcf.pending_branch_indirect = 1; // conditions g,h - cond.branch
  2934. // poll/idle loops terminate with their backwards branch to the loop start
  2935. if (drcf.pending_branch_direct && !(op_flags[i+1] & OF_DELAY_OP)) {
  2936. m2 &= ~(m1 | BITMASK3(SHR_PC, SHR_SR, SHR_T)); // conditions d,e + g,h
  2937. if (m2 || ((op == OF_IDLE_LOOP) == (drcf.pending_branch_indirect)))
  2938. op = 0; // conditions not met
  2939. op_flags[v] = (op_flags[v] & ~OF_LOOP) | op; // set loop type
  2940. drcf.loop_type = 0;
  2941. #if LOOP_OPTIMIZER
  2942. if (op_flags[v] & OF_BASIC_LOOP) {
  2943. m3 &= ~rcache_regs_static & ~BITMASK5(SHR_PC, SHR_PR, SHR_SR, SHR_T, SHR_MEM);
  2944. if (m3 && count_bits(m3) < count_bits(rcache_vregs_reg) &&
  2945. pinned_loop_count < ARRAY_SIZE(pinned_loops)-1) {
  2946. pinned_loops[pinned_loop_count++] =
  2947. (struct linkage) { .pc = base_pc + 2*v, .mask = m3 };
  2948. } else
  2949. op_flags[v] &= ~OF_BASIC_LOOP;
  2950. }
  2951. #endif
  2952. }
  2953. }
  2954. #endif
  2955. }
  2956. tcache_ptr = dr_prepare_cache(tcache_id, (end_pc - base_pc) / 2, branch_target_count);
  2957. #if (DRC_DEBUG & 4)
  2958. tcache_dsm_ptrs[tcache_id] = tcache_ptr;
  2959. #endif
  2960. block = dr_add_block(branch_target_count, base_pc, end_pc - base_pc,
  2961. base_literals, end_literals-base_literals, crc, sh2->is_slave, &blkid_main);
  2962. if (block == NULL)
  2963. return NULL;
  2964. block_entry_ptr = tcache_ptr;
  2965. dbg(2, "== %csh2 block #%d,%d %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
  2966. tcache_id, blkid_main, base_pc, end_pc, base_literals, end_literals, block_entry_ptr);
  2967. // clear stale state after compile errors
  2968. rcache_invalidate();
  2969. emith_invalidate_t();
  2970. drcf = (struct drcf) { 0 };
  2971. #if LOOP_OPTIMIZER
  2972. pinned_loops[pinned_loop_count].pc = -1;
  2973. pinned_loop_count = 0;
  2974. #endif
  2975. // -------------------------------------------------
  2976. // 3rd pass: actual compilation
  2977. pc = base_pc;
  2978. cycles = 0;
  2979. for (i = 0; pc < end_pc; i++)
  2980. {
  2981. u32 delay_dep_fw = 0, delay_dep_bk = 0;
  2982. int tmp3, tmp4;
  2983. int sr;
  2984. if (op_flags[i] & OF_BTARGET)
  2985. {
  2986. if (pc != base_pc)
  2987. {
  2988. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2989. FLUSH_CYCLES(sr);
  2990. emith_sync_t(sr);
  2991. drcf.Mflag = FLG_UNKNOWN;
  2992. rcache_flush();
  2993. emith_flush();
  2994. }
  2995. // make block entry
  2996. v = block->entry_count;
  2997. entry = &block->entryp[v];
  2998. if (v < branch_target_count)
  2999. {
  3000. entry = &block->entryp[v];
  3001. entry->pc = pc;
  3002. entry->tcache_ptr = tcache_ptr;
  3003. entry->links = entry->o_links = NULL;
  3004. #if (DRC_DEBUG & 2)
  3005. entry->block = block;
  3006. #endif
  3007. block->entry_count++;
  3008. dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p",
  3009. sh2->is_slave ? 's' : 'm', tcache_id, blkid_main,
  3010. pc, tcache_ptr);
  3011. }
  3012. else {
  3013. dbg(1, "too many entryp for block #%d,%d pc=%08x",
  3014. tcache_id, blkid_main, pc);
  3015. break;
  3016. }
  3017. v = find_in_sorted_linkage(branch_targets, branch_target_count, pc);
  3018. if (v >= 0)
  3019. branch_targets[v].ptr = tcache_ptr;
  3020. #if LOOP_DETECTION
  3021. drcf.loop_type = op_flags[i] & OF_LOOP;
  3022. drcf.delay_reg = -1;
  3023. drcf.polling = (drcf.loop_type == OF_POLL_LOOP ? MF_POLLING : 0);
  3024. #endif
  3025. rcache_clean();
  3026. #if (DRC_DEBUG & 0x10)
  3027. tmp = rcache_get_tmp_arg(0);
  3028. emith_move_r_imm(tmp, pc);
  3029. tmp = emit_memhandler_read(1);
  3030. tmp2 = rcache_get_tmp();
  3031. tmp3 = rcache_get_tmp();
  3032. emith_move_r_imm(tmp2, (s16)FETCH_OP(pc));
  3033. emith_move_r_imm(tmp3, 0);
  3034. emith_cmp_r_r(tmp, tmp2);
  3035. EMITH_SJMP_START(DCOND_EQ);
  3036. emith_read_r_r_offs_c(DCOND_NE, tmp3, tmp3, 0); // crash
  3037. EMITH_SJMP_END(DCOND_EQ);
  3038. rcache_free_tmp(tmp);
  3039. rcache_free_tmp(tmp2);
  3040. rcache_free_tmp(tmp3);
  3041. #endif
  3042. // check cycles
  3043. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3044. #if LOOP_OPTIMIZER
  3045. if (op_flags[i] & OF_BASIC_LOOP) {
  3046. if (pinned_loops[pinned_loop_count].pc == pc) {
  3047. // pin needed regs on loop entry
  3048. FOR_ALL_BITS_SET_DO(pinned_loops[pinned_loop_count].mask, v, rcache_pin_reg(v));
  3049. emith_flush();
  3050. // store current PC as loop target
  3051. pinned_loops[pinned_loop_count].ptr = tcache_ptr;
  3052. drcf.pinning = 1;
  3053. } else
  3054. op_flags[i] &= ~OF_BASIC_LOOP;
  3055. }
  3056. if (op_flags[i] & OF_BASIC_LOOP) {
  3057. // if exiting a pinned loop pinned regs must be written back to ctx
  3058. // since they are reloaded in the loop entry code
  3059. emith_cmp_r_imm(sr, 0);
  3060. EMITH_JMP_START(DCOND_GT);
  3061. rcache_save_pinned();
  3062. if (blx_target_count < ARRAY_SIZE(blx_targets)) {
  3063. // exit via stub in blx table (saves some 1-3 insns in the main flow)
  3064. blx_targets[blx_target_count++] =
  3065. (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
  3066. emith_jump_patchable(tcache_ptr);
  3067. } else {
  3068. // blx table full, must inline exit code
  3069. tmp = rcache_get_tmp_arg(0);
  3070. emith_move_r_imm(tmp, pc);
  3071. emith_jump(sh2_drc_exit);
  3072. rcache_free_tmp(tmp);
  3073. }
  3074. EMITH_JMP_END(DCOND_GT);
  3075. } else
  3076. #endif
  3077. {
  3078. if (blx_target_count < ARRAY_SIZE(blx_targets)) {
  3079. // exit via stub in blx table (saves some 1-3 insns in the main flow)
  3080. emith_cmp_r_imm(sr, 0);
  3081. blx_targets[blx_target_count++] =
  3082. (struct linkage) { .pc = pc, .ptr = tcache_ptr, .mask = 0x1 };
  3083. emith_jump_cond_patchable(DCOND_LE, tcache_ptr);
  3084. } else {
  3085. // blx table full, must inline exit code
  3086. tmp = rcache_get_tmp_arg(0);
  3087. emith_cmp_r_imm(sr, 0);
  3088. EMITH_SJMP_START(DCOND_GT);
  3089. emith_move_r_imm_c(DCOND_LE, tmp, pc);
  3090. emith_jump_cond(DCOND_LE, sh2_drc_exit);
  3091. EMITH_SJMP_END(DCOND_GT);
  3092. rcache_free_tmp(tmp);
  3093. }
  3094. }
  3095. #if (DRC_DEBUG & 32)
  3096. // block hit counter
  3097. tmp = rcache_get_tmp_arg(0);
  3098. tmp2 = rcache_get_tmp_arg(1);
  3099. emith_move_r_ptr_imm(tmp, (uptr)entry);
  3100. emith_read_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  3101. emith_add_r_imm(tmp2, 1);
  3102. emith_write_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  3103. rcache_free_tmp(tmp);
  3104. rcache_free_tmp(tmp2);
  3105. #endif
  3106. #if (DRC_DEBUG & (8|256|512|1024))
  3107. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3108. emith_sync_t(sr);
  3109. rcache_clean();
  3110. tmp = rcache_used_hregs_mask();
  3111. emith_save_caller_regs(tmp);
  3112. emit_do_static_regs(1, 0);
  3113. rcache_get_reg_arg(2, SHR_SR, NULL);
  3114. tmp2 = rcache_get_tmp_arg(0);
  3115. tmp3 = rcache_get_tmp_arg(1);
  3116. tmp4 = rcache_get_tmp();
  3117. emith_move_r_ptr_imm(tmp2, tcache_ptr);
  3118. emith_move_r_r_ptr(tmp3, CONTEXT_REG);
  3119. emith_move_r_imm(tmp4, pc);
  3120. emith_ctx_write(tmp4, SHR_PC * 4);
  3121. rcache_invalidate_tmp();
  3122. emith_abicall(sh2_drc_log_entry);
  3123. emith_restore_caller_regs(tmp);
  3124. #endif
  3125. do_host_disasm(tcache_id);
  3126. rcache_unlock_all();
  3127. }
  3128. #ifdef DRC_CMP
  3129. if (!(op_flags[i] & OF_DELAY_OP)) {
  3130. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3131. FLUSH_CYCLES(sr);
  3132. emith_sync_t(sr);
  3133. emit_move_r_imm32(SHR_PC, pc);
  3134. rcache_clean();
  3135. tmp = rcache_used_hregs_mask();
  3136. emith_save_caller_regs(tmp);
  3137. emit_do_static_regs(1, 0);
  3138. emith_pass_arg_r(0, CONTEXT_REG);
  3139. emith_abicall(do_sh2_cmp);
  3140. emith_restore_caller_regs(tmp);
  3141. }
  3142. #endif
  3143. // emit blx area if limits are approached
  3144. if (blx_target_count && (blx_target_count > ARRAY_SIZE(blx_targets)-4 ||
  3145. !emith_jump_patch_inrange(blx_targets[0].ptr, tcache_ptr+0x100))) {
  3146. u8 *jp;
  3147. rcache_invalidate_tmp();
  3148. jp = tcache_ptr;
  3149. emith_jump_patchable(tcache_ptr);
  3150. emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
  3151. branch_target_count, blx_targets, blx_target_count);
  3152. blx_target_count = 0;
  3153. do_host_disasm(tcache_id);
  3154. emith_jump_patch(jp, tcache_ptr, NULL);
  3155. }
  3156. emith_pool_check();
  3157. opd = &ops[i];
  3158. op = FETCH_OP(pc);
  3159. #if (DRC_DEBUG & 4)
  3160. DasmSH2(sh2dasm_buff, pc, op);
  3161. if (op_flags[i] & OF_BTARGET) {
  3162. if ((op_flags[i] & OF_LOOP) == OF_DELAY_LOOP) tmp3 = '+';
  3163. else if ((op_flags[i] & OF_LOOP) == OF_POLL_LOOP) tmp3 = '=';
  3164. else if ((op_flags[i] & OF_LOOP) == OF_IDLE_LOOP) tmp3 = '~';
  3165. else tmp3 = '*';
  3166. } else if (drcf.loop_type) tmp3 = '.';
  3167. else tmp3 = ' ';
  3168. printf("%c%08x %04x %s\n", tmp3, pc, op, sh2dasm_buff);
  3169. #endif
  3170. pc += 2;
  3171. #if (DRC_DEBUG & 2)
  3172. insns_compiled++;
  3173. #endif
  3174. if (skip_op > 0) {
  3175. skip_op--;
  3176. continue;
  3177. }
  3178. if (op_flags[i] & OF_DELAY_OP)
  3179. {
  3180. // handle delay slot dependencies
  3181. delay_dep_fw = opd->dest & ops[i-1].source;
  3182. delay_dep_bk = opd->source & ops[i-1].dest;
  3183. if (delay_dep_fw & BITMASK1(SHR_T)) {
  3184. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3185. emith_sync_t(sr);
  3186. DELAY_SAVE_T(sr);
  3187. }
  3188. if (delay_dep_bk & BITMASK1(SHR_PC)) {
  3189. if (opd->op != OP_LOAD_POOL && opd->op != OP_MOVA) {
  3190. // can only be those 2 really..
  3191. elprintf_sh2(sh2, EL_ANOMALY,
  3192. "drc: illegal slot insn %04x @ %08x?", op, pc - 2);
  3193. }
  3194. // store PC for MOVA/MOV @PC address calculation
  3195. if (opd->imm != 0)
  3196. ; // case OP_BRANCH - addr already resolved in scan_block
  3197. else {
  3198. switch (ops[i-1].op) {
  3199. case OP_BRANCH:
  3200. emit_move_r_imm32(SHR_PC, ops[i-1].imm);
  3201. break;
  3202. case OP_BRANCH_CT:
  3203. case OP_BRANCH_CF:
  3204. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3205. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
  3206. emith_move_r_imm(tmp, pc);
  3207. tmp2 = emith_tst_t(sr, (ops[i-1].op == OP_BRANCH_CT));
  3208. tmp3 = emith_invert_cond(tmp2);
  3209. EMITH_SJMP_START(tmp3);
  3210. emith_move_r_imm_c(tmp2, tmp, ops[i-1].imm);
  3211. EMITH_SJMP_END(tmp3);
  3212. break;
  3213. case OP_BRANCH_N: // BT/BF known not to be taken
  3214. // XXX could modify opd->imm instead?
  3215. emit_move_r_imm32(SHR_PC, pc);
  3216. break;
  3217. // case OP_BRANCH_R OP_BRANCH_RF - PC already loaded
  3218. }
  3219. }
  3220. }
  3221. //if (delay_dep_fw & ~BITMASK1(SHR_T))
  3222. // dbg(1, "unhandled delay_dep_fw: %x", delay_dep_fw & ~BITMASK1(SHR_T));
  3223. if (delay_dep_bk & ~BITMASK2(SHR_PC, SHR_PR))
  3224. dbg(1, "unhandled delay_dep_bk: %x", delay_dep_bk);
  3225. }
  3226. // inform cache about future register usage
  3227. u32 late = 0; // regs read by future ops
  3228. u32 write = 0; // regs written to (to detect write before read)
  3229. u32 soon = 0; // regs read soon
  3230. for (v = 1; v <= 9; v++) {
  3231. // no sense in looking any further than the next rcache flush
  3232. tmp = ((op_flags[i+v] & OF_BTARGET) || (op_flags[i+v-1] & OF_DELAY_OP) ||
  3233. (OP_ISBRACND(opd[v-1].op) && !(op_flags[i+v] & OF_DELAY_OP)));
  3234. // XXX looking behind cond branch to avoid evicting regs used later?
  3235. if (pc + 2*v <= end_pc && !tmp) { // (pc already incremented above)
  3236. late |= opd[v].source & ~write;
  3237. // ignore source regs after they have been written to
  3238. write |= opd[v].dest;
  3239. // regs needed in the next few instructions
  3240. if (v <= 4)
  3241. soon = late;
  3242. } else
  3243. break;
  3244. }
  3245. rcache_set_usage_now(opd[0].source); // current insn
  3246. rcache_set_usage_soon(soon); // insns 1-4
  3247. rcache_set_usage_late(late & ~soon); // insns 5-9
  3248. rcache_set_usage_discard(write & ~(late|soon));
  3249. if (v <= 9)
  3250. // upcoming rcache_flush, start writing back unused dirty stuff
  3251. rcache_clean_masked(rcache_dirty_mask() & ~(write|opd[0].dest));
  3252. switch (opd->op)
  3253. {
  3254. case OP_BRANCH_N:
  3255. // never taken, just use up cycles
  3256. goto end_op;
  3257. case OP_BRANCH:
  3258. case OP_BRANCH_CT:
  3259. case OP_BRANCH_CF:
  3260. if (opd->dest & BITMASK1(SHR_PR))
  3261. emit_move_r_imm32(SHR_PR, pc + 2);
  3262. drcf.pending_branch_direct = 1;
  3263. goto end_op;
  3264. case OP_BRANCH_R:
  3265. if (opd->dest & BITMASK1(SHR_PR))
  3266. emit_move_r_imm32(SHR_PR, pc + 2);
  3267. emit_move_r_r(SHR_PC, opd->rm);
  3268. drcf.pending_branch_indirect = 1;
  3269. goto end_op;
  3270. case OP_BRANCH_RF:
  3271. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3272. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
  3273. emith_move_r_imm(tmp, pc + 2);
  3274. if (opd->dest & BITMASK1(SHR_PR)) {
  3275. tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE, NULL);
  3276. emith_move_r_r(tmp3, tmp);
  3277. }
  3278. emith_add_r_r(tmp, tmp2);
  3279. if (gconst_get(GET_Rn(), &u))
  3280. gconst_set(SHR_PC, pc + 2 + u);
  3281. drcf.pending_branch_indirect = 1;
  3282. goto end_op;
  3283. case OP_SLEEP: // SLEEP 0000000000011011
  3284. printf("TODO sleep\n");
  3285. goto end_op;
  3286. case OP_RTE: // RTE 0000000000101011
  3287. emith_invalidate_t();
  3288. // pop PC
  3289. tmp = emit_memhandler_read_rr(sh2, SHR_PC, SHR_SP, 0, 2 | MF_POSTINCR);
  3290. rcache_free(tmp);
  3291. // pop SR
  3292. tmp = emit_memhandler_read_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_POSTINCR);
  3293. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3294. emith_write_sr(sr, tmp);
  3295. rcache_free_tmp(tmp);
  3296. drcf.test_irq = 1;
  3297. drcf.pending_branch_indirect = 1;
  3298. goto end_op;
  3299. case OP_UNDEFINED:
  3300. elprintf_sh2(sh2, EL_ANOMALY, "drc: unhandled op %04x @ %08x", op, pc-2);
  3301. opd->imm = (op_flags[i] & OF_B_IN_DS) ? 6 : 4;
  3302. // fallthrough
  3303. case OP_TRAPA: // TRAPA #imm 11000011iiiiiiii
  3304. // push SR
  3305. tmp = rcache_get_reg_arg(1, SHR_SR, &tmp2);
  3306. emith_sync_t(tmp2);
  3307. emith_clear_msb(tmp, tmp2, 22);
  3308. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  3309. // push PC
  3310. if (opd->op == OP_TRAPA) {
  3311. tmp = rcache_get_tmp_arg(1);
  3312. emith_move_r_imm(tmp, pc);
  3313. } else if (drcf.pending_branch_indirect) {
  3314. tmp = rcache_get_reg_arg(1, SHR_PC, NULL);
  3315. } else {
  3316. tmp = rcache_get_tmp_arg(1);
  3317. emith_move_r_imm(tmp, pc - 2);
  3318. }
  3319. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  3320. // obtain new PC
  3321. emit_memhandler_read_rr(sh2, SHR_PC, SHR_VBR, opd->imm * 4, 2);
  3322. // indirect jump -> back to dispatcher
  3323. drcf.pending_branch_indirect = 1;
  3324. goto end_op;
  3325. case OP_LOAD_POOL:
  3326. #if PROPAGATE_CONSTANTS
  3327. if ((opd->imm && opd->imm >= base_pc && opd->imm < end_literals) ||
  3328. dr_is_rom(opd->imm))
  3329. {
  3330. if (opd->size == 2)
  3331. u = FETCH32(opd->imm);
  3332. else
  3333. u = (s16)FETCH_OP(opd->imm);
  3334. // tweak for Blackthorne: avoid stack overwriting
  3335. if (GET_Rn() == SHR_SP && u == 0x0603f800) u = 0x0603f880;
  3336. gconst_new(GET_Rn(), u);
  3337. }
  3338. else
  3339. #endif
  3340. {
  3341. if (opd->imm != 0) {
  3342. tmp = rcache_get_tmp_arg(0);
  3343. emith_move_r_imm(tmp, opd->imm);
  3344. } else {
  3345. // have to calculate read addr from PC for delay slot
  3346. tmp = rcache_get_reg_arg(0, SHR_PC, &tmp2);
  3347. if (opd->size == 2) {
  3348. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  3349. emith_bic_r_imm(tmp, 3);
  3350. }
  3351. else
  3352. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 2);
  3353. }
  3354. tmp2 = emit_memhandler_read(opd->size);
  3355. tmp3 = rcache_map_reg(GET_Rn(), tmp2);
  3356. if (tmp3 != tmp2) {
  3357. emith_move_r_r(tmp3, tmp2);
  3358. rcache_free_tmp(tmp2);
  3359. }
  3360. }
  3361. goto end_op;
  3362. case OP_MOVA: // MOVA @(disp,PC),R0 11000111dddddddd
  3363. if (opd->imm != 0)
  3364. emit_move_r_imm32(SHR_R0, opd->imm);
  3365. else {
  3366. // have to calculate addr from PC for delay slot
  3367. tmp2 = rcache_get_reg(SHR_PC, RC_GR_READ, NULL);
  3368. tmp = rcache_get_reg(SHR_R0, RC_GR_WRITE, NULL);
  3369. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  3370. emith_bic_r_imm(tmp, 3);
  3371. }
  3372. goto end_op;
  3373. }
  3374. switch ((op >> 12) & 0x0f)
  3375. {
  3376. /////////////////////////////////////////////
  3377. case 0x00:
  3378. switch (op & 0x0f)
  3379. {
  3380. case 0x02:
  3381. switch (GET_Fx())
  3382. {
  3383. case 0: // STC SR,Rn 0000nnnn00000010
  3384. tmp2 = SHR_SR;
  3385. break;
  3386. case 1: // STC GBR,Rn 0000nnnn00010010
  3387. tmp2 = SHR_GBR;
  3388. break;
  3389. case 2: // STC VBR,Rn 0000nnnn00100010
  3390. tmp2 = SHR_VBR;
  3391. break;
  3392. default:
  3393. goto default_;
  3394. }
  3395. if (tmp2 == SHR_SR) {
  3396. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3397. emith_sync_t(sr);
  3398. tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3399. emith_clear_msb(tmp, sr, 22); // reserved bits defined by ISA as 0
  3400. } else
  3401. emit_move_r_r(GET_Rn(), tmp2);
  3402. goto end_op;
  3403. case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
  3404. case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
  3405. case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
  3406. emit_indirect_indexed_write(sh2, GET_Rm(), SHR_R0, GET_Rn(), op & 3);
  3407. goto end_op;
  3408. case 0x07: // MUL.L Rm,Rn 0000nnnnmmmm0111
  3409. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3410. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3411. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  3412. emith_mul(tmp3, tmp2, tmp);
  3413. goto end_op;
  3414. case 0x08:
  3415. switch (GET_Fx())
  3416. {
  3417. case 0: // CLRT 0000000000001000
  3418. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3419. #if T_OPTIMIZER
  3420. if (~rcache_regs_discard & BITMASK1(SHR_T))
  3421. #endif
  3422. emith_set_t(sr, 0);
  3423. break;
  3424. case 1: // SETT 0000000000011000
  3425. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3426. #if T_OPTIMIZER
  3427. if (~rcache_regs_discard & BITMASK1(SHR_T))
  3428. #endif
  3429. emith_set_t(sr, 1);
  3430. break;
  3431. case 2: // CLRMAC 0000000000101000
  3432. emit_move_r_imm32(SHR_MACL, 0);
  3433. emit_move_r_imm32(SHR_MACH, 0);
  3434. break;
  3435. default:
  3436. goto default_;
  3437. }
  3438. goto end_op;
  3439. case 0x09:
  3440. switch (GET_Fx())
  3441. {
  3442. case 0: // NOP 0000000000001001
  3443. break;
  3444. case 1: // DIV0U 0000000000011001
  3445. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3446. emith_invalidate_t();
  3447. emith_bic_r_imm(sr, M|Q|T);
  3448. drcf.Mflag = FLG_0;
  3449. #if DIV_OPTIMIZER
  3450. if (div(opd).div1 == 16 && div(opd).ro == div(opd).rn) {
  3451. // divide 32/16
  3452. rcache_get_reg_arg(0, div(opd).rn, NULL);
  3453. rcache_get_reg_arg(1, div(opd).rm, NULL);
  3454. rcache_invalidate_tmp();
  3455. emith_abicall(sh2_drc_divu32);
  3456. tmp = rcache_get_tmp_ret();
  3457. tmp2 = rcache_map_reg(div(opd).rn, tmp);
  3458. if (tmp != tmp2)
  3459. emith_move_r_r(tmp2, tmp);
  3460. tmp3 = rcache_get_tmp();
  3461. emith_and_r_r_imm(tmp3, tmp2, 1); // Q = !Rn[0]
  3462. emith_eor_r_r_imm(tmp3, tmp3, 1);
  3463. emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);
  3464. rcache_free_tmp(tmp3);
  3465. emith_or_r_r_r_lsr(sr, sr, tmp2, 31); // T = Rn[31]
  3466. skip_op = div(opd).div1 + div(opd).rotcl;
  3467. }
  3468. else if (div(opd).div1 == 32 && div(opd).ro != div(opd).rn) {
  3469. // divide 64/32
  3470. tmp4 = rcache_get_reg(div(opd).ro, RC_GR_READ, NULL);
  3471. emith_ctx_write(tmp4, offsetof(SH2, drc_tmp));
  3472. tmp = rcache_get_tmp_arg(1);
  3473. emith_add_r_r_ptr_imm(tmp, CONTEXT_REG, offsetof(SH2, drc_tmp));
  3474. rcache_get_reg_arg(0, div(opd).rn, NULL);
  3475. rcache_get_reg_arg(2, div(opd).rm, NULL);
  3476. rcache_invalidate_tmp();
  3477. emith_abicall(sh2_drc_divu64);
  3478. tmp = rcache_get_tmp_ret();
  3479. tmp2 = rcache_map_reg(div(opd).rn, tmp);
  3480. tmp4 = rcache_get_reg(div(opd).ro, RC_GR_WRITE, NULL);
  3481. if (tmp != tmp2)
  3482. emith_move_r_r(tmp2, tmp);
  3483. emith_ctx_read(tmp4, offsetof(SH2, drc_tmp));
  3484. tmp3 = rcache_get_tmp();
  3485. emith_and_r_r_imm(tmp3, tmp4, 1); // Q = !Ro[0]
  3486. emith_eor_r_r_imm(tmp3, tmp3, 1);
  3487. emith_or_r_r_lsl(sr, tmp3, Q_SHIFT);
  3488. rcache_free_tmp(tmp3);
  3489. emith_or_r_r_r_lsr(sr, sr, tmp4, 31); // T = Ro[31]
  3490. skip_op = div(opd).div1 + div(opd).rotcl;
  3491. }
  3492. #endif
  3493. break;
  3494. case 2: // MOVT Rn 0000nnnn00101001
  3495. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3496. emith_sync_t(sr);
  3497. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3498. emith_clear_msb(tmp2, sr, 31);
  3499. break;
  3500. default:
  3501. goto default_;
  3502. }
  3503. goto end_op;
  3504. case 0x0a:
  3505. switch (GET_Fx())
  3506. {
  3507. case 0: // STS MACH,Rn 0000nnnn00001010
  3508. tmp2 = SHR_MACH;
  3509. break;
  3510. case 1: // STS MACL,Rn 0000nnnn00011010
  3511. tmp2 = SHR_MACL;
  3512. break;
  3513. case 2: // STS PR,Rn 0000nnnn00101010
  3514. tmp2 = SHR_PR;
  3515. break;
  3516. default:
  3517. goto default_;
  3518. }
  3519. emit_move_r_r(GET_Rn(), tmp2);
  3520. goto end_op;
  3521. case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
  3522. case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
  3523. case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
  3524. emit_indirect_indexed_read(sh2, GET_Rn(), SHR_R0, GET_Rm(), (op & 3) | drcf.polling);
  3525. goto end_op;
  3526. case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
  3527. emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
  3528. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3529. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
  3530. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
  3531. emith_sh2_macl(tmp3, tmp4, tmp, tmp2, sr);
  3532. rcache_free_tmp(tmp2);
  3533. rcache_free_tmp(tmp);
  3534. goto end_op;
  3535. }
  3536. goto default_;
  3537. /////////////////////////////////////////////
  3538. case 0x01: // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
  3539. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), (op & 0x0f) * 4, 2);
  3540. goto end_op;
  3541. case 0x02:
  3542. switch (op & 0x0f)
  3543. {
  3544. case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
  3545. case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
  3546. case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
  3547. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, op & 3);
  3548. goto end_op;
  3549. case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
  3550. case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
  3551. case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
  3552. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, (op & 3) | MF_PREDECR);
  3553. goto end_op;
  3554. case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
  3555. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3556. emith_invalidate_t();
  3557. emith_bic_r_imm(sr, M|Q|T);
  3558. drcf.Mflag = FLG_UNKNOWN;
  3559. #if DIV_OPTIMIZER
  3560. if (div(opd).div1 == 16 && div(opd).ro == div(opd).rn) {
  3561. // divide 32/16
  3562. rcache_get_reg_arg(0, div(opd).rn, NULL);
  3563. tmp2 = rcache_get_reg_arg(1, div(opd).rm, NULL);
  3564. tmp3 = rcache_get_tmp();
  3565. emith_lsr(tmp3, tmp2, 31);
  3566. emith_or_r_r_lsl(sr, tmp3, M_SHIFT); // M = Rm[31]
  3567. rcache_invalidate_tmp();
  3568. emith_abicall(sh2_drc_divs32);
  3569. tmp = rcache_get_tmp_ret();
  3570. tmp2 = rcache_map_reg(div(opd).rn, tmp);
  3571. if (tmp != tmp2)
  3572. emith_move_r_r(tmp2, tmp);
  3573. tmp3 = rcache_get_tmp();
  3574. emith_eor_r_r_r_lsr(tmp3, tmp2, sr, M_SHIFT);
  3575. emith_and_r_r_imm(tmp3, tmp3, 1);
  3576. emith_eor_r_r_imm(tmp3, tmp3, 1);
  3577. emith_or_r_r_lsl(sr, tmp3, Q_SHIFT); // Q = !Rn[0]^M
  3578. rcache_free_tmp(tmp3);
  3579. emith_or_r_r_r_lsr(sr, sr, tmp2, 31); // T = Rn[31]
  3580. skip_op = div(opd).div1 + div(opd).rotcl;
  3581. }
  3582. else if (div(opd).div1 == 32 && div(opd).ro != div(opd).rn) {
  3583. // divide 64/32
  3584. tmp4 = rcache_get_reg(div(opd).ro, RC_GR_READ, NULL);
  3585. emith_ctx_write(tmp4, offsetof(SH2, drc_tmp));
  3586. rcache_get_reg_arg(0, div(opd).rn, NULL);
  3587. tmp2 = rcache_get_reg_arg(2, div(opd).rm, NULL);
  3588. tmp3 = rcache_get_tmp_arg(1);
  3589. emith_lsr(tmp3, tmp2, 31);
  3590. emith_or_r_r_lsl(sr, tmp3, M_SHIFT); // M = Rm[31]
  3591. emith_add_r_r_ptr_imm(tmp3, CONTEXT_REG, offsetof(SH2, drc_tmp));
  3592. rcache_invalidate_tmp();
  3593. emith_abicall(sh2_drc_divs64);
  3594. tmp = rcache_get_tmp_ret();
  3595. tmp2 = rcache_map_reg(div(opd).rn, tmp);
  3596. tmp4 = rcache_get_reg(div(opd).ro, RC_GR_WRITE, NULL);
  3597. if (tmp != tmp2)
  3598. emith_move_r_r(tmp2, tmp);
  3599. emith_ctx_read(tmp4, offsetof(SH2, drc_tmp));
  3600. tmp3 = rcache_get_tmp();
  3601. emith_eor_r_r_r_lsr(tmp3, tmp4, sr, M_SHIFT);
  3602. emith_and_r_r_imm(tmp3, tmp3, 1);
  3603. emith_eor_r_r_imm(tmp3, tmp3, 1);
  3604. emith_or_r_r_lsl(sr, tmp3, Q_SHIFT); // Q = !Ro[0]^M
  3605. rcache_free_tmp(tmp3);
  3606. emith_or_r_r_r_lsr(sr, sr, tmp4, 31); // T = Ro[31]
  3607. skip_op = div(opd).div1 + div(opd).rotcl;
  3608. } else
  3609. #endif
  3610. {
  3611. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3612. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3613. tmp = rcache_get_tmp();
  3614. emith_lsr(tmp, tmp2, 31); // Q = Nn
  3615. emith_or_r_r_lsl(sr, tmp, Q_SHIFT);
  3616. emith_lsr(tmp, tmp3, 31); // M = Nm
  3617. emith_or_r_r_lsl(sr, tmp, M_SHIFT);
  3618. emith_eor_r_r_lsr(tmp, tmp2, 31);
  3619. emith_or_r_r(sr, tmp); // T = Q^M
  3620. rcache_free(tmp);
  3621. }
  3622. goto end_op;
  3623. case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
  3624. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3625. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3626. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3627. emith_clr_t_cond(sr);
  3628. emith_tst_r_r(tmp2, tmp3);
  3629. emith_set_t_cond(sr, DCOND_EQ);
  3630. goto end_op;
  3631. case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
  3632. if (GET_Rm() != GET_Rn()) {
  3633. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3634. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3635. emith_and_r_r_r(tmp, tmp3, tmp2);
  3636. }
  3637. goto end_op;
  3638. case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
  3639. #if PROPAGATE_CONSTANTS
  3640. if (GET_Rn() == GET_Rm()) {
  3641. gconst_new(GET_Rn(), 0);
  3642. goto end_op;
  3643. }
  3644. #endif
  3645. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3646. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3647. emith_eor_r_r_r(tmp, tmp3, tmp2);
  3648. goto end_op;
  3649. case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
  3650. if (GET_Rm() != GET_Rn()) {
  3651. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3652. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3653. emith_or_r_r_r(tmp, tmp3, tmp2);
  3654. }
  3655. goto end_op;
  3656. case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
  3657. tmp = rcache_get_tmp();
  3658. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3659. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3660. emith_eor_r_r_r(tmp, tmp2, tmp3);
  3661. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3662. emith_clr_t_cond(sr);
  3663. emith_tst_r_imm(tmp, 0x000000ff);
  3664. EMITH_SJMP_START(DCOND_EQ);
  3665. emith_tst_r_imm_c(DCOND_NE, tmp, 0x0000ff00);
  3666. EMITH_SJMP_START(DCOND_EQ);
  3667. emith_tst_r_imm_c(DCOND_NE, tmp, 0x00ff0000);
  3668. EMITH_SJMP_START(DCOND_EQ);
  3669. emith_tst_r_imm_c(DCOND_NE, tmp, 0xff000000);
  3670. EMITH_SJMP_END(DCOND_EQ);
  3671. EMITH_SJMP_END(DCOND_EQ);
  3672. EMITH_SJMP_END(DCOND_EQ);
  3673. emith_set_t_cond(sr, DCOND_EQ);
  3674. rcache_free_tmp(tmp);
  3675. goto end_op;
  3676. case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
  3677. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3678. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3679. emith_lsr(tmp, tmp3, 16);
  3680. emith_or_r_r_lsl(tmp, tmp2, 16);
  3681. goto end_op;
  3682. case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
  3683. case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
  3684. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3685. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3686. tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  3687. tmp4 = tmp3;
  3688. if (op & 1) {
  3689. if (! rcache_is_s16(tmp2)) {
  3690. emith_sext(tmp, tmp2, 16);
  3691. tmp2 = tmp;
  3692. }
  3693. if (! rcache_is_s16(tmp3)) {
  3694. tmp4 = rcache_get_tmp();
  3695. emith_sext(tmp4, tmp3, 16);
  3696. }
  3697. } else {
  3698. if (! rcache_is_u16(tmp2)) {
  3699. emith_clear_msb(tmp, tmp2, 16);
  3700. tmp2 = tmp;
  3701. }
  3702. if (! rcache_is_u16(tmp3)) {
  3703. tmp4 = rcache_get_tmp();
  3704. emith_clear_msb(tmp4, tmp3, 16);
  3705. }
  3706. }
  3707. emith_mul(tmp, tmp2, tmp4);
  3708. if (tmp4 != tmp3)
  3709. rcache_free_tmp(tmp4);
  3710. goto end_op;
  3711. }
  3712. goto default_;
  3713. /////////////////////////////////////////////
  3714. case 0x03:
  3715. switch (op & 0x0f)
  3716. {
  3717. case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
  3718. case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
  3719. case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
  3720. case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
  3721. case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
  3722. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3723. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3724. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3725. switch (op & 0x07)
  3726. {
  3727. case 0x00: // CMP/EQ
  3728. tmp = DCOND_EQ;
  3729. break;
  3730. case 0x02: // CMP/HS
  3731. tmp = DCOND_HS;
  3732. break;
  3733. case 0x03: // CMP/GE
  3734. tmp = DCOND_GE;
  3735. break;
  3736. case 0x06: // CMP/HI
  3737. tmp = DCOND_HI;
  3738. break;
  3739. case 0x07: // CMP/GT
  3740. tmp = DCOND_GT;
  3741. break;
  3742. }
  3743. emith_clr_t_cond(sr);
  3744. emith_cmp_r_r(tmp2, tmp3);
  3745. emith_set_t_cond(sr, tmp);
  3746. goto end_op;
  3747. case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
  3748. // Q1 = carry(Rn = (Rn << 1) | T)
  3749. // if Q ^ M
  3750. // Q2 = carry(Rn += Rm)
  3751. // else
  3752. // Q2 = carry(Rn -= Rm)
  3753. // Q = M ^ Q1 ^ Q2
  3754. // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        tmp = rcache_get_tmp();
        if (drcf.Mflag != FLG_0) {
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT); // Q ^= M
        }
        rcache_free_tmp(tmp);
        // shift Rn, add T, add or sub Rm, set T = !(Q1 ^ Q2)
        // in: (Q ^ M) passed in Q
        emith_sh2_div1_step(tmp2, tmp3, sr);
        tmp = rcache_get_tmp();
        emith_or_r_imm(sr, Q); // Q = !T
        emith_and_r_r_imm(tmp, sr, T);
        emith_eor_r_r_lsl(sr, tmp, Q_SHIFT);
        if (drcf.Mflag != FLG_0) { // Q = M ^ !T = M ^ Q1 ^ Q2
          emith_and_r_r_imm(tmp, sr, M);
          emith_eor_r_r_lsr(sr, tmp, M_SHIFT - Q_SHIFT);
        }
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_u64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
      case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 4) {
          emith_add_r_r_r(tmp, tmp3, tmp2);
        } else
          emith_sub_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4) {
            emith_t_to_carry(sr, 0);
            emith_adc_r_r_r(tmp, tmp3, tmp2);
          } else {
            emith_t_to_carry(sr, 1);
            emith_sbc_r_r_r(tmp, tmp3, tmp2);
          }
        } else
#endif
        {
          EMITH_HINT_COND(DCOND_CS);
          if (op & 4) { // adc
            emith_tpop_carry(sr, 0);
            emith_adcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 0);
          } else {
            emith_tpop_carry(sr, 1);
            emith_sbcf_r_r_r(tmp, tmp3, tmp2);
            emith_tpush_carry(sr, 1);
          }
        }
        goto end_op;
      case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
        if (rcache_regs_discard & BITMASK1(SHR_T)) {
          if (op & 4)
            emith_add_r_r_r(tmp, tmp3, tmp2);
          else
            emith_sub_r_r_r(tmp, tmp3, tmp2);
        } else
#endif
        {
          emith_clr_t_cond(sr);
          EMITH_HINT_COND(DCOND_VS);
          if (op & 4)
            emith_addf_r_r_r(tmp, tmp3, tmp2);
          else
            emith_subf_r_r_r(tmp, tmp3, tmp2);
          emith_set_t_cond(sr, DCOND_VS);
        }
        goto end_op;
      case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_s64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn 0100nnnn00000000
        case 2: // SHAL Rn 0100nnnn00100000
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T))
            emith_lsl(tmp, tmp2, 1);
          else
#endif
          {
            emith_invalidate_t();
            emith_lslf(tmp, tmp2, 1);
            emith_carry_to_t(sr, 0);
          }
          goto end_op;
        case 1: // DT Rn 0100nnnn00010000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if LOOP_DETECTION
          if (drcf.loop_type == OF_DELAY_LOOP) {
            if (drcf.delay_reg == -1)
              drcf.delay_reg = GET_Rn();
            else
              drcf.polling = drcf.loop_type = 0;
          }
#endif
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          emith_clr_t_cond(sr);
          EMITH_HINT_COND(DCOND_EQ);
          emith_subf_r_r_imm(tmp, tmp2, 1);
          emith_set_t_cond(sr, DCOND_EQ);
          goto end_op;
        }
        goto default_;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn 0100nnnn00000001
        case 2: // SHAR Rn 0100nnnn00100001
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            if (op & 0x20)
              emith_asr(tmp, tmp2, 1);
            else
              emith_lsr(tmp, tmp2, 1);
          } else
#endif
          {
            emith_invalidate_t();
            if (op & 0x20) {
              emith_asrf(tmp, tmp2, 1);
            } else
              emith_lsrf(tmp, tmp2, 1);
            emith_carry_to_t(sr, 0);
          }
          goto end_op;
        case 1: // CMP/PZ Rn 0100nnnn00010001
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_GE);
          goto end_op;
        }
        goto default_;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
          tmp = SHR_MACH;
          break;
        case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
          tmp = SHR_MACL;
          break;
        case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
          tmp = SHR_PR;
          break;
        case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
          tmp = SHR_SR;
          break;
        case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
          tmp = SHR_GBR;
          break;
        case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
          tmp = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp == SHR_SR) {
          tmp3 = rcache_get_reg_arg(1, tmp, &tmp4);
          emith_sync_t(tmp4);
          emith_clear_msb(tmp3, tmp4, 22); // reserved bits defined by ISA as 0
        } else
          tmp3 = rcache_get_reg_arg(1, tmp, NULL);
        emit_memhandler_write_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_PREDECR);
        goto end_op;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn 0100nnnn00000100
        case 0x05: // ROTR Rn 0100nnnn00000101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            if (op & 1)
              emith_ror(tmp, tmp2, 1);
            else
              emith_rol(tmp, tmp2, 1);
          } else
#endif
          {
            emith_invalidate_t();
            if (op & 1)
              emith_rorf(tmp, tmp2, 1);
            else
              emith_rolf(tmp, tmp2, 1);
            emith_carry_to_t(sr, 0);
          }
          goto end_op;
        case 0x24: // ROTCL Rn 0100nnnn00100100
        case 0x25: // ROTCR Rn 0100nnnn00100101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            emith_t_to_carry(sr, 0);
            if (op & 1)
              emith_rorc(tmp);
            else
              emith_rolc(tmp);
          } else
#endif
          {
            emith_tpop_carry(sr, 0);
            if (op & 1)
              emith_rorcf(tmp);
            else
              emith_rolcf(tmp);
            emith_tpush_carry(sr, 0);
          }
          goto end_op;
        case 0x15: // CMP/PL Rn 0100nnnn00010101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_GT);
          goto end_op;
        }
        goto default_;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
          tmp = SHR_MACH;
          break;
        case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
          tmp = SHR_MACL;
          break;
        case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
          tmp = SHR_PR;
          break;
        case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
          tmp = SHR_SR;
          break;
        case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
          tmp = SHR_GBR;
          break;
        case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
          tmp = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp == SHR_SR) {
          emith_invalidate_t();
          tmp2 = emit_memhandler_read_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_POSTINCR);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_write_sr(sr, tmp2);
          rcache_free_tmp(tmp2);
          drcf.test_irq = 1;
        } else
          emit_memhandler_read_rr(sh2, tmp, GET_Rn(), 0, 2 | MF_POSTINCR);
        goto end_op;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // SHLL2 Rn 0100nnnn00001000
                // SHLR2 Rn 0100nnnn00001001
          tmp = 2;
          break;
        case 1: // SHLL8 Rn 0100nnnn00011000
                // SHLR8 Rn 0100nnnn00011001
          tmp = 8;
          break;
        case 2: // SHLL16 Rn 0100nnnn00101000
                // SHLR16 Rn 0100nnnn00101001
          tmp = 16;
          break;
        default:
          goto default_;
        }
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 1) {
          emith_lsr(tmp2, tmp3, tmp);
        } else
          emith_lsl(tmp2, tmp3, tmp);
        goto end_op;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp2 = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp2 = SHR_MACL;
          break;
        case 2: // LDS Rm,PR 0100mmmm00101010
          tmp2 = SHR_PR;
          break;
        default:
          goto default_;
        }
        emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0b:
        switch (GET_Fx())
        {
        case 1: // TAS.B @Rn 0100nnnn00011011
          // XXX: is TAS working on 32X?
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          tmp = emit_memhandler_read(0);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_EQ);
          emith_or_r_imm(tmp, 0x80);
          tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
          emith_move_r_r(tmp2, tmp);
          rcache_free_tmp(tmp);
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          emit_memhandler_write(0);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR 0100mmmm00001110
          tmp2 = SHR_SR;
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp2 = SHR_GBR;
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp2 = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp2 == SHR_SR) {
          emith_invalidate_t();
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          emith_write_sr(sr, tmp);
          drcf.test_irq = 1;
        } else
          emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0f: // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
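        // (added) MAC.W reads two signed 16-bit operands with post-increment,
        // multiplies them and accumulates into MACH:MACL; sr is passed in
        // presumably so emith_sh2_macw can apply the ISA's S-flag saturation
        // mode for MAC.W.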
        emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
        sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
        emith_sh2_macw(tmp3, tmp4, tmp, tmp2, sr);
        rcache_free_tmp(tmp2);
        rcache_free_tmp(tmp);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x05: // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2 | drcf.polling);
      goto end_op;
    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
      case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
        tmp = ((op & 7) >= 4 && GET_Rn() != GET_Rm()) ? MF_POSTINCR : drcf.polling;
        emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), 0, (op & 3) | tmp);
        goto end_op;
      case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
        emit_move_r_r(GET_Rn(), GET_Rm());
        goto end_op;
      case 0x07:
      case 0x08:
      case 0x09:
      case 0x0a:
      case 0x0b:
      case 0x0c:
      case 0x0d:
      case 0x0e:
      case 0x0f:
        tmp = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
        switch (op & 0x0f)
        {
        case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
          emith_mvn_r_r(tmp2, tmp);
          break;
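        // (added) SWAP.B swaps the two low bytes and keeps the high word:
        // Rn = {Rm[31:16], Rm[7:0], Rm[15:8]}. The sequence below first
        // assembles {b0, b1, hi16} in a scratch register, then rotates by 16
        // to put each field in its final place.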
        case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
          tmp3 = tmp2;
          if (tmp == tmp2)
            tmp3 = rcache_get_tmp();
          tmp4 = rcache_get_tmp();
          emith_lsr(tmp3, tmp, 16);
          emith_or_r_r_lsl(tmp3, tmp, 24);
          emith_and_r_r_imm(tmp4, tmp, 0xff00);
          emith_or_r_r_lsl(tmp3, tmp4, 8);
          emith_rol(tmp2, tmp3, 16);
          rcache_free_tmp(tmp4);
          if (tmp == tmp2)
            rcache_free_tmp(tmp3);
          break;
        case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
          emith_rol(tmp2, tmp, 16);
          break;
        case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
#if T_OPTIMIZER
          if (rcache_regs_discard & BITMASK1(SHR_T)) {
            emith_t_to_carry(sr, 1);
            emith_negc_r_r(tmp2, tmp);
          } else
#endif
          {
            EMITH_HINT_COND(DCOND_CS);
            emith_tpop_carry(sr, 1);
            emith_negcf_r_r(tmp2, tmp);
            emith_tpush_carry(sr, 1);
          }
          break;
        case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
          emith_neg_r_r(tmp2, tmp);
          break;
        case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
          emith_clear_msb(tmp2, tmp, 24);
          rcache_set_x16(tmp2, 1, 1);
          break;
        case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
          emith_clear_msb(tmp2, tmp, 16);
          rcache_set_x16(tmp2, 0, 1);
          break;
        case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
          emith_sext(tmp2, tmp, 8);
          rcache_set_x16(tmp2, 1, 0);
          break;
        case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
          emith_sext(tmp2, tmp, 16);
          rcache_set_x16(tmp2, 1, 0);
          break;
        }
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x07: // ADD #imm,Rn 0111nnnniiiiiiii
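      // (added) the 8-bit immediate is sign-extended; with bit 7 set the
      // value is negative and (u8)-op recovers its magnitude. E.g. a low
      // byte of 0xff (imm -1) gives (u8)-op == 1, emitted as a subtract of 1.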
      if (op & 0x80) // adding negative
        emit_sub_r_imm(GET_Rn(), (u8)-op);
      else
        emit_add_r_imm(GET_Rn(), (u8)op);
      goto end_op;
    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
      case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
      case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
        tmp2 = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_cmp_r_imm(tmp2, (s8)(op & 0xff));
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // TST #imm,R0 11001000iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x0900: // AND #imm,R0 11001001iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
        emith_and_r_r_imm(tmp, tmp2, (op & 0xff));
        goto end_op;
      case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_eor_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0b00: // OR #imm,R0 11001011iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_or_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0 | drcf.polling);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_and_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_eor_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_or_r_r_imm(tmp2, tmp, (op & 0xff));
      end_rmw_op:
        rcache_free_tmp(tmp);
        emit_indirect_indexed_write(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x0e: // MOV #imm,Rn 1110nnnniiiiiiii
      emit_move_r_imm32(GET_Rn(), (s8)op);
      goto end_op;
    default:
    default_:
      if (!(op_flags[i] & OF_B_IN_DS)) {
        elprintf_sh2(sh2, EL_ANOMALY,
          "drc: illegal op %04x @ %08x", op, pc - 2);
        exit(1);
      }
    }

  end_op:
    rcache_unlock_all();
    rcache_set_usage_now(0);
#if DRC_DEBUG & 64
    RCACHE_CHECK("after insn");
#endif
    cycles += opd->cycles;
    if (op_flags[i+1] & OF_DELAY_OP) {
      do_host_disasm(tcache_id);
      continue;
    }
    // test irq?
    if (drcf.test_irq && !drcf.pending_branch_direct) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      if (!drcf.pending_branch_indirect)
        emit_move_r_imm32(SHR_PC, pc);
      rcache_flush();
      emith_call(sh2_drc_test_irq);
      drcf.test_irq = 0;
    }
    // branch handling
    if (drcf.pending_branch_direct)
    {
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      u32 target_pc = opd_b->imm;
      int cond = -1;
      int ctaken = 0;
      void *target = NULL;
      if (OP_ISBRACND(opd_b->op))
        ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
      cycles += ctaken; // assume branch taken
#if LOOP_OPTIMIZER
      if ((drcf.loop_type == OF_IDLE_LOOP ||
          (drcf.loop_type == OF_DELAY_LOOP && drcf.delay_reg >= 0)))
      {
        // idle or delay loop
        emit_sync_t_to_sr();
        emith_sh2_delay_loop(cycles, drcf.delay_reg);
        rcache_unlock_all(); // may lock delay_reg
        drcf.polling = drcf.loop_type = drcf.pinning = 0;
      }
#endif
#if CALL_STACK
      void *rtsadd = NULL, *rtsret = NULL;
      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // BSR - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif
      // XXX move below cond test if not changing host cond (MIPS delay slot)?
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      rcache_clean();
      if (OP_ISBRACND(opd_b->op)) {
        // BT[S], BF[S] - emit condition test
        cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
        if (delay_dep_fw & BITMASK1(SHR_T)) {
          emith_sync_t(sr);
          emith_tst_r_imm(sr, T_save);
        } else {
          cond = emith_tst_t(sr, (opd_b->op == OP_BRANCH_CT));
          if (emith_get_t_cond() >= 0) {
            if (opd_b->op == OP_BRANCH_CT)
              emith_or_r_imm_c(cond, sr, T);
            else
              emith_bic_r_imm_c(cond, sr, T);
          }
        }
      } else
        emith_sync_t(sr);
      // no modification of host status/flags between here and branching!
      v = find_in_sorted_linkage(branch_targets, branch_target_count, target_pc);
      if (v >= 0)
      {
        // local branch
        if (branch_targets[v].ptr) {
          // local backward jump, link here now since host PC is already known
          target = branch_targets[v].ptr;
#if LOOP_OPTIMIZER
          if (pinned_loops[pinned_loop_count].pc == target_pc) {
            // backward jump at end of optimized loop
            rcache_unpin_all();
            target = pinned_loops[pinned_loop_count].ptr;
            pinned_loop_count ++;
          }
#endif
          if (cond != -1) {
            if (emith_jump_patch_inrange(tcache_ptr, target)) {
              emith_jump_cond(cond, target);
            } else {
              // not reachable directly, must use far branch
              EMITH_JMP_START(emith_invert_cond(cond));
              emith_jump(target);
              EMITH_JMP_END(emith_invert_cond(cond));
            }
          } else {
            emith_jump(target);
            rcache_invalidate();
          }
        } else if (blx_target_count < MAX_LOCAL_BRANCHES) {
          // local forward jump
          target = tcache_ptr;
          blx_targets[blx_target_count++] =
            (struct linkage) { .pc = target_pc, .ptr = target, .mask = 0x2 };
          if (cond != -1)
            emith_jump_cond_patchable(cond, target);
          else {
            emith_jump_patchable(target);
            rcache_invalidate();
          }
        } else
          // no space for resolving forward branch, handle it as external
          dbg(1, "warning: too many unresolved branches");
      }
      if (target == NULL)
      {
        // can't resolve branch locally, make a block exit
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (cond != -1) {
#if 1
          if (bl && blx_target_count < ARRAY_SIZE(blx_targets)) {
            // conditional jumps get a blx stub for the far jump
            bl->type = BL_JCCBLX;
            target = tcache_ptr;
            blx_targets[blx_target_count++] =
              (struct linkage) { .pc = target_pc, .ptr = target, .bl = bl };
            emith_jump_cond_patchable(cond, target);
          } else {
            // not linkable, or blx table full; inline jump @dispatcher
            EMITH_JMP_START(emith_invert_cond(cond));
            if (bl) {
              bl->jump = tcache_ptr;
              emith_flush(); // flush to inhibit insn swapping
              bl->type = BL_LDJMP;
            }
            tmp = rcache_get_tmp_arg(0);
            emith_move_r_imm(tmp, target_pc);
            rcache_free_tmp(tmp);
            target = sh2_drc_dispatcher;
            emith_jump_patchable(target);
            EMITH_JMP_END(emith_invert_cond(cond));
          }
#else
          // jump @dispatcher - ARM 32bit version with conditional execution
          EMITH_SJMP_START(emith_invert_cond(cond));
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm_c(cond, tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;
          if (bl) {
            bl->jump = tcache_ptr;
            bl->type = BL_JMP;
          }
          emith_jump_cond_patchable(cond, target);
          EMITH_SJMP_END(emith_invert_cond(cond));
#endif
        } else {
          // unconditional, has the far jump inlined
          if (bl) {
            emith_flush(); // flush to inhibit insn swapping
            bl->type = BL_LDJMP;
          }
          tmp = rcache_get_tmp_arg(0);
          emith_move_r_imm(tmp, target_pc);
          rcache_free_tmp(tmp);
          target = sh2_drc_dispatcher;
          emith_jump_patchable(target);
          rcache_invalidate();
        }
      }
#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif
      // branch not taken, correct cycle count
      if (ctaken)
        cycles -= ctaken;
      // set T bit to reflect branch not taken for OP_BRANCH_CT/CF
      if (emith_get_t_cond() >= 0) // T is synced for all other cases
        emith_set_t(sr, opd_b->op == OP_BRANCH_CF);
      drcf.pending_branch_direct = 0;
      if (target_pc >= base_pc && target_pc < pc)
        drcf.polling = drcf.loop_type = 0;
    }
    else if (drcf.pending_branch_indirect) {
      u32 target_pc;
      tmp = rcache_get_reg_arg(0, SHR_PC, NULL);
#if CALL_STACK
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      void *rtsadd = NULL, *rtsret = NULL;
      if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // JSR, BSRF - save rts data
        tmp = rcache_get_tmp_arg(1);
        rtsadd = tcache_ptr;
        emith_move_r_imm_s8_patchable(tmp, 0);
        rcache_clean_tmp();
        rcache_invalidate_tmp();
        emith_call(sh2_drc_dispatcher_call);
        rtsret = tcache_ptr;
      }
#endif
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      rcache_clean();
#if CALL_STACK
      if (opd_b->rm == SHR_PR) {
        // RTS - restore rts data, else jump to dispatcher
        emith_jump(sh2_drc_dispatcher_return);
      } else
#endif
      if (gconst_get(SHR_PC, &target_pc)) {
        // JMP, JSR, BRAF, BSRF const - treat like unconditional direct branch
        bl = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        if (bl) // pc already loaded somewhere else, can patch jump only
          bl->type = BL_JMP;
        emith_jump_patchable(sh2_drc_dispatcher);
      } else {
        // JMP, JSR, BRAF, BSRF not const
        emith_jump(sh2_drc_dispatcher);
      }
      rcache_invalidate();
#if CALL_STACK
      if (rtsadd)
        emith_move_r_imm_s8_patch(rtsadd, tcache_ptr - (u8 *)rtsret);
#endif
      drcf.pending_branch_indirect = 0;
      drcf.polling = drcf.loop_type = 0;
    }
    rcache_unlock_all();
    do_host_disasm(tcache_id);
  }

  // check the last op
  if (op_flags[i-1] & OF_DELAY_OP)
    opd = &ops[i-2];
  else
    opd = &ops[i-1];
  if (! OP_ISBRAUC(opd->op))
  {
    tmp = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    FLUSH_CYCLES(tmp);
    emith_sync_t(tmp);
    rcache_clean();
    bl = dr_prepare_ext_branch(block->entryp, pc, sh2->is_slave, tcache_id);
    if (bl) {
      emith_flush(); // flush to inhibit insn swapping
      bl->type = BL_LDJMP;
    }
    tmp = rcache_get_tmp_arg(0);
    emith_move_r_imm(tmp, pc);
    emith_jump_patchable(sh2_drc_dispatcher);
    rcache_invalidate();
  } else
    rcache_flush();

  // link unresolved branches, emitting blx area entries as needed
  emit_branch_linkage_code(sh2, block, tcache_id, branch_targets,
    branch_target_count, blx_targets, blx_target_count);
  emith_flush();
  do_host_disasm(tcache_id);
  emith_pool_commit(0);

  // fill blx backup; do this last to backup final patched code
  for (i = 0; i < block->entry_count; i++)
    for (bl = block->entryp[i].o_links; bl; bl = bl->o_next)
      memcpy(bl->jdisp, bl->blx ? bl->blx : bl->jump, emith_jump_at_size());

  ring_alloc(&tcache_ring[tcache_id], tcache_ptr - block_entry_ptr);
  host_instructions_updated(block_entry_ptr, tcache_ptr, 1);
  dr_activate_block(block, tcache_id, sh2->is_slave);
  emith_update_cache();
  do_host_disasm(tcache_id);
  dbg(2, " block #%d,%d -> %p tcache %d/%d, insns %d -> %d %.3f",
    tcache_id, blkid_main, tcache_ptr,
    tcache_ring[tcache_id].used, tcache_ring[tcache_id].size,
    insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);
  if ((sh2->pc & 0xc6000000) == 0x02000000) { // ROM
    dbg(2, " hash collisions %d/%d", hash_collisions, block_ring[tcache_id].used);
    Pico32x.emu_flags |= P32XF_DRC_ROM_C;
  }
/*
  printf("~~~\n");
  tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
  do_host_disasm(tcache_id);
  printf("~~~\n");
*/
#if (DRC_DEBUG)
  fflush(stdout);
#endif
  return block_entry_ptr;
}

static void sh2_generate_utils(void)
{
  int arg0, arg1, arg2, arg3, sr, tmp, tmp2;
#if DRC_DEBUG
  int hic = host_insn_count; // don't count utils for insn statistics
#endif

  host_arg2reg(arg0, 0);
  host_arg2reg(arg1, 1);
  host_arg2reg(arg2, 2);
  host_arg2reg(arg3, 3);
  emith_move_r_r(arg0, arg0); // nop
  emith_flush();

  // sh2_drc_write8(u32 a, u32 d)
  sh2_drc_write8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write8_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write16(u32 a, u32 d)
  sh2_drc_write16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write16_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // sh2_drc_write32(u32 a, u32 d)
  sh2_drc_write32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg2, offsetof(SH2, write32_tab));
  emith_sh2_wcall(arg0, arg1, arg2, arg3);
  emith_flush();

  // d = sh2_drc_read8(u32 a)
  sh2_drc_read8 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_eor_r_imm_ptr_c(DCOND_CC, arg0, 1);
  emith_read8s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read16(u32 a)
  sh2_drc_read16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read16s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read32(u32 a)
  sh2_drc_read32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ror_c(DCOND_CC, RET_REG, RET_REG, 16);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_abijump_reg(arg2);
  emith_flush();
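
  // (added) the *_poll variants below additionally report the access to
  // p32x_sh2_poll_memory* (the just-read value is kept live across the ABI
  // call via push/pop), which presumably lets the core detect busy-wait
  // polling of shared memory and idle the polling SH-2.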
  // d = sh2_drc_read8_poll(u32 a)
  sh2_drc_read8_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_eor_r_imm_ptr(arg1, 1);
  emith_read8s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory8);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read16_poll(u32 a)
  sh2_drc_read16_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read16s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory16);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read32_poll(u32 a)
  sh2_drc_read32_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  EMITH_HINT_COND(DCOND_CS);
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_abijump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read_r_r_r(arg1, arg2, arg1);
  emith_ror(arg1, arg1, 16);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_abicall(p32x_sh2_poll_memory32);
  emith_pop_and_ret(arg1);
  emith_flush();

  // sh2_drc_exit(u32 pc)
  sh2_drc_exit = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
  emit_do_static_regs(1, arg2);
  emith_sh2_drc_exit();
  emith_flush();

  // sh2_drc_dispatcher(u32 pc)
  sh2_drc_dispatcher = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
#if BRANCH_CACHE
  // check if PC is in branch target cache
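  // (added) each branch_cache entry is a {pc, host ptr} pair, 8 bytes on
  // 32-bit hosts, so "pc & (entries-1)*8" is already the byte offset of the
  // entry (doubled once more by the lsl below when pointers are 8 bytes).
  // E.g. for a hypothetical 128-entry cache this is pc & 0x3f8.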
  emith_and_r_r_imm(arg1, arg0, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_read_r_r_offs(arg2, arg1, offsetof(SH2, branch_cache));
  emith_cmp_r_r(arg2, arg0);
  EMITH_SJMP_START(DCOND_NE);
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bchit);
  emith_read_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_EQ, arg3, 1);
  emith_write_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
#endif
  emith_read_r_r_offs_ptr_c(DCOND_EQ, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  emith_jump_reg_c(DCOND_EQ, RET_REG);
  EMITH_SJMP_END(DCOND_NE);
#endif
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_add_r_r_ptr_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  emith_abicall(dr_lookup_block);
  // store PC and block entry ptr (in arg0) in branch target cache
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
#if BRANCH_CACHE
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_NE, arg3, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
#endif
  emith_ctx_read_c(DCOND_NE, arg2, SHR_PC * 4);
  emith_and_r_r_imm(arg1, arg2, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_write_r_r_offs_c(DCOND_NE, arg2, arg1, offsetof(SH2, branch_cache));
  emith_write_r_r_offs_ptr_c(DCOND_NE, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
#endif
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // lookup failed, call sh2_translate()
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  emith_abicall(sh2_translate);
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // XXX: can't translate, fail
  emith_abicall(dr_failure);
  emith_flush();

#if CALL_STACK
  // pc = sh2_drc_dispatcher_call(u32 pc)
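  // (added) rts_cache is a small ring of {guest PR, host return address}
  // pairs; the index advances by one pair (2*sizeof(void *)) and wraps via
  // the AND mask, so a call overwrites the oldest slot and a matching RTS
  // can jump straight back to host code (see sh2_drc_dispatcher_return below).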
  sh2_drc_dispatcher_call = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg3, CONTEXT_REG, arg2, 0);
  rcache_get_reg_arg(2, SHR_PR, NULL);
  emith_add_r_ret(arg1);
  emith_write_r_r_offs_ptr(arg1, arg3, offsetof(SH2, rts_cache)+sizeof(void *));
  emith_write_r_r_offs(arg2, arg3, offsetof(SH2, rts_cache));
  rcache_flush();
  emith_ret();
  emith_flush();

  // sh2_drc_dispatcher_return(u32 pc)
  sh2_drc_dispatcher_return = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg2, 0);
  emith_read_r_r_offs(arg3, arg1, offsetof(SH2, rts_cache));
  emith_cmp_r_r(arg0, arg3);
#if (DRC_DEBUG & 128)
  EMITH_SJMP_START(DCOND_EQ);
  emith_move_r_ptr_imm(arg3, (uptr)&rcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_add_r_imm_c(DCOND_NE, arg1, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg1, arg3, 0);
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
  EMITH_SJMP_END(DCOND_EQ);
#else
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
#endif
  emith_read_r_r_offs_ptr(arg0, arg1, offsetof(SH2, rts_cache) + sizeof(void *));
  emith_sub_r_imm(arg2, (u32)(2*sizeof(void *)));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg3, (uptr)&rchit);
  emith_read_r_r_offs(arg1, arg3, 0);
  emith_add_r_imm(arg1, 1);
  emith_write_r_r_offs(arg1, arg3, 0);
#endif
  emith_jump_reg(arg0);
  emith_flush();
#endif

  // sh2_drc_test_irq(void)
  // assumes it's called from main function (may jump to dispatcher)
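  // (added) this mirrors the SH-2 interrupt entry sequence: SP -= 8, SR is
  // pushed at SP+4 and PC at SP, the I3-I0 mask is raised to the pending
  // level, at least 13 cycles are charged, and the new PC is fetched from
  // VBR + vector*4 before re-entering translated code via the dispatcher.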
  sh2_drc_test_irq = (void *)tcache_ptr;
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_lsr(arg0, sr, I_SHIFT);
  emith_and_r_imm(arg0, 0x0f);
  emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  EMITH_SJMP_START(DCOND_GT);
  emith_ret_c(DCOND_LE); // nope, return
  EMITH_SJMP_END(DCOND_GT);
  // adjust SP
  tmp = rcache_get_reg(SHR_SP, RC_GR_RMW, NULL);
  emith_sub_r_imm(tmp, 4*2);
  rcache_clean();
  // push SR
  tmp = rcache_get_reg_arg(0, SHR_SP, &tmp2);
  emith_add_r_r_imm(tmp, tmp2, 4);
  tmp = rcache_get_reg_arg(1, SHR_SR, NULL);
  emith_clear_msb(tmp, tmp, 22);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_abicall(p32x_sh2_write32); // XXX: use sh2_drc_write32?
  // push PC
  rcache_get_reg_arg(0, SHR_SP, NULL);
  rcache_get_reg_arg(1, SHR_PC, NULL);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_abicall(p32x_sh2_write32);
  // update I, cycles, do callback
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  emith_bic_r_imm(sr, I);
  emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  rcache_flush();
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
  // obtain new PC
  tmp = rcache_get_reg_arg(1, SHR_VBR, &tmp2);
  emith_add_r_r_r_lsl(arg0, tmp2, RET_REG, 2);
  emith_call(sh2_drc_read32);
  if (arg0 != RET_REG)
    emith_move_r_r(arg0, RET_REG);
  emith_call_cleanup();
  rcache_invalidate();
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

  // sh2_drc_entry(SH2 *sh2)
  sh2_drc_entry = (void *)tcache_ptr;
  emith_sh2_drc_entry();
  emith_move_r_r_ptr(CONTEXT_REG, arg0); // move ctx, arg0
  emit_do_static_regs(0, arg2);
  emith_call(sh2_drc_test_irq);
  emith_ctx_read(arg0, SHR_PC * 4);
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

#ifdef DRC_SR_REG
  // sh2_drc_save_sr(SH2 *sh2)
  sh2_drc_save_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_write_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_invalidate();
  emith_ret();
  emith_flush();

  // sh2_drc_restore_sr(SH2 *sh2)
  sh2_drc_restore_sr = (void *)tcache_ptr;
  tmp = rcache_get_reg(SHR_SR, RC_GR_WRITE, NULL);
  emith_read_r_r_offs(tmp, arg0, SHR_SR * 4);
  rcache_flush();
  emith_ret();
  emith_flush();
#endif

#ifdef PDB_NET
  // debug
#define MAKE_READ_WRAPPER(func) { \
  void *tmp = (void *)tcache_ptr; \
  emith_push_ret(); \
  emith_call(func); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_addf_r_r(arg2, arg0); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_adc_r_imm(arg2, 0x01000000); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_pop_and_ret(); \
  emith_flush(); \
  func = tmp; \
}
#define MAKE_WRITE_WRAPPER(func) { \
  void *tmp = (void *)tcache_ptr; \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_addf_r_r(arg2, arg1); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_adc_r_imm(arg2, 0x01000000); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_move_r_r_ptr(arg2, CONTEXT_REG); \
  emith_jump(func); \
  emith_flush(); \
  func = tmp; \
}

  MAKE_READ_WRAPPER(sh2_drc_read8);
  MAKE_READ_WRAPPER(sh2_drc_read16);
  MAKE_READ_WRAPPER(sh2_drc_read32);
  MAKE_WRITE_WRAPPER(sh2_drc_write8);
  MAKE_WRITE_WRAPPER(sh2_drc_write16);
  MAKE_WRITE_WRAPPER(sh2_drc_write32);
  MAKE_READ_WRAPPER(sh2_drc_read8_poll);
  MAKE_READ_WRAPPER(sh2_drc_read16_poll);
  MAKE_READ_WRAPPER(sh2_drc_read32_poll);
#endif

  emith_pool_commit(0);
  rcache_invalidate();
#if (DRC_DEBUG & 4)
  host_dasm_new_symbol(sh2_drc_entry);
  host_dasm_new_symbol(sh2_drc_dispatcher);
#if CALL_STACK
  host_dasm_new_symbol(sh2_drc_dispatcher_call);
  host_dasm_new_symbol(sh2_drc_dispatcher_return);
#endif
  host_dasm_new_symbol(sh2_drc_exit);
  host_dasm_new_symbol(sh2_drc_test_irq);
  host_dasm_new_symbol(sh2_drc_write8);
  host_dasm_new_symbol(sh2_drc_write16);
  host_dasm_new_symbol(sh2_drc_write32);
  host_dasm_new_symbol(sh2_drc_read8);
  host_dasm_new_symbol(sh2_drc_read16);
  host_dasm_new_symbol(sh2_drc_read32);
  host_dasm_new_symbol(sh2_drc_read8_poll);
  host_dasm_new_symbol(sh2_drc_read16_poll);
  host_dasm_new_symbol(sh2_drc_read32_poll);
#ifdef DRC_SR_REG
  host_dasm_new_symbol(sh2_drc_save_sr);
  host_dasm_new_symbol(sh2_drc_restore_sr);
#endif
#endif
#if DRC_DEBUG
  host_insn_count = hic;
#endif
}

static void sh2_smc_rm_blocks(u32 a, int len, int tcache_id, u32 shift)
{
  struct block_list **blist, *entry, *next;
  u32 mask = RAM_SIZE(tcache_id) - 1;
  u32 wtmask = ~0x20000000; // writethrough area mask
  u32 start_addr, end_addr;
  u32 start_lit, end_lit;
  struct block_desc *block;
#if (DRC_DEBUG & 2)
  int removed = 0;
#endif

  // ignore cache-through
  a &= wtmask;
  blist = &inval_lookup[tcache_id][(a & mask) / INVAL_PAGE_SIZE];
  entry = *blist;
  // go through the block list for this range
  while (entry != NULL) {
    next = entry->next;
    block = entry->block;
    start_addr = block->addr & wtmask;
    end_addr = start_addr + block->size;
    start_lit = block->addr_lit & wtmask;
    end_lit = start_lit + block->size_lit;
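    // (added) [start,end) against [a,a+len) below is the usual half-open
    // interval overlap test (start < a+len && a < end), applied to both the
    // code range and the literal pool of the block.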
    // disable/delete block if it covers the modified address
    if ((start_addr < a+len && a < end_addr) ||
        (start_lit < a+len && a < end_lit))
    {
      dbg(2, "smc remove @%08x", a);
      end_addr = (start_lit < a+len && block->size_lit ? a : 0);
      dr_rm_block_entry(block, tcache_id, end_addr, 0);
#if (DRC_DEBUG & 2)
      removed = 1;
#endif
    }
    entry = next;
  }
#if (DRC_DEBUG & 2)
  if (!removed)
    dbg(2, "rm_blocks called @%08x, no work?", a);
#endif
#if BRANCH_CACHE
  if (tcache_id)
    memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  else {
    memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
  }
#endif
#if CALL_STACK
  if (tcache_id) {
    memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    sh2s[tcache_id-1].rts_cache_idx = 0;
  } else {
    memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  }
#endif
}

void sh2_drc_wcheck_ram(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 0, SH2_DRCBLK_RAM_SHIFT);
}

void sh2_drc_wcheck_da(u32 a, unsigned len, SH2 *sh2)
{
  sh2_smc_rm_blocks(a, len, 1 + sh2->is_slave, SH2_DRCBLK_DA_SHIFT);
}

int sh2_execute_drc(SH2 *sh2c, int cycles)
{
  int ret_cycles;

  // cycles are kept in SHR_SR unused bits (upper 20)
  // bit11 contains T saved for delay slot
  // others are usual SH2 flags
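  // (added) e.g. entering with cycles=1000 gives sr = (1000 << 12) | flags;
  // translated code counts the slice down in place, so a non-positive
  // (int32_t)sr >> 12 on exit means the slice was consumed (a positive
  // remainder triggers the warning below).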
  5091. sh2c->sr &= 0x3f3;
  5092. sh2c->sr |= cycles << 12;
  5093. sh2c->state |= SH2_IN_DRC;
  5094. sh2_drc_entry(sh2c);
  5095. sh2c->state &= ~SH2_IN_DRC;
  5096. // TODO: irq cycles
  5097. ret_cycles = (int32_t)sh2c->sr >> 12;
  5098. if (ret_cycles > 0)
  5099. dbg(1, "warning: drc returned with cycles: %d, pc %08x", ret_cycles, sh2c->pc);
  5100. sh2c->sr &= 0x3f3;
  5101. return ret_cycles;
  5102. }
  5103. static void block_stats(void)
  5104. {
  5105. #if (DRC_DEBUG & 2)
  5106. int c, b, i;
  5107. long total = 0;
  5108. printf("block stats:\n");
  5109. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5110. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5111. if (block_tables[b][i].addr != 0)
  5112. total += block_tables[b][i].refcount;
  5113. }
  5114. printf("total: %ld\n",total);
  5115. for (c = 0; c < 20; c++) {
  5116. struct block_desc *blk, *maxb = NULL;
  5117. int max = 0;
  5118. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5119. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5120. if ((blk = &block_tables[b][i])->addr != 0 && blk->refcount > max) {
  5121. max = blk->refcount;
  5122. maxb = blk;
  5123. }
  5124. }
  5125. if (maxb == NULL)
  5126. break;
  5127. printf("%08x %p %9d %2.3f%%\n", maxb->addr, maxb->tcache_ptr, maxb->refcount,
  5128. (double)maxb->refcount / total * 100.0);
  5129. maxb->refcount = 0;
  5130. }
  5131. for (b = 0; b < ARRAY_SIZE(block_tables); b++)
  5132. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5133. block_tables[b][i].refcount = 0;
  5134. #endif
  5135. }
  5136. void entry_stats(void)
  5137. {
  5138. #if (DRC_DEBUG & 32)
  5139. int c, b, i, j;
  5140. long total = 0;
  5141. printf("block entry stats:\n");
  5142. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5143. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5144. for (j = 0; j < block_tables[b][i].entry_count; j++)
  5145. total += block_tables[b][i].entryp[j].entry_count;
  5146. }
  5147. printf("total: %ld\n",total);
  5148. for (c = 0; c < 20; c++) {
  5149. struct block_desc *blk;
  5150. struct block_entry *maxb = NULL;
  5151. int max = 0;
  5152. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5153. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size) {
  5154. blk = &block_tables[b][i];
  5155. for (j = 0; j < blk->entry_count; j++)
  5156. if (blk->entryp[j].entry_count > max) {
  5157. max = blk->entryp[j].entry_count;
  5158. maxb = &blk->entryp[j];
  5159. }
  5160. }
  5161. }
  5162. if (maxb == NULL)
  5163. break;
  5164. printf("%08x %p %9d %2.3f%%\n", maxb->pc, maxb->tcache_ptr, maxb->entry_count,
  5165. (double)100 * maxb->entry_count / total);
  5166. maxb->entry_count = 0;
  5167. }
  5168. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  5169. for (i = block_ring[b].first; i != block_ring[b].next; i = (i+1)%block_ring[b].size)
  5170. for (j = 0; j < block_tables[b][i].entry_count; j++)
  5171. block_tables[b][i].entryp[j].entry_count = 0;
  5172. }
  5173. #endif
  5174. }
  5175. static void backtrace(void)
  5176. {
  5177. #if (DRC_DEBUG & 1024)
  5178. int i;
  5179. printf("backtrace master:\n");
  5180. for (i = 0; i < ARRAY_SIZE(csh2[0]); i++)
  5181. SH2_DUMP(&csh2[0][i], "bt msh2");
  5182. printf("backtrace slave:\n");
  5183. for (i = 0; i < ARRAY_SIZE(csh2[1]); i++)
  5184. SH2_DUMP(&csh2[1][i], "bt ssh2");
  5185. #endif
  5186. }
  5187. static void state_dump(void)
  5188. {
  5189. #if (DRC_DEBUG & 2048)
  5190. int i;
  5191. SH2_DUMP(&sh2s[0], "master");
  5192. printf("VBR msh2: %x\n", sh2s[0].vbr);
  5193. for (i = 0; i < 0x60; i++) {
  5194. printf("%08x ",p32x_sh2_read32(sh2s[0].vbr + i*4, &sh2s[0]));
  5195. if ((i+1) % 8 == 0) printf("\n");
  5196. }
  5197. printf("stack msh2: %x\n", sh2s[0].r[15]);
  5198. for (i = -0x30; i < 0x30; i++) {
  5199. printf("%08x ",p32x_sh2_read32(sh2s[0].r[15] + i*4, &sh2s[0]));
  5200. if ((i+1) % 8 == 0) printf("\n");
  5201. }
  5202. SH2_DUMP(&sh2s[1], "slave");
  5203. printf("VBR ssh2: %x\n", sh2s[1].vbr);
  5204. for (i = 0; i < 0x60; i++) {
  5205. printf("%08x ",p32x_sh2_read32(sh2s[1].vbr + i*4, &sh2s[1]));
  5206. if ((i+1) % 8 == 0) printf("\n");
  5207. }
  5208. printf("stack ssh2: %x\n", sh2s[1].r[15]);
  5209. for (i = -0x30; i < 0x30; i++) {
  5210. printf("%08x ",p32x_sh2_read32(sh2s[1].r[15] + i*4, &sh2s[1]));
  5211. if ((i+1) % 8 == 0) printf("\n");
  5212. }
  5213. #endif
  5214. }
  5215. static void bcache_stats(void)
  5216. {
  5217. #if (DRC_DEBUG & 128)
  5218. int i;
  5219. #if CALL_STACK
  5220. for (i = 1; i < ARRAY_SIZE(sh2s->rts_cache); i++)
  5221. if (sh2s[0].rts_cache[i].pc == -1 && sh2s[1].rts_cache[i].pc == -1) break;
  5222. printf("return cache hits:%d misses:%d depth: %d index: %d/%d\n", rchit, rcmiss, i,sh2s[0].rts_cache_idx,sh2s[1].rts_cache_idx);
  5223. for (i = 0; i < ARRAY_SIZE(sh2s[0].rts_cache); i++) {
  5224. printf("%08x ",sh2s[0].rts_cache[i].pc);
  5225. if ((i+1) % 8 == 0) printf("\n");
  5226. }
  5227. for (i = 0; i < ARRAY_SIZE(sh2s[1].rts_cache); i++) {
  5228. printf("%08x ",sh2s[1].rts_cache[i].pc);
  5229. if ((i+1) % 8 == 0) printf("\n");
  5230. }
  5231. #endif
  5232. #if BRANCH_CACHE
  5233. printf("branch cache hits:%d misses:%d\n", bchit, bcmiss);
  5234. printf("branch cache master:\n");
  5235. for (i = 0; i < ARRAY_SIZE(sh2s[0].branch_cache); i++) {
  5236. printf("%08x ",sh2s[0].branch_cache[i].pc);
  5237. if ((i+1) % 8 == 0) printf("\n");
  5238. }
  5239. printf("branch cache slave:\n");
  5240. for (i = 0; i < ARRAY_SIZE(sh2s[1].branch_cache); i++) {
  5241. printf("%08x ",sh2s[1].branch_cache[i].pc);
  5242. if ((i+1) % 8 == 0) printf("\n");
  5243. }
  5244. #endif
  5245. #endif
  5246. }
  5247. void sh2_drc_flush_all(void)
  5248. {
  5249. backtrace();
  5250. state_dump();
  5251. block_stats();
  5252. entry_stats();
  5253. bcache_stats();
  5254. dr_flush_tcache(0);
  5255. dr_flush_tcache(1);
  5256. dr_flush_tcache(2);
  5257. Pico32x.emu_flags &= ~P32XF_DRC_ROM_C;
  5258. }
  5259. void sh2_drc_mem_setup(SH2 *sh2)
  5260. {
  5261. // fill the DRC-only convenience pointers
  5262. sh2->p_drcblk_da = Pico32xMem->drcblk_da[!!sh2->is_slave];
  5263. sh2->p_drcblk_ram = Pico32xMem->drcblk_ram;
  5264. }
  5265. int sh2_drc_init(SH2 *sh2)
  5266. {
  5267. int i;
  5268. if (block_tables[0] == NULL)
  5269. {
  5270. for (i = 0; i < TCACHE_BUFFERS; i++) {
  5271. block_tables[i] = calloc(BLOCK_MAX_COUNT(i), sizeof(*block_tables[0]));
  5272. if (block_tables[i] == NULL)
  5273. goto fail;
  5274. entry_tables[i] = calloc(ENTRY_MAX_COUNT(i), sizeof(*entry_tables[0]));
  5275. if (entry_tables[i] == NULL)
  5276. goto fail;
  5277. block_link_pool[i] = calloc(BLOCK_LINK_MAX_COUNT(i),
  5278. sizeof(*block_link_pool[0]));
  5279. if (block_link_pool[i] == NULL)
  5280. goto fail;
  5281. inval_lookup[i] = calloc(RAM_SIZE(i) / INVAL_PAGE_SIZE,
  5282. sizeof(inval_lookup[0]));
  5283. if (inval_lookup[i] == NULL)
  5284. goto fail;
  5285. hash_tables[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*hash_tables[0]));
  5286. if (hash_tables[i] == NULL)
  5287. goto fail;
  5288. unresolved_links[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*unresolved_links[0]));
  5289. if (unresolved_links[i] == NULL)
  5290. goto fail;
  5291. //atexit(sh2_drc_finish);
  5292. RING_INIT(&block_ring[i], block_tables[i], BLOCK_MAX_COUNT(i));
  5293. RING_INIT(&entry_ring[i], entry_tables[i], ENTRY_MAX_COUNT(i));
  5294. }
  5295. block_list_pool = calloc(BLOCK_LIST_MAX_COUNT, sizeof(*block_list_pool));
  5296. if (block_list_pool == NULL)
  5297. goto fail;
  5298. block_list_pool_count = 0;
  5299. blist_free = NULL;
  5300. memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
  5301. memset(blink_free, 0, sizeof(blink_free));
  5302. drc_cmn_init();
  5303. rcache_init();
  5304. tcache_ptr = tcache;
  5305. sh2_generate_utils();
  5306. host_instructions_updated(tcache, tcache_ptr, 1);
  5307. emith_update_cache();
  5308. i = tcache_ptr - tcache;
  5309. RING_INIT(&tcache_ring[0], tcache_ptr, tcache_sizes[0] - i);
  5310. for (i = 1; i < ARRAY_SIZE(tcache_ring); i++) {
  5311. RING_INIT(&tcache_ring[i], tcache_ring[i-1].base + tcache_ring[i-1].size,
  5312. tcache_sizes[i]);
  5313. }
  5314. #if (DRC_DEBUG & 4)
  5315. for (i = 0; i < ARRAY_SIZE(block_tables); i++)
  5316. tcache_dsm_ptrs[i] = tcache_ring[i].base;
  5317. // disasm the utils
  5318. tcache_dsm_ptrs[0] = tcache;
  5319. do_host_disasm(0);
  5320. fflush(stdout);
  5321. #endif
  5322. #if (DRC_DEBUG & 1)
  5323. hash_collisions = 0;
  5324. #endif
  5325. }
  5326. memset(sh2->branch_cache, -1, sizeof(sh2->branch_cache));
  5327. memset(sh2->rts_cache, -1, sizeof(sh2->rts_cache));
  5328. sh2->rts_cache_idx = 0;
  5329. return 0;
  5330. fail:
  5331. sh2_drc_finish(sh2);
  5332. return -1;
  5333. }
void sh2_drc_finish(SH2 *sh2)
{
  int i;

  if (block_tables[0] == NULL)
    return;

#if (DRC_DEBUG & (256|512))
  if (trace[0]) fclose(trace[0]);
  if (trace[1]) fclose(trace[1]);
  trace[0] = trace[1] = NULL;
#endif

#if (DRC_DEBUG & 4)
  for (i = 0; i < TCACHE_BUFFERS; i++) {
    printf("~~~ tcache %d\n", i);
#if 0
    if (tcache_ring[i].first < tcache_ring[i].next) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    } else if (tcache_ring[i].used) {
      tcache_dsm_ptrs[i] = tcache_ring[i].first;
      tcache_ptr = tcache_ring[i].base + tcache_ring[i].size;
      do_host_disasm(i);
      tcache_dsm_ptrs[i] = tcache_ring[i].base;
      tcache_ptr = tcache_ring[i].next;
      do_host_disasm(i);
    }
#endif
    printf("max links: %d\n", block_link_pool_counts[i]);
  }
  printf("max block list: %d\n", block_list_pool_count);
#endif

  sh2_drc_flush_all();

  for (i = 0; i < TCACHE_BUFFERS; i++) {
    if (block_tables[i] != NULL)
      free(block_tables[i]);
    block_tables[i] = NULL;
    if (entry_tables[i] != NULL)
      free(entry_tables[i]);
    entry_tables[i] = NULL;
    if (block_link_pool[i] != NULL)
      free(block_link_pool[i]);
    block_link_pool[i] = NULL;
    blink_free[i] = NULL;

    if (inval_lookup[i] != NULL)
      free(inval_lookup[i]);
    inval_lookup[i] = NULL;

    if (hash_tables[i] != NULL) {
      free(hash_tables[i]);
      hash_tables[i] = NULL;
    }
  }

  if (block_list_pool != NULL)
    free(block_list_pool);
  block_list_pool = NULL;
  blist_free = NULL;

  drc_cmn_cleanup();
}

#endif /* DRC_SH2 */
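
// Return a base pointer chosen so that base + pc is the host address of
// the opcode at pc (the bits of pc outside the region mask cancel out),
// or (void *)-1 if pc is not in directly accessible memory.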
static void *dr_get_pc_base(u32 pc, SH2 *sh2)
{
  void *ret;
  u32 mask = 0;

  ret = p32x_sh2_get_mem_ptr(pc, &mask, sh2);
  if (ret == (void *)-1)
    return ret;

  return (char *)ret - (pc & ~mask);
}
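
// Scan and analyze a block of SH2 code starting at base_pc. Pass 1
// disassembles up to BLOCK_INSN_LIMIT insns into ops[], marking branch
// targets and delay slots in op_flags[]. Pass 2 propagates the T flag,
// verifies divide sequences, trims unreachable code, sizes the literal
// pool and detects inner loops. Returns a 16 bit checksum over the
// scanned code and literals, which callers can use to detect changed code.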
u16 scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc_out,
  u32 *base_literals_out, u32 *end_literals_out)
{
  u16 *dr_pc_base;
  u32 pc, op, tmp;
  u32 end_pc, end_literals = 0;
  u32 lowest_literal = 0;
  u32 lowest_mova = 0;
  struct op_data *opd;
  int next_is_delay = 0;
  int end_block = 0;
  int is_divop;
  int i, i_end, i_div = -1;
  u32 crc = 0;
  // 2nd pass stuff
  int last_btarget; // loop detector
  enum { T_UNKNOWN, T_CLEAR, T_SET } t; // T propagation state

  memset(op_flags, 0, sizeof(*op_flags) * BLOCK_INSN_LIMIT);
  op_flags[0] |= OF_BTARGET; // block start is always a target

  dr_pc_base = dr_get_pc_base(base_pc, &sh2s[!!is_slave]);

  // 1st pass: disassemble
  for (i = 0, pc = base_pc; ; i++, pc += 2) {
    // we need an ops[] entry after the last one initialized,
    // so do it before end_block checks
    opd = &ops[i];
    opd->op = OP_UNHANDLED;
    opd->rm = -1;
    opd->source = opd->dest = 0;
    opd->cycles = 1;
    opd->imm = 0;

    if (next_is_delay) {
      op_flags[i] |= OF_DELAY_OP;
      next_is_delay = 0;
    }
    else if (end_block || i >= BLOCK_INSN_LIMIT - 2)
      break;
    else if ((lowest_mova && lowest_mova <= pc) ||
              (lowest_literal && lowest_literal <= pc))
      break; // text area collides with data area

    is_divop = 0;
    op = FETCH_OP(pc);
    switch ((op & 0xf000) >> 12)
    {
    /////////////////////////////////////////////
    case 0x00:
      switch (op & 0x0f)
      {
      case 0x02:
        switch (GET_Fx())
        {
        case 0: // STC SR,Rn  0000nnnn00000010
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // STC GBR,Rn 0000nnnn00010010
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // STC VBR,Rn 0000nnnn00100010
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = tmp;
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x03:
        CHECK_UNHANDLED_BITS(0xd0, undefined);
        // BRAF Rm    0000mmmm00100011
        // BSRF Rm    0000mmmm00000011
        opd->op = OP_BRANCH_RF;
        opd->rm = GET_Rn();
        opd->source = BITMASK2(SHR_PC, opd->rm);
        opd->dest = BITMASK1(SHR_PC);
        if (!(op & 0x20))
          opd->dest |= BITMASK1(SHR_PR);
        opd->cycles = 2;
        next_is_delay = 1;
        if (!(opd->dest & BITMASK1(SHR_PR)))
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        else
          op_flags[i+1+next_is_delay] |= OF_BTARGET;
        break;
      case 0x04: // MOV.B Rm,@(R0,Rn)   0000nnnnmmmm0100
      case 0x05: // MOV.W Rm,@(R0,Rn)   0000nnnnmmmm0101
      case 0x06: // MOV.L Rm,@(R0,Rn)   0000nnnnmmmm0110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x07:
        // MUL.L Rm,Rn         0000nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        opd->cycles = 2;
        break;
      case 0x08:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // CLRT       0000000000001000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 0;
          break;
        case 1: // SETT       0000000000011000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 1;
          break;
        case 2: // CLRMAC     0000000000101000
          opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // NOP        0000000000001001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          break;
        case 1: // DIV0U      0000000000011001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          opd->op = OP_DIV0;
          opd->source = BITMASK1(SHR_SR);
          opd->dest = BITMASK2(SHR_SR, SHR_T);
          div(opd) = (struct div){ .rn=SHR_MEM, .rm=SHR_MEM, .ro=SHR_MEM };
          i_div = i;
          is_divop = 1;
          break;
        case 2: // MOVT Rn    0000nnnn00101001
          opd->source = BITMASK1(SHR_T);
          opd->dest = BITMASK1(GET_Rn());
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // STS MACH,Rn 0000nnnn00001010
          tmp = SHR_MACH;
          break;
        case 1: // STS MACL,Rn 0000nnnn00011010
          tmp = SHR_MACL;
          break;
        case 2: // STS PR,Rn   0000nnnn00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(tmp);
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0b:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // RTS        0000000000001011
          opd->op = OP_BRANCH_R;
          opd->rm = SHR_PR;
          opd->source = BITMASK1(opd->rm);
          opd->dest = BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        case 1: // SLEEP      0000000000011011
          opd->op = OP_SLEEP;
          end_block = 1;
          break;
        case 2: // RTE        0000000000101011
          opd->op = OP_RTE;
          opd->source = BITMASK1(SHR_SP);
          opd->dest = BITMASK4(SHR_SP, SHR_SR, SHR_T, SHR_PC);
          opd->cycles = 4;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0c: // MOV.B @(R0,Rm),Rn   0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn   0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn   0000nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(GET_Rn());
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0f: // MAC.L @Rm+,@Rn+     0000nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x01:
      // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), GET_Rn());
      opd->dest = BITMASK1(SHR_MEM);
      opd->imm = (op & 0x0f) * 4;
      break;

    /////////////////////////////////////////////
    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn        0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn        0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn        0010nnnnmmmm0010
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x04: // MOV.B Rm,@-Rn       0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn       0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn       0010nnnnmmmm0110
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x07: // DIV0S Rm,Rn         0010nnnnmmmm0111
        opd->op = OP_DIV0;
        opd->source = BITMASK3(SHR_SR, GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_SR, SHR_T);
        div(opd) = (struct div){ .rn=GET_Rn(), .rm=GET_Rm(), .ro=SHR_MEM };
        i_div = i;
        is_divop = 1;
        break;
      case 0x08: // TST Rm,Rn           0010nnnnmmmm1000
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x09: // AND Rm,Rn           0010nnnnmmmm1001
      case 0x0a: // XOR Rm,Rn           0010nnnnmmmm1010
      case 0x0b: // OR  Rm,Rn           0010nnnnmmmm1011
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0c: // CMP/STR Rm,Rn       0010nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x0d: // XTRCT Rm,Rn         0010nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0e: // MULU.W Rm,Rn        0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn        0010nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn        0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn        0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn        0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn        0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn        0011nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x04: // DIV1 Rm,Rn          0011nnnnmmmm0100
        opd->source = BITMASK4(GET_Rm(), GET_Rn(), SHR_SR, SHR_T);
        opd->dest = BITMASK3(GET_Rn(), SHR_SR, SHR_T);
        if (i_div >= 0) {
          // divide operation: all DIV1 operations must use the same reg pair
          if (div(&ops[i_div]).rn == SHR_MEM)
            div(&ops[i_div]).rn=GET_Rn(), div(&ops[i_div]).rm=GET_Rm();
          if (div(&ops[i_div]).rn == GET_Rn() && div(&ops[i_div]).rm == GET_Rm()) {
            div(&ops[i_div]).div1 += 1;
            div(&ops[i_div]).state = 0;
            is_divop = 1;
          } else {
            ops[i_div].imm = 0;
            i_div = -1;
          }
        }
        break;
      case 0x05: // DMULU.L Rm,Rn       0011nnnnmmmm0101
      case 0x0d: // DMULS.L Rm,Rn       0011nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
        opd->cycles = 2;
        break;
      case 0x08: // SUB Rm,Rn           0011nnnnmmmm1000
      case 0x0c: // ADD Rm,Rn           0011nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a: // SUBC Rm,Rn          0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn          0011nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x0b: // SUBV Rm,Rn          0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn          0011nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn    0100nnnn00000000
        case 2: // SHAL Rn    0100nnnn00100000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // DT Rn      0100nnnn00010000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          op_flags[i] |= OF_DELAY_INSN;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn    0100nnnn00000001
        case 2: // SHAR Rn    0100nnnn00100001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // CMP/PZ Rn  0100nnnn00010001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn    0100nnnn00000010
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x12: // STS.L MACL,@-Rn    0100nnnn00010010
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x22: // STS.L PR,@-Rn      0100nnnn00100010
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x03: // STC.L SR,@-Rn      0100nnnn00000011
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->cycles = 2;
          break;
        case 0x13: // STC.L GBR,@-Rn     0100nnnn00010011
          tmp = BITMASK1(SHR_GBR);
          opd->cycles = 2;
          break;
        case 0x23: // STC.L VBR,@-Rn     0100nnnn00100011
          tmp = BITMASK1(SHR_VBR);
          opd->cycles = 2;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn()) | tmp;
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn            0100nnnn00000100
        case 0x05: // ROTR Rn            0100nnnn00000101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x24: // ROTCL Rn           0100nnnn00100100
          if (i_div >= 0) {
            // divide operation: all ROTCL operations must use the same register
            if (div(&ops[i_div]).ro == SHR_MEM)
              div(&ops[i_div]).ro = GET_Rn();
            if (div(&ops[i_div]).ro == GET_Rn() && !div(&ops[i_div]).state) {
              div(&ops[i_div]).rotcl += 1;
              div(&ops[i_div]).state = 1;
              is_divop = 1;
            } else {
              ops[i_div].imm = 0;
              i_div = -1;
            }
          }
          // fallthrough
        case 0x25: // ROTCR Rn           0100nnnn00100101
          opd->source = BITMASK2(GET_Rn(), SHR_T);
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x15: // CMP/PL Rn          0100nnnn00010101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH    0100mmmm00000110
          tmp = BITMASK1(SHR_MACH);
          break;
        case 0x16: // LDS.L @Rm+,MACL    0100mmmm00010110
          tmp = BITMASK1(SHR_MACL);
          break;
        case 0x26: // LDS.L @Rm+,PR      0100mmmm00100110
          tmp = BITMASK1(SHR_PR);
          break;
        case 0x07: // LDC.L @Rm+,SR      0100mmmm00000111
          tmp = BITMASK2(SHR_SR, SHR_T);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x17: // LDC.L @Rm+,GBR     0100mmmm00010111
          tmp = BITMASK1(SHR_GBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x27: // LDC.L @Rm+,VBR     0100mmmm00100111
          tmp = BITMASK1(SHR_VBR);
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), SHR_MEM);
        opd->dest = BITMASK1(GET_Rn()) | tmp;
        break;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0:
          // SHLL2 Rn         0100nnnn00001000
          // SHLR2 Rn         0100nnnn00001001
          break;
        case 1:
          // SHLL8 Rn         0100nnnn00011000
          // SHLR8 Rn         0100nnnn00011001
          break;
        case 2:
          // SHLL16 Rn        0100nnnn00101000
          // SHLR16 Rn        0100nnnn00101001
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp = SHR_MACL;
          break;
        case 2: // LDS Rm,PR   0100mmmm00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0b:
        switch (GET_Fx())
        {
        case 0: // JSR @Rm    0100mmmm00001011
          opd->dest = BITMASK1(SHR_PR);
          // fallthrough
        case 2: // JMP @Rm    0100mmmm00101011
          opd->op = OP_BRANCH_R;
          opd->rm = GET_Rn();
          opd->source = BITMASK1(opd->rm);
          opd->dest |= BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          if (!(opd->dest & BITMASK1(SHR_PR)))
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          else
            op_flags[i+1+next_is_delay] |= OF_BTARGET;
          break;
        case 1: // TAS.B @Rn  0100nnnn00011011
          opd->source = BITMASK2(GET_Rn(), SHR_MEM);
          opd->dest = BITMASK2(SHR_T, SHR_MEM);
          opd->cycles = 4;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR  0100mmmm00001110
          tmp = BITMASK2(SHR_SR, SHR_T);
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp = BITMASK1(SHR_GBR);
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp = BITMASK1(SHR_VBR);
          break;
        default:
          goto undefined;
        }
        opd->op = OP_LDC;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = tmp;
        break;
      case 0x0f:
        // MAC.W @Rm+,@Rn+    0100nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;
    /////////////////////////////////////////////
    case 0x05:
      // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (op & 0x0f) * 4;
      op_flags[i] |= OF_POLL_INSN;
      break;

    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x04: // MOV.B @Rm+,Rn       0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn       0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn       0110nnnnmmmm0110
        opd->dest = BITMASK2(GET_Rm(), GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        break;
      case 0x00: // MOV.B @Rm,Rn        0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn        0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn        0110nnnnmmmm0010
        opd->dest = BITMASK1(GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0a: // NEGC Rm,Rn          0110nnnnmmmm1010
        opd->source = BITMASK2(GET_Rm(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x03: // MOV Rm,Rn           0110nnnnmmmm0011
        opd->op = OP_MOVE;
        goto arith_rmrn;
      case 0x07: // NOT Rm,Rn           0110nnnnmmmm0111
      case 0x08: // SWAP.B Rm,Rn        0110nnnnmmmm1000
      case 0x09: // SWAP.W Rm,Rn        0110nnnnmmmm1001
      case 0x0b: // NEG Rm,Rn           0110nnnnmmmm1011
      case 0x0c: // EXTU.B Rm,Rn        0110nnnnmmmm1100
      case 0x0d: // EXTU.W Rm,Rn        0110nnnnmmmm1101
      case 0x0e: // EXTS.B Rm,Rn        0110nnnnmmmm1110
      case 0x0f: // EXTS.W Rm,Rn        0110nnnnmmmm1111
      arith_rmrn:
        opd->source = BITMASK1(GET_Rm());
        opd->dest = BITMASK1(GET_Rn());
        break;
      }
      break;

    /////////////////////////////////////////////
    case 0x07:
      // ADD #imm,Rn  0111nnnniiiiiiii
      opd->source = opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;
    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn)  10000000nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f);
        break;
      case 0x0100: // MOV.W R0,@(disp,Rn)  10000001nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f) * 2;
        break;
      case 0x0400: // MOV.B @(disp,Rm),R0  10000100mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0500: // MOV.W @(disp,Rm),R0  10000101mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f) * 2;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0800: // CMP/EQ #imm,R0       10001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = (s8)op;
        break;
      case 0x0d00: // BT/S label 10001101dddddddd
      case 0x0f00: // BF/S label 10001111dddddddd
        next_is_delay = 1;
        // fallthrough
      case 0x0900: // BT label   10001001dddddddd
      case 0x0b00: // BF label   10001011dddddddd
        opd->op = (op & 0x0200) ? OP_BRANCH_CF : OP_BRANCH_CT;
        opd->source = BITMASK2(SHR_PC, SHR_T);
        opd->dest = BITMASK1(SHR_PC);
        opd->imm = ((signed int)(op << 24) >> 23);
        opd->imm += pc + 4;
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x09:
      // MOV.W @(disp,PC),Rn  1001nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = tmp + 2 + (op & 0xff) * 2;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 1;
      break;
    /////////////////////////////////////////////
    case 0x0b:
      // BSR label 1011dddddddddddd
      opd->dest = BITMASK1(SHR_PR);
      // fallthrough
    case 0x0a:
      // BRA label 1010dddddddddddd
      opd->op = OP_BRANCH;
      opd->source = BITMASK1(SHR_PC);
      opd->dest |= BITMASK1(SHR_PC);
      opd->imm = ((signed int)(op << 20) >> 19);
      opd->imm += pc + 4;
      opd->cycles = 2;
      next_is_delay = 1;
      if (!(opd->dest & BITMASK1(SHR_PR))) {
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2) {
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
          if (opd->imm <= pc)
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        } else
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
      } else
        op_flags[i+1+next_is_delay] |= OF_BTARGET;
      break;
    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR)  11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR)  11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR)  11000010dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        break;
      case 0x0400: // MOV.B @(disp,GBR),R0  11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0  11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0  11000110dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0300: // TRAPA #imm            11000011iiiiiiii
        opd->op = OP_TRAPA;
        opd->source = BITMASK4(SHR_SP, SHR_PC, SHR_SR, SHR_T);
        opd->dest = BITMASK2(SHR_SP, SHR_PC);
        opd->imm = (op & 0xff);
        opd->cycles = 8;
        op_flags[i+1] |= OF_BTARGET;
        break;
      case 0x0700: // MOVA @(disp,PC),R0    11000111dddddddd
        opd->op = OP_MOVA;
        tmp = pc + 2;
        if (op_flags[i] & OF_DELAY_OP) {
          if (ops[i-1].op == OP_BRANCH)
            tmp = ops[i-1].imm;
          else if (ops[i-1].op != OP_BRANCH_N)
            tmp = 0;
        }
        opd->dest = BITMASK1(SHR_R0);
        if (tmp) {
          opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
          if (opd->imm >= base_pc) {
            if (lowest_mova == 0 || opd->imm < lowest_mova)
              lowest_mova = opd->imm;
          }
        }
        break;
      case 0x0800: // TST #imm,R0           11001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        break;
      case 0x0900: // AND #imm,R0           11001001iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0a00: // XOR #imm,R0           11001010iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0b00: // OR  #imm,R0           11001011iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0c00: // TST.B #imm,@(R0,GBR)  11001100iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        op_flags[i] |= OF_POLL_INSN;
        opd->cycles = 3;
        break;
      case 0x0d00: // AND.B #imm,@(R0,GBR)  11001101iiiiiiii
      case 0x0e00: // XOR.B #imm,@(R0,GBR)  11001110iiiiiiii
      case 0x0f00: // OR.B  #imm,@(R0,GBR)  11001111iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = op & 0xff;
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x0d:
      // MOV.L @(disp,PC),Rn  1101nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 2;
      break;

    /////////////////////////////////////////////
    case 0x0e:
      // MOV #imm,Rn  1110nnnniiiiiiii
      opd->op = OP_LOAD_CONST;
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    default:
    undefined:
      opd->op = OP_UNDEFINED;
      // an unhandled instruction is probably not code if it's not the 1st insn
      if (!(op_flags[i] & OF_DELAY_OP) && pc != base_pc)
        goto end;
      break;
    }

    if (op_flags[i] & OF_DELAY_OP) {
      switch (opd->op) {
      case OP_BRANCH:
      case OP_BRANCH_N:
      case OP_BRANCH_CT:
      case OP_BRANCH_CF:
      case OP_BRANCH_R:
      case OP_BRANCH_RF:
        elprintf(EL_ANOMALY, "%csh2 drc: branch in DS @ %08x",
          is_slave ? 's' : 'm', pc);
        opd->op = OP_UNDEFINED;
        op_flags[i] |= OF_B_IN_DS;
        next_is_delay = 0;
        break;
      }
    } else if (!is_divop && i_div >= 0)
      i_div = -1; // divide parser stop
  }
end:

  i_end = i;
  end_pc = pc;
  // 2nd pass: some analysis
  lowest_literal = end_literals = lowest_mova = 0;
  t = T_UNKNOWN; // T flag state
  last_btarget = 0;
  op = 0; // delay/poll insns counter
  is_divop = 0; // divide op insns counter
  i_div = -1; // index of current divide op
  for (i = 0, pc = base_pc; i < i_end; i++, pc += 2) {
    opd = &ops[i];
    crc += FETCH_OP(pc);

    // propagate T (TODO: DIV0U)
    if (op_flags[i] & OF_BTARGET)
      t = T_UNKNOWN;

    if ((opd->op == OP_BRANCH_CT && t == T_SET) ||
        (opd->op == OP_BRANCH_CF && t == T_CLEAR)) {
      opd->op = OP_BRANCH;
      opd->cycles = (op_flags[i + 1] & OF_DELAY_OP) ? 2 : 3;
    } else if ((opd->op == OP_BRANCH_CT && t == T_CLEAR) ||
               (opd->op == OP_BRANCH_CF && t == T_SET))
      opd->op = OP_BRANCH_N;
    else if (OP_ISBRACND(opd->op))
      t = (opd->op == OP_BRANCH_CF ? T_SET : T_CLEAR);
    else if (opd->op == OP_SETCLRT)
      t = (opd->imm ? T_SET : T_CLEAR);
    else if (opd->dest & BITMASK1(SHR_T))
      t = T_UNKNOWN;

    // "overscan" detection: unreachable code after unconditional branch
    // this can happen if the insn after a forward branch isn't a local target
    if (OP_ISBRAUC(opd->op)) {
      if (op_flags[i + 1] & OF_DELAY_OP) {
        if (i_end > i + 2 && !(op_flags[i + 2] & OF_BTARGET))
          i_end = i + 2;
      } else {
        if (i_end > i + 1 && !(op_flags[i + 1] & OF_BTARGET))
          i_end = i + 1;
      }
    }

    // divide operation verification:
    // 1. there must not be a branch target inside
    // 2. nothing is in a delay slot (could only be DIV0)
    // 3. DIV0/n*(ROTCL+DIV1)/ROTCL:
    //      div.div1 > 0 && div.rotcl == div.div1+1 && div.rn != div.ro
    // 4. DIV0/n*DIV1/ROTCL:
    //      div.div1 > 0 && div.rotcl == 1 && div.ro == div.rn
    if (i_div >= 0) {
      if (op_flags[i] & OF_BTARGET) { // condition 1
        ops[i_div].imm = 0;
        i_div = -1;
      } else if (--is_divop == 0)
        i_div = -1;
    } else if (opd->op == OP_DIV0) {
      struct div *div = &div(opd);
      is_divop = div->div1 + div->rotcl;
      if (op_flags[i] & OF_DELAY_OP) // condition 2
        opd->imm = 0;
      else if (! div->div1 || ! ((div->ro == div->rn && div->rotcl == 1) ||
               (div->ro != div->rn && div->rotcl == div->div1+1)))
        opd->imm = 0; // conditions 3+4
      else if (is_divop)
        i_div = i;
    }

    // literal pool size detection
    if (opd->op == OP_MOVA && opd->imm >= base_pc)
      if (lowest_mova == 0 || opd->imm < lowest_mova)
        lowest_mova = opd->imm;
    if (opd->op == OP_LOAD_POOL) {
      if (opd->imm >= base_pc && opd->imm < end_pc + MAX_LITERAL_OFFSET) {
        if (end_literals < opd->imm + opd->size * 2)
          end_literals = opd->imm + opd->size * 2;
        if (lowest_literal == 0 || lowest_literal > opd->imm)
          lowest_literal = opd->imm;
        if (opd->size == 2) {
          // tweak for NFL: treat a 32bit literal as an address and check if it
          // points to the literal space. In that case handle it like MOVA.
          tmp = FETCH32(opd->imm) & ~0x20000000; // MUST ignore wt bit here
          if (tmp >= end_pc && tmp < end_pc + MAX_LITERAL_OFFSET)
            if (lowest_mova == 0 || tmp < lowest_mova)
              lowest_mova = tmp;
        }
      }
    }

#if LOOP_DETECTION
    // inner loop detection
    // 1. a loop always starts with a branch target (for the backwards jump)
    // 2. it doesn't contain more than one polling and/or delaying insn
    // 3. it doesn't contain unconditional jumps
    // 4. no overlapping of loops
    if (op_flags[i] & OF_BTARGET) {
      last_btarget = i; // possible loop starting point
      op = 0;
    }
    // XXX let's hope nobody is putting a delay or poll insn in a delay slot :-/
    if (OP_ISBRAIMM(opd->op)) {
      // BSR, BRA, BT, BF with immediate target
      int i_tmp = (opd->imm - base_pc) / 2; // branch target, index in ops
      if (i_tmp == last_btarget) // candidate for basic loop optimizer
        op_flags[i_tmp] |= OF_BASIC_LOOP;
      if (i_tmp == last_btarget && op <= 1) {
        op_flags[i_tmp] |= OF_LOOP; // conditions met -> mark loop
        last_btarget = i+1; // condition 4
      } else if (opd->op == OP_BRANCH)
        last_btarget = i+1; // condition 3
    }
    else if (OP_ISBRAIND(opd->op))
      // BRAF, BSRF, JMP, JSR, register indirect. treat it as off-limits jump
      last_btarget = i+1; // condition 3
    else if (op_flags[i] & (OF_POLL_INSN|OF_DELAY_INSN))
      op++; // condition 2
#endif
  }
  end_pc = pc;

  // end_literals is used to decide whether to inline a literal or not
  // XXX: need better detection of whether the literal area is also written to
  if (lowest_literal >= base_pc) {
    if (lowest_literal < end_pc) {
      dbg(1, "warning: lowest_literal=%08x < end_pc=%08x", lowest_literal, end_pc);
      // TODO: does this always mean end_pc covers data?
    }
  }
  if (lowest_mova >= base_pc) {
    if (lowest_mova < end_literals) {
      dbg(1, "warning: mova=%08x < end_literals=%08x", lowest_mova, end_literals);
      end_literals = lowest_mova;
    }
    if (lowest_mova < end_pc) {
      dbg(1, "warning: mova=%08x < end_pc=%08x", lowest_mova, end_pc);
      end_literals = end_pc;
    }
  }
  if (lowest_literal >= end_literals)
    lowest_literal = end_literals;

  if (lowest_literal && end_literals)
    for (pc = lowest_literal; pc < end_literals; pc += 2)
      crc += FETCH_OP(pc);

  *end_pc_out = end_pc;
  if (base_literals_out != NULL)
    *base_literals_out = (lowest_literal ? lowest_literal : end_pc);
  if (end_literals_out != NULL)
    *end_literals_out = (end_literals ? end_literals : end_pc);

  // crc overflow handling: fold the carry back in twice, since the
  // first fold may itself produce a carry
  crc = (crc & 0xffff) + (crc >> 16);
  crc = (crc & 0xffff) + (crc >> 16);
  return crc;
}

// vim:shiftwidth=2:ts=2:expandtab