/*
 * SH2 recompiler
 * (C) notaz, 2009,2010,2013
 * (C) kub, 2018,2019
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 *
 * notes:
 * - tcache, block descriptor, link buffer overflows result in sh2_translate()
 *   failure, followed by full tcache invalidation for that region
 * - jumps between blocks are tracked for SMC handling (in block_entry->links),
 *   except jumps between different tcaches
 *
 * implemented:
 * - static register allocation
 * - remaining register caching and tracking in temporaries
 * - block-local branch linking
 * - block linking (except between tcaches)
 * - some constant propagation
 *
 * TODO:
 * - better constant propagation
 * - stack caching?
 * - bug fixing
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "../../pico/pico_int.h"
#include "../../pico/arm_features.h"
#include "sh2.h"
#include "compiler.h"
#include "../drc/cmn.h"
#include "../debug.h"
// features
#define PROPAGATE_CONSTANTS     1
#define LINK_BRANCHES           1
#define BRANCH_CACHE            1
#define CALL_STACK              0
#define ALIAS_REGISTERS         1
#define REMAP_REGISTER          1
#define LOOP_DETECTION          1

// limits (per block)
#define MAX_BLOCK_SIZE          (BLOCK_INSN_LIMIT * 6 * 6)

// max literal offset from the block end
#define MAX_LITERAL_OFFSET      0x200  // max. MOVA, MOV @(PC) offset
#define MAX_LITERALS            (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES      (BLOCK_INSN_LIMIT / 4)
// debug stuff
// 01 - warnings/errors
// 02 - block info/smc
// 04 - asm
// 08 - runtime block entry log
// 10 - smc self-check
// 20 - runtime block entry counter
// 40 - rcache checking
// 80 - branch cache statistics
// 100 - write trace
// 200 - compare trace
// 400 - block entry backtrace on exit
// 800 - state dump on exit
// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0//x8e7
#endif

#if DRC_DEBUG
#define dbg(l,...) { \
  if ((l) & DRC_DEBUG) \
    elprintf(EL_STATUS, ##__VA_ARGS__); \
}
#include "mame/sh2dasm.h"
#include <platform/libpicofe/linux/host_dasm.h>
static int insns_compiled, hash_collisions, host_insn_count;
#define COUNT_OP \
  host_insn_count++
#else // !DRC_DEBUG
#define COUNT_OP
#define dbg(...)
#endif
///
#define FETCH_OP(pc) \
  dr_pc_base[(pc) / 2]

#define FETCH32(a) \
  ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
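
// Illustrative example (not in the original source): SH2 code is fetched as
// 16-bit words, so both macros index dr_pc_base by halfwords, assuming
// dr_pc_base has been adjusted so that indexing by pc/2 yields the opcode
// at SH2 address pc. A 32-bit literal is read high word first (big-endian):
//   FETCH_OP(0x40) -> dr_pc_base[0x20]                        // one opcode
//   FETCH32(0x40)  -> (dr_pc_base[0x20] << 16) | dr_pc_base[0x21]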
#define CHECK_UNHANDLED_BITS(mask, label) { \
  if ((op & (mask)) != 0) \
    goto label; \
}

#define GET_Fx() \
  ((op >> 4) & 0x0f)

#define GET_Rm GET_Fx

#define GET_Rn() \
  ((op >> 8) & 0x0f)

#define SHR_T   SHR_SR // might make them separate someday
#define SHR_MEM 31
#define SHR_TMP -1

// SH2 status register (SR) bits
#define T       0x00000001
#define S       0x00000002
#define I       0x000000f0
#define Q       0x00000100
#define M       0x00000200
#define T_save  0x00000800

#define I_SHIFT 4
#define Q_SHIFT 8
#define M_SHIFT 9
static struct op_data {
  u8 op;
  u8 cycles;
  u8 size;     // 0, 1, 2 - byte, word, long
  s8 rm;       // branch or load/store data reg
  u32 source;  // bitmask of src regs
  u32 dest;    // bitmask of dest regs
  u32 imm;     // immediate/io address/branch target
               // (for literal - address, not value)
} ops[BLOCK_INSN_LIMIT];
enum op_types {
  OP_UNHANDLED = 0,
  OP_BRANCH,
  OP_BRANCH_N,  // conditional known not to be taken
  OP_BRANCH_CT, // conditional, branch if T set
  OP_BRANCH_CF, // conditional, branch if T clear
  OP_BRANCH_R,  // indirect
  OP_BRANCH_RF, // indirect far (PC + Rm)
  OP_SETCLRT,   // T flag set/clear
  OP_MOVE,      // register move
  OP_LOAD_CONST,// load const to register
  OP_LOAD_POOL, // literal pool load, imm is address
  OP_MOVA,      // MOVA instruction
  OP_SLEEP,     // SLEEP instruction
  OP_RTE,       // RTE instruction
  OP_TRAPA,     // TRAPA instruction
  OP_LDC,       // LDC instruction
  OP_UNDEFINED,
};
// XXX consider trap insns: OP_TRAPA, OP_UNDEFINED?
#define OP_ISBRANCH(op) ((BITRANGE(OP_BRANCH, OP_BRANCH_RF) | BITMASK1(OP_RTE)) \
                                & BITMASK1(op))
#define OP_ISBRAUC(op)  (BITMASK4(OP_BRANCH, OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
#define OP_ISBRACND(op) (BITMASK3(OP_BRANCH_CT, OP_BRANCH_CF, OP_BRANCH_N) \
                                & BITMASK1(op))
#define OP_ISBRAIMM(op) (BITMASK3(OP_BRANCH, OP_BRANCH_CT, OP_BRANCH_CF) \
                                & BITMASK1(op))
#define OP_ISBRAIND(op) (BITMASK3(OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                                & BITMASK1(op))
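
// Illustrative example (not in the original source): these classifiers test
// set membership with bitmasks instead of chained comparisons. BITMASK1(op)
// turns an op_types value into a single set bit, which is ANDed against a
// mask of all matching types:
//   OP_ISBRAUC(OP_BRANCH_R)   // nonzero: unconditional indirect branch
//   OP_ISBRAUC(OP_BRANCH_CT)  // zero: conditional branches aren't in the mask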
#ifdef DRC_SH2

#if (DRC_DEBUG & 4)
static u8 *tcache_dsm_ptrs[3];
static char sh2dasm_buff[64];
#define do_host_disasm(tcid) \
  emith_flush(); \
  host_dasm(tcache_dsm_ptrs[tcid], emith_insn_ptr() - tcache_dsm_ptrs[tcid]); \
  tcache_dsm_ptrs[tcid] = emith_insn_ptr()
#else
#define do_host_disasm(x)
#endif

#define SH2_DUMP(sh2, reason) { \
  char ms = (sh2)->is_slave ? 's' : 'm'; \
  printf("%csh2 %s %08x\n", ms, reason, (sh2)->pc); \
  printf("%csh2 r0-7  %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[0], (sh2)->r[1], (sh2)->r[2], (sh2)->r[3], \
    (sh2)->r[4], (sh2)->r[5], (sh2)->r[6], (sh2)->r[7]); \
  printf("%csh2 r8-15 %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[8], (sh2)->r[9], (sh2)->r[10], (sh2)->r[11], \
    (sh2)->r[12], (sh2)->r[13], (sh2)->r[14], (sh2)->r[15]); \
  printf("%csh2 pc-ml %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->pc, (sh2)->ppc, (sh2)->pr, (sh2)->sr&0x3ff, \
    (sh2)->gbr, (sh2)->vbr, (sh2)->mach, (sh2)->macl); \
  printf("%csh2 tmp-p %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->drc_tmp, (sh2)->irq_cycles, \
    (sh2)->pdb_io_csum[0], (sh2)->pdb_io_csum[1], (sh2)->state, \
    (sh2)->poll_addr, (sh2)->poll_cycles, (sh2)->poll_cnt); \
}
#if (DRC_DEBUG & (8|256|512|1024)) || defined(PDB)
#if (DRC_DEBUG & (256|512|1024))
static SH2 csh2[2][8];
#endif
static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
{
  if (block != NULL) {
    dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
      sh2->pc, block, (signed int)sr >> 12);
#if defined PDB
    pdb_step(sh2, sh2->pc);
#elif (DRC_DEBUG & 256)
    {
      static FILE *trace[2];
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "wb");
        trace[1] = fopen("pico.trace1", "wb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        fwrite(sh2, offsetof(SH2, read8_map), 1, trace[idx]);
        fwrite(&sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx]);
        memcpy(&csh2[idx][0], sh2, offsetof(SH2, poll_cnt)+4);
        csh2[idx][0].is_slave = idx;
      }
    }
#elif (DRC_DEBUG & 512)
    {
      static FILE *trace[2];
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "rb");
        trace[1] = fopen("pico.trace1", "rb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        if (!fread(&fsh2, offsetof(SH2, read8_map), 1, trace[idx]) ||
            !fread(&fsh2.pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx])) {
          printf("trace eof at %08lx\n", ftell(trace[idx]));
          exit(1);
        }
        fsh2.sr = (fsh2.sr & 0xfff) | (sh2->sr & ~0xfff);
        fsh2.is_slave = idx;
        if (memcmp(&fsh2, sh2, offsetof(SH2, read8_map)) ||
            0)//memcmp(&fsh2.pdb_io_csum, &sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum)))
        {
          printf("difference at %08lx!\n", ftell(trace[idx]));
          SH2_DUMP(&fsh2, "file");
          SH2_DUMP(sh2, "current");
          SH2_DUMP(&csh2[idx][0], "previous");
          exit(1);
        }
        csh2[idx][0] = fsh2;
      }
    }
#elif (DRC_DEBUG & 1024)
    {
      int x = sh2->is_slave, i;
      for (i = 0; i < ARRAY_SIZE(csh2[x])-1; i++)
        memcpy(&csh2[x][i], &csh2[x][i+1], offsetof(SH2, poll_cnt)+4);
      memcpy(&csh2[x][ARRAY_SIZE(csh2[x])-1], sh2, offsetof(SH2, poll_cnt)+4);
      csh2[x][0].is_slave = x;
    }
#endif
  }
  return block;
}
#endif
// } debug
#define TCACHE_BUFFERS 3

// we have 3 translation cache buffers, split from one drc/cmn buffer.
// BIOS shares a tcache with the data array because it's only used for init
// and can be discarded early.
// XXX: need to tune sizes
static const int tcache_sizes[TCACHE_BUFFERS] = {
  DRC_TCACHE_SIZE * 14 / 16, // ROM (rarely used), DRAM
  DRC_TCACHE_SIZE / 16,      // BIOS, data array in master sh2
  DRC_TCACHE_SIZE / 16,      // ... slave
};
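
// Illustrative example (not in the original source): assuming a hypothetical
// DRC_TCACHE_SIZE of 4 MB, the split works out to:
//   tcache 0: 4 MB * 14/16 = 3.5 MB  (ROM + SDRAM code, shared by both SH2s)
//   tcache 1: 4 MB / 16    = 256 KB  (master SH2 BIOS + data array)
//   tcache 2: 4 MB / 16    = 256 KB  (slave SH2 BIOS + data array)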
static u8 *tcache_bases[TCACHE_BUFFERS];
static u8 *tcache_ptrs[TCACHE_BUFFERS];
static u8 *tcache_limit[TCACHE_BUFFERS];

// ptr for code emitters
static u8 *tcache_ptr;
#define MAX_BLOCK_ENTRIES (BLOCK_INSN_LIMIT / 6)

struct block_link {
  u32 target_pc;
  void *jump;                // insn address
  struct block_link *next;   // either in block_entry->links or unresolved
  struct block_link *o_next; // ...in block_entry->o_links
  struct block_link *prev;
  struct block_link *o_prev;
  struct block_entry *target;// target block this is linked in (be->links)
  int tcache_id;
};

struct block_entry {
  u32 pc;
  u8 *tcache_ptr;            // translated block for above PC
  struct block_entry *next;  // chain in hash_table with same pc hash
  struct block_entry *prev;
  struct block_link *links;  // incoming links to this entry
  struct block_link *o_links;// outgoing links from this entry
#if (DRC_DEBUG & 2)
  struct block_desc *block;
#endif
#if (DRC_DEBUG & 32)
  int entry_count;
#endif
};

struct block_desc {
  u32 addr;                  // block start SH2 PC address
  u32 addr_lit;              // block start SH2 literal pool addr
  int size;                  // ..of recompiled insns
  int size_lit;              // ..of (insns+)literal pool
  u8 *tcache_ptr;            // start address of block in cache
  u16 crc;                   // crc of insns and literals
  u16 active;                // actively used or deactivated?
  struct block_list *list;
#if (DRC_DEBUG & 2)
  int refcount;
#endif
  int entry_count;
  struct block_entry entryp[MAX_BLOCK_ENTRIES];
};
#define BLOCK_MAX_COUNT(tcid)      ((tcid) ? 256 : 16*256)
static struct block_desc *block_tables[TCACHE_BUFFERS];
static int block_counts[TCACHE_BUFFERS];
static int block_limit[TCACHE_BUFFERS];

// we have block_link_pool to avoid using mallocs
#define BLOCK_LINK_MAX_COUNT(tcid) ((tcid) ? 1024 : 16*1024)
static struct block_link *block_link_pool[TCACHE_BUFFERS];
static int block_link_pool_counts[TCACHE_BUFFERS];
static struct block_link **unresolved_links[TCACHE_BUFFERS];
static struct block_link *blink_free[TCACHE_BUFFERS];

// used for invalidation
#define RAM_SIZE(tcid)  ((tcid) ? 0x1000 : 0x40000)
#define INVAL_PAGE_SIZE 0x100

struct block_list {
  struct block_desc *block;
  struct block_list *next;
  struct block_list *prev;
  struct block_list **head;
  struct block_list *l_next;
};

struct block_list *blist_free;

static struct block_list *inactive_blocks[TCACHE_BUFFERS];

// array of pointers to block_lists for RAM and 2 data arrays
// each array has len: sizeof(mem) / INVAL_PAGE_SIZE
static struct block_list **inval_lookup[TCACHE_BUFFERS];

#define HASH_TABLE_SIZE(tcid) ((tcid) ? 256 : 64*256)
static struct block_entry **hash_tables[TCACHE_BUFFERS];

#define HASH_FUNC(hash_tab, addr, mask) \
  (hash_tab)[((addr) >> 1) & (mask)]
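
// Illustrative example (not in the original source): SH2 opcodes are 16-bit
// aligned, so bit 0 of the PC carries no information and is shifted out
// before masking. For the ROM/RAM table (64*256 = 0x4000 buckets):
//   HASH_FUNC(hash_tables[0], 0x06001234, HASH_TABLE_SIZE(0) - 1)
//     -> hash_tables[0][(0x06001234 >> 1) & 0x3fff]  // bucket 0x091a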
#if (DRC_DEBUG & 128)
#if BRANCH_CACHE
int bchit, bcmiss;
#endif
#if CALL_STACK
int rchit, rcmiss;
#endif
#endif
// host register tracking
enum {
  HR_FREE,
  HR_STATIC, // vreg has a static mapping
  HR_CACHED, // vreg has sh2_reg_e
  HR_TEMP,   // reg used for temp storage
} cache_reg_type;

enum {
  HRF_DIRTY  = 1 << 0, // has "dirty" value to be written to ctx
  HRF_LOCKED = 1 << 1, // can't be evicted
  HRF_TEMP   = 1 << 2, // is for temps and args
  HRF_REG    = 1 << 3, // is for sh2 regs
} cache_reg_flags;

typedef struct {
  u8 hreg;    // "host" reg
  u8 flags:4; // TEMP or REG?
  u8 type:2;  // CACHED or TEMP?
  u8 ref:2;   // ref counter
  u16 stamp;  // kind of a timestamp
  u32 gregs;  // "guest" reg mask
} cache_reg_t;

// guest register tracking
enum {
  GRF_DIRTY  = 1 << 0, // reg has "dirty" value to be written to ctx
  GRF_CONST  = 1 << 1, // reg has a constant
  GRF_CDIRTY = 1 << 2, // constant not yet written to ctx
  GRF_STATIC = 1 << 3, // reg has static mapping to vreg
} guest_reg_flags;

typedef struct {
  u8 flags;   // guest flags: is constant, is dirty?
  s8 sreg;    // cache reg for static mapping
  s8 vreg;    // cache_reg this is currently mapped to, -1 if not mapped
  s8 cnst;    // const index if this is constant
} guest_reg_t;

// possibly needed in code emitter
static int rcache_get_tmp(void);
static void rcache_free_tmp(int hr);

// Note: cache_regs[] must have at least the number of HRF_REG registers used
// by handlers in the worst case (currently 4).
// Register assignment follows the ABI convention: caller-saved registers are
// TEMP, the others are either static or REG. SR must be static; a static R0
// is strongly recommended. VBR, PC, PR must not be static (they are read
// from the context in utils).
// TEMP registers come first, REG last; the alloc/evict algorithm depends on
// this ordering. The 1st TEMP must not be RET_REG on platforms using temps
// in insns (e.g. x86).
// XXX shouldn't this be somehow defined in the code emitters?
#ifdef __arm__
#include "../drc/emit_arm.c"

static guest_reg_t guest_regs[] = {
  // SHR_R0 .. SHR_SP
#ifndef __MACH__ // no r9..
  { GRF_STATIC, 8 }, { GRF_STATIC, 9 }, { 0 }            , { 0 }            ,
#else
  { GRF_STATIC, 8 }, { 0 }            , { 0 }            , { 0 }            ,
#endif
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
  // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
  { 0 }            , { 0 }            , { 0 }            , { GRF_STATIC, 10 },
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
};

// OABI/EABI: params: r0-r3, return: r0-r1, temp: r12,r14, saved: r4-r8,r10,r11
// SP,PC: r13,r15 must not be used. saved: r9 (for platform use, e.g. on ios)
static cache_reg_t cache_regs[] = {
  { 12, HRF_TEMP },   // temps
  { 14, HRF_TEMP },
  {  3, HRF_TEMP },   // params
  {  2, HRF_TEMP },
  {  1, HRF_TEMP },
  {  0, HRF_TEMP },   // RET_REG
  {  8, HRF_LOCKED }, // statics
#ifndef __MACH__ // no r9..
  {  9, HRF_LOCKED },
#endif
  { 10, HRF_LOCKED },
  {  4, HRF_REG },    // other regs
  {  5, HRF_REG },
  {  6, HRF_REG },
  {  7, HRF_REG },
};
#elif defined(__aarch64__)
#include "../drc/emit_arm64.c"

static guest_reg_t guest_regs[] = {
  // SHR_R0 .. SHR_SP
  { GRF_STATIC,20 }, { GRF_STATIC,21 }, { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
  // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
  { 0 }            , { 0 }            , { 0 }            , { GRF_STATIC,22 },
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
};

// AAPCS64: params: r0-r7, return: r0-r1, temp: r8-r17, saved: r19-r29
// saved: r18 (for platform use)
// since drc never needs more than 4 parameters, r4-r7 are treated as temps.
static cache_reg_t cache_regs[] = {
  { 17, HRF_TEMP },   // temps
  { 16, HRF_TEMP },
  { 15, HRF_TEMP },
  { 14, HRF_TEMP },
  { 13, HRF_TEMP },
  { 12, HRF_TEMP },
  { 11, HRF_TEMP },
  { 10, HRF_TEMP },
  {  9, HRF_TEMP },
  {  8, HRF_TEMP },
  {  7, HRF_TEMP },
  {  6, HRF_TEMP },
  {  5, HRF_TEMP },
  {  4, HRF_TEMP },
  {  3, HRF_TEMP },   // params
  {  2, HRF_TEMP },
  {  1, HRF_TEMP },
  {  0, HRF_TEMP },   // RET_REG
  { 22, HRF_LOCKED }, // statics
  { 21, HRF_LOCKED },
  { 20, HRF_LOCKED },
  { 29, HRF_REG },    // other regs
  { 28, HRF_REG },
  { 27, HRF_REG },
  { 26, HRF_REG },
  { 25, HRF_REG },
  { 24, HRF_REG },
  { 23, HRF_REG },
};
#elif defined(__mips__)
#include "../drc/emit_mips.c"

static guest_reg_t guest_regs[] = {
  // SHR_R0 .. SHR_SP
  { GRF_STATIC,20 }, { GRF_STATIC,21 }, { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
  // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
  { 0 }            , { 0 }            , { 0 }            , { GRF_STATIC,22 },
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
};

// MIPS ABI: params: r4-r7, return: r2-r3, temp: r1(at),r8-r15,r24-r25,r31(ra),
// saved: r16-r23,r30, reserved: r0(zero), r26-r27(irq), r28(gp), r29(sp)
// r1,r15,r24,r25 are used internally by the code emitter
static cache_reg_t cache_regs[] = {
  { 14, HRF_TEMP },   // temps
  { 13, HRF_TEMP },
  { 12, HRF_TEMP },
  { 11, HRF_TEMP },
  { 10, HRF_TEMP },
  {  9, HRF_TEMP },
  {  8, HRF_TEMP },
  {  7, HRF_TEMP },   // params
  {  6, HRF_TEMP },
  {  5, HRF_TEMP },
  {  4, HRF_TEMP },
  {  3, HRF_TEMP },   // RET_REG
  {  2, HRF_TEMP },
  { 22, HRF_LOCKED }, // statics
  { 21, HRF_LOCKED },
  { 20, HRF_LOCKED },
  { 19, HRF_REG },    // other regs
  { 18, HRF_REG },
  { 17, HRF_REG },
  { 16, HRF_REG },
};
#elif defined(__i386__)
#include "../drc/emit_x86.c"

static guest_reg_t guest_regs[] = {
  // SHR_R0 .. SHR_SP
  {GRF_STATIC, xSI}, { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
  // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
  { 0 }            , { 0 }            , { 0 }            , {GRF_STATIC, xDI},
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
};

// ax, cx, dx are usually temporaries by convention
static cache_reg_t cache_regs[] = {
  { xBX, HRF_REG|HRF_TEMP }, // params
  { xCX, HRF_REG|HRF_TEMP },
  { xDX, HRF_REG|HRF_TEMP },
  { xAX, HRF_REG|HRF_TEMP }, // return value
  { xSI, HRF_LOCKED },       // statics
  { xDI, HRF_LOCKED },
};
#elif defined(__x86_64__)
#include "../drc/emit_x86.c"

static guest_reg_t guest_regs[] = {
  // SHR_R0 .. SHR_SP
  {GRF_STATIC,xR12}, { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
  // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
  // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
  { 0 }            , { 0 }            , { 0 }            , {GRF_STATIC, xBX},
  { 0 }            , { 0 }            , { 0 }            , { 0 }            ,
};

// M$/SystemV ABI conventions:
// rbx,rbp,r12-r15 are preserved, rcx,rdx,rax,r8,r9,r10,r11 are temporaries
// rsi,rdi are preserved in M$ ABI, temporary in SystemV ABI
// parameters in rcx,rdx,r8,r9, SystemV ABI additionally uses rsi,rdi
static cache_reg_t cache_regs[] = {
  { xR10,HRF_TEMP },         // temps
  { xR11,HRF_TEMP },
  { xAX, HRF_TEMP },         // RET_REG
  { xR8, HRF_TEMP },         // params
  { xR9, HRF_TEMP },
  { xCX, HRF_TEMP },
  { xDX, HRF_TEMP },
  { xSI, HRF_REG|HRF_TEMP },
  { xDI, HRF_REG|HRF_TEMP },
  { xBX, HRF_LOCKED },       // statics
  { xR12,HRF_LOCKED },
  { xR13,HRF_REG },          // other regs
  { xR14,HRF_REG },
  { xR15,HRF_REG },
};

#else
#error unsupported arch
#endif
static signed char reg_map_host[HOST_REGS];

static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void REGPARM(1) (*sh2_drc_dispatcher)(u32 pc);
#if CALL_STACK
static void REGPARM(2) (*sh2_drc_dispatcher_call)(u32 pc, uptr host_pr);
static void REGPARM(1) (*sh2_drc_dispatcher_return)(u32 pc);
#endif
static void REGPARM(1) (*sh2_drc_exit)(u32 pc);
static void (*sh2_drc_test_irq)(void);

static u32  REGPARM(1) (*sh2_drc_read8)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read8_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16_poll)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32_poll)(u32 a);
static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write32)(u32 a, u32 d);

// flags for memory access
#define MF_SIZEMASK 0x03        // size of access
#define MF_POSTINCR 0x10        // post increment (for read_rr)
#define MF_PREDECR  MF_POSTINCR // pre decrement (for write_rr)
#define MF_POLLING  0x20        // include polling check in read
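
// Illustrative example (not in the original source): the low 2 bits select
// the access size (0/1/2 = byte/word/long, as in struct op_data) and the
// high bits modify behavior. A hypothetical polling long read with
// post-increment, e.g. for MOV.L @Rm+,Rn inside a poll loop, would pass:
//   2 | MF_POSTINCR | MF_POLLING   // == 0x32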
// address space stuff
static int dr_is_rom(u32 a)
{
  // tweak for WWF Raw which writes data to some high ROM addresses
  return (a & 0xc6000000) == 0x02000000 && (a & 0x3f0000) < 0x3e0000;
}
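
// Illustrative example (not in the original source): the first test matches
// the SH2 ROM area at 0x02000000 (and, by the mask, its 0x22000000
// cache-through mirror); the second excludes the top 0x20000 bytes of the
// 4 MB bank, which WWF Raw uses as scratch RAM:
//   dr_is_rom(0x02000100) -> 1   // normal ROM address
//   dr_is_rom(0x023f0000) -> 0   // high "ROM" range written by the game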
static int dr_ctx_get_mem_ptr(SH2 *sh2, u32 a, u32 *mask)
{
  void *memptr;
  int poffs = -1;

  // check if region is mapped memory
  memptr = p32x_sh2_get_mem_ptr(a, mask, sh2);
  if (memptr == NULL)
    return poffs;

  if (memptr == sh2->p_bios)        // BIOS
    poffs = offsetof(SH2, p_bios);
  else if (memptr == sh2->p_da)     // data array
    poffs = offsetof(SH2, p_da);
  else if (memptr == sh2->p_sdram)  // SDRAM
    poffs = offsetof(SH2, p_sdram);
  else if (memptr == sh2->p_rom)    // ROM
    poffs = offsetof(SH2, p_rom);

  return poffs;
}
static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
{
  struct block_entry *be;
  u32 tcid = 0;

  if ((pc & 0xe0000000) == 0xc0000000)
    tcid = 1 + is_slave; // data array
  if ((pc & ~0xfff) == 0)
    tcid = 1 + is_slave; // BIOS
  *tcache_id = tcid;

  be = HASH_FUNC(hash_tables[tcid], pc, HASH_TABLE_SIZE(tcid) - 1);
  if (be != NULL) // don't ask... gcc code generation hint
    for (; be != NULL; be = be->next)
      if (be->pc == pc)
        return be;

  return NULL;
}
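
// Illustrative example (not in the original source): tcache selection by PC:
//   pc = 0x06000abc, either sh2 -> tcid 0 (shared SDRAM/ROM tcache)
//   pc = 0xc0000100, slave     -> tcid 2 (slave data array)
//   pc = 0x00000204, master    -> tcid 1 (master BIOS, shares the da tcache)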
// ---------------------------------------------------------------

// block management
static void add_to_block_list(struct block_list **blist, struct block_desc *block)
{
  struct block_list *added;

  if (blist_free) {
    added = blist_free;
    blist_free = added->next;
  } else {
    added = malloc(sizeof(*added));
  }
  if (!added) {
    elprintf(EL_ANOMALY, "drc OOM (1)");
    return;
  }
  added->block = block;
  added->l_next = block->list;
  block->list = added;
  added->head = blist;

  added->prev = NULL;
  if (*blist)
    (*blist)->prev = added;
  added->next = *blist;
  *blist = added;
}
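
// Illustrative note (not in the original source): each block_list node sits
// on two chains at once: a doubly linked per-list chain (next/prev, with
// head stored so the node can unlink itself from whatever list it's on),
// and a singly linked per-block chain (l_next) rooted at block->list. That
// is why rm_from_block_lists() below walks l_next while unlinking each node
// from its next/prev chain.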
static void rm_from_block_lists(struct block_desc *block)
{
  struct block_list *entry;

  entry = block->list;
  while (entry != NULL) {
    if (entry->prev != NULL)
      entry->prev->next = entry->next;
    else
      *(entry->head) = entry->next;
    if (entry->next != NULL)
      entry->next->prev = entry->prev;

    entry->next = blist_free;
    blist_free = entry;

    entry = entry->l_next;
  }
  block->list = NULL;
}

static void rm_block_list(struct block_list **blist)
{
  struct block_list *next, *current = *blist;

  while (current != NULL) {
    next = current->next;
    current->next = blist_free;
    blist_free = current;
    current = next;
  }
  *blist = NULL;
}
static void REGPARM(1) flush_tcache(int tcid)
{
  int i;
#if (DRC_DEBUG & 1)
  int tc_used, bl_used;

  tc_used = tcache_sizes[tcid] - (tcache_limit[tcid] - tcache_ptrs[tcid]);
  bl_used = BLOCK_MAX_COUNT(tcid) - (block_limit[tcid] - block_counts[tcid]);
  elprintf(EL_STATUS, "tcache #%d flush! (%d/%d, bds %d/%d)", tcid, tc_used,
    tcache_sizes[tcid], bl_used, BLOCK_MAX_COUNT(tcid));
#endif

  block_counts[tcid] = 0;
  block_limit[tcid] = BLOCK_MAX_COUNT(tcid) - 1;
  block_link_pool_counts[tcid] = 0;
  blink_free[tcid] = NULL;
  memset(unresolved_links[tcid], 0, sizeof(*unresolved_links[0]) * HASH_TABLE_SIZE(tcid));
  memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * HASH_TABLE_SIZE(tcid));
  tcache_ptrs[tcid] = tcache_bases[tcid];
  tcache_limit[tcid] = tcache_bases[tcid] + tcache_sizes[tcid];
  if (Pico32xMem->sdram != NULL) {
    if (tcid == 0) { // ROM, RAM
      memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
      memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
      memset(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache));
      memset(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache));
      memset(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache));
      memset(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache));
      sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
    } else {
      memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
      memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
      memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[tcid - 1]));
      memset(Pico32xMem->drclit_da[tcid - 1], 0, sizeof(Pico32xMem->drclit_da[tcid - 1]));
      memset(sh2s[tcid - 1].branch_cache, -1, sizeof(sh2s[0].branch_cache));
      memset(sh2s[tcid - 1].rts_cache, -1, sizeof(sh2s[0].rts_cache));
      sh2s[tcid - 1].rts_cache_idx = 0;
    }
  }
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcid] = tcache_bases[tcid];
#endif

  for (i = 0; i < RAM_SIZE(tcid) / INVAL_PAGE_SIZE; i++)
    rm_block_list(&inval_lookup[tcid][i]);
  rm_block_list(&inactive_blocks[tcid]);
}
static void add_to_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

  be->prev = NULL;
  if (*head)
    (*head)->prev = be;
  be->next = *head;
  *head = be;

#if (DRC_DEBUG & 2)
  if (be->next != NULL) {
    printf(" %08x: entry hash collision with %08x\n",
      be->pc, be->next->pc);
    hash_collisions++;
  }
#endif
}

static void rm_from_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

#if DRC_DEBUG & 1
  struct block_entry *current = be;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist @%p: be %p %08x missing?", head, be, be->pc);
#endif

  if (be->prev != NULL)
    be->prev->next = be->next;
  else
    *head = be->next;
  if (be->next != NULL)
    be->next->prev = be->prev;
}
static void add_to_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = *head;
  while (current != NULL && current != bl)
    current = current->next;
  if (current == bl)
    dbg(1, "add_to_hashlist_unresolved @%p: bl %p %p %08x already in?", head, bl, bl->target, bl->target_pc);
#endif

  bl->target = NULL; // marker for not resolved
  bl->prev = NULL;
  if (*head)
    (*head)->prev = bl;
  bl->next = *head;
  *head = bl;
}

static void rm_from_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = bl;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist_unresolved @%p: bl %p %p %08x missing?", head, bl, bl->target, bl->target_pc);
#endif

  if (bl->prev != NULL)
    bl->prev->next = bl->next;
  else
    *head = bl->next;
  if (bl->next != NULL)
    bl->next->prev = bl->prev;
}
static void sh2_smc_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit, int free);
static void dr_free_oldest_block(int tcache_id)
{
  struct block_desc *bd;

  if (block_limit[tcache_id] >= BLOCK_MAX_COUNT(tcache_id)) {
    // block desc wrap around
    block_limit[tcache_id] = 0;
  }
  bd = &block_tables[tcache_id][block_limit[tcache_id]];

  if (bd->tcache_ptr && bd->tcache_ptr < tcache_ptrs[tcache_id]) {
    // cache wrap around
    tcache_ptrs[tcache_id] = bd->tcache_ptr;
  }

  if (bd->addr && bd->entry_count)
    sh2_smc_rm_block_entry(bd, tcache_id, 0, 1);

  block_limit[tcache_id]++;
  if (block_limit[tcache_id] >= BLOCK_MAX_COUNT(tcache_id))
    block_limit[tcache_id] = 0;
  bd = &block_tables[tcache_id][block_limit[tcache_id]];
  if (bd->tcache_ptr >= tcache_ptrs[tcache_id])
    tcache_limit[tcache_id] = bd->tcache_ptr;
  else
    tcache_limit[tcache_id] = tcache_bases[tcache_id] + tcache_sizes[tcache_id];
}
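
// Illustrative note (not in the original source): block descriptors and
// tcache space are reclaimed FIFO-style. New blocks are allocated at
// block_counts while block_limit chases it around the descriptor ring;
// freeing the oldest block advances tcache_limit to the next surviving
// block's code, so the emitter can keep writing linearly and wrap around
// the translation cache like a ring buffer.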
static u8 *dr_prepare_cache(int tcache_id, int insn_count)
{
  u8 *limit = tcache_limit[tcache_id];

  // if no block desc available
  if (block_counts[tcache_id] == block_limit[tcache_id])
    dr_free_oldest_block(tcache_id);

  // while not enough cache space left (limit - tcache_ptr < max space needed)
  while (tcache_limit[tcache_id] - tcache_ptrs[tcache_id] < insn_count * 128)
    dr_free_oldest_block(tcache_id);

  if (limit != tcache_limit[tcache_id]) {
#if BRANCH_CACHE
    if (tcache_id)
      memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    else {
      memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
      memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
    }
#endif
#if CALL_STACK
    if (tcache_id) {
      memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      sh2s[tcache_id-1].rts_cache_idx = 0;
    } else {
      memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
      memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
      sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
    }
#endif
  }

  return (u8 *)tcache_ptrs[tcache_id];
}
static void dr_mark_memory(int mark, struct block_desc *block, int tcache_id, u32 nolit)
{
  u8 *drc_ram_blk = NULL, *lit_ram_blk = NULL;
  u32 addr, end, mask = 0, shift = 0, idx;

  // mark memory blocks as containing compiled code
  if ((block->addr & 0xc7fc0000) == 0x06000000
      || (block->addr & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      drc_ram_blk = Pico32xMem->drcblk_da[tcache_id-1];
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      drc_ram_blk = Pico32xMem->drcblk_ram;
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    // mark recompiled insns
    addr = block->addr & ~((1 << shift) - 1);
    end = block->addr + block->size;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark literal pool
    if (addr < (block->addr_lit & ~((1 << shift) - 1)))
      addr = block->addr_lit & ~((1 << shift) - 1);
    end = block->addr_lit + block->size_lit;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark for literals disabled
    if (nolit) {
      addr = nolit & ~((1 << shift) - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
        lit_ram_blk[idx++] = 1;
    }

    if (mark < 0)
      rm_from_block_lists(block);
    else {
      // add to invalidation lookup lists
      addr = block->addr & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr + block->size;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
      if (addr < (block->addr_lit & ~(INVAL_PAGE_SIZE - 1)))
        addr = block->addr_lit & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
    }
  }
}

static u32 dr_check_nolit(u32 start, u32 end, int tcache_id)
{
  u8 *lit_ram_blk = NULL;
  u32 mask = 0, shift = 0, addr, idx;

  if ((start & 0xc7fc0000) == 0x06000000
      || (start & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = RAM_SIZE(tcache_id) - 1;

    addr = start & ~((1 << shift) - 1);
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      if (lit_ram_blk[idx++])
        break;

    return (addr < start ? start : addr > end ? end : addr);
  }

  return end;
}
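
// if a block was invalidated but the underlying memory hasn't changed (same
// address range and CRC), the old translation can simply be reactivated
// instead of retranslating; look for such a candidate in the inactive list.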
static struct block_desc *dr_find_inactive_block(int tcache_id, u16 crc,
  u32 addr, int size, u32 addr_lit, int size_lit)
{
  struct block_list **head = &inactive_blocks[tcache_id];
  struct block_list *current;

  for (current = *head; current != NULL; current = current->next) {
    struct block_desc *block = current->block;
    if (block->crc == crc && block->addr == addr && block->size == size &&
        block->addr_lit == addr_lit && block->size_lit == size_lit)
    {
      rm_from_block_lists(block);
      return block;
    }
  }
  return NULL;
}

static struct block_desc *dr_add_block(u32 addr, int size,
  u32 addr_lit, int size_lit, u16 crc, int is_slave, int *blk_id)
{
  struct block_entry *be;
  struct block_desc *bd;
  int tcache_id;
  int *bcount;

  // do a lookup to get tcache_id and override check
  be = dr_get_entry(addr, is_slave, &tcache_id);
  if (be != NULL)
    dbg(1, "block override for %08x", addr);

  bcount = &block_counts[tcache_id];
  if (*bcount == block_limit[tcache_id]) {
    dbg(1, "bd overflow for tcache %d", tcache_id);
    return NULL;
  }

  bd = &block_tables[tcache_id][*bcount];
  bd->addr = addr;
  bd->size = size;
  bd->addr_lit = addr_lit;
  bd->size_lit = size_lit;
  bd->tcache_ptr = tcache_ptr;
  bd->crc = crc;
  bd->active = 1;
  bd->entry_count = 1;
  bd->entryp[0].pc = addr;
  bd->entryp[0].tcache_ptr = tcache_ptr;
  bd->entryp[0].links = bd->entryp[0].o_links = NULL;
#if (DRC_DEBUG & 2)
  bd->entryp[0].block = bd;
  bd->refcount = 0;
#endif
  add_to_hashlist(&bd->entryp[0], tcache_id);

  *blk_id = *bcount;
  (*bcount)++;
  if (*bcount >= BLOCK_MAX_COUNT(tcache_id))
    *bcount = 0;

  return bd;
}

static void REGPARM(3) *dr_lookup_block(u32 pc, SH2 *sh2, int *tcache_id)
{
  struct block_entry *be = NULL;
  void *block = NULL;

  be = dr_get_entry(pc, sh2->is_slave, tcache_id);
  if (be != NULL)
    block = be->tcache_ptr;

#if (DRC_DEBUG & 2)
  if (be != NULL)
    be->block->refcount++;
#endif
  return block;
}

static void *dr_failure(void)
{
  lprintf("recompilation failed\n");
  exit(1);
}
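
// block linking: a branch with a known target address may jump directly into
// the target block's code instead of going through sh2_drc_dispatcher. Each
// such jump is tracked by a block_link so it can be patched back to the
// dispatcher when the target block goes away.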
#if LINK_BRANCHES
static void dr_block_link(struct block_entry *be, struct block_link *bl, int emit_jump)
{
  dbg(2, "- %slink from %p to pc %08x entry %p", emit_jump ? "":"early ",
    bl->jump, bl->target_pc, be->tcache_ptr);

  if (emit_jump) {
    u8 *jump = emith_jump_patch(bl->jump, be->tcache_ptr);
    // only needs sync if patch is possibly crossing a cacheline (assume 16 bytes)
    if ((uintptr_t)jump >> 4 != ((uintptr_t)jump+emith_jump_patch_size()-1) >> 4)
      host_instructions_updated(jump, jump+emith_jump_patch_size());
  }

  // move bl to block_entry
  bl->target = be;
  bl->prev = NULL;
  if (be->links)
    be->links->prev = bl;
  bl->next = be->links;
  be->links = bl;
}

static void dr_block_unlink(struct block_link *bl, int emit_jump)
{
  dbg(2,"- unlink from %p to pc %08x", bl->jump, bl->target_pc);

  if (bl->target) {
    if (emit_jump) {
      u8 *jump = emith_jump_patch(bl->jump, sh2_drc_dispatcher);
      // update cpu caches since the previous jump target doesn't exist anymore
      host_instructions_updated(jump, jump+emith_jump_patch_size());
    }

    if (bl->prev)
      bl->prev->next = bl->next;
    else
      bl->target->links = bl->next;
    if (bl->next)
      bl->next->prev = bl->prev;
    bl->target = NULL;
  }
}
#endif

static void *dr_prepare_ext_branch(struct block_entry *owner, u32 pc, int is_slave, int tcache_id)
{
#if LINK_BRANCHES
  struct block_link *bl = block_link_pool[tcache_id];
  int cnt = block_link_pool_counts[tcache_id];
  struct block_entry *be = NULL;
  int target_tcache_id;

  // get the target block entry
  be = dr_get_entry(pc, is_slave, &target_tcache_id);
  if (target_tcache_id && target_tcache_id != tcache_id)
    return sh2_drc_dispatcher;

  // get a block link
  if (blink_free[tcache_id] != NULL) {
    bl = blink_free[tcache_id];
    blink_free[tcache_id] = bl->next;
  } else if (cnt >= BLOCK_LINK_MAX_COUNT(tcache_id)) {
    dbg(1, "bl overflow for tcache %d", tcache_id);
    return sh2_drc_dispatcher;
  } else {
    bl += cnt;
    block_link_pool_counts[tcache_id] = cnt+1;
  }

  // prepare link and add it to the outgoing list of owner
  bl->tcache_id = tcache_id;
  bl->target_pc = pc;
  bl->jump = tcache_ptr;
  bl->o_next = owner->o_links;
  owner->o_links = bl;

  if (be != NULL) {
    dr_block_link(be, bl, 0); // jump not yet emitted by translate()
    return be->tcache_ptr;
  }
  else {
    add_to_hashlist_unresolved(bl, tcache_id);
    return sh2_drc_dispatcher;
  }
#else
  return sh2_drc_dispatcher;
#endif
}

static void dr_link_blocks(struct block_entry *be, int tcache_id)
{
#if LINK_BRANCHES
  u32 tcmask = HASH_TABLE_SIZE(tcache_id) - 1;
  u32 pc = be->pc;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], pc, tcmask);
  struct block_link *bl = *head, *next;

  while (bl != NULL) {
    next = bl->next;
    if (bl->target_pc == pc && (!bl->tcache_id || bl->tcache_id == tcache_id)) {
      rm_from_hashlist_unresolved(bl, bl->tcache_id);
      dr_block_link(be, bl, 1);
    }
    bl = next;
  }
#endif
}

static void dr_link_outgoing(struct block_entry *be, int tcache_id, int is_slave)
{
#if LINK_BRANCHES
  struct block_link *bl;
  int target_tcache_id;

  for (bl = be->o_links; bl; bl = bl->o_next) {
    if (bl->target == NULL) {
      be = dr_get_entry(bl->target_pc, is_slave, &target_tcache_id);
      if (be != NULL && (!target_tcache_id || target_tcache_id == tcache_id)) {
        // remove bl from unresolved_links (it must be there, since target was NULL)
        rm_from_hashlist_unresolved(bl, bl->tcache_id);
        dr_block_link(be, bl, 1);
      }
    }
  }
#endif
}

#define ADD_TO_ARRAY(array, count, item, failcode) { \
  if (count >= ARRAY_SIZE(array)) { \
    dbg(1, "warning: " #array " overflow"); \
    failcode; \
  } else \
    array[count++] = item; \
}
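
// usage sketch (array and counter here are hypothetical, for illustration):
//   static u32 pending_pc[32];
//   static int pending_pc_count;
//   ADD_TO_ARRAY(pending_pc, pending_pc_count, pc, return);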

static inline int find_in_array(u32 *array, size_t size, u32 what)
{
  size_t i;

  for (i = 0; i < size; i++)
    if (what == array[i])
      return i;

  return -1;
}

static int find_in_sorted_array(u32 *array, size_t size, u32 what)
{
  // binary search in sorted array
  int left = 0, right = size-1;

  while (left <= right)
  {
    int middle = (left + right) / 2;
    if (array[middle] == what)
      return middle;
    else if (array[middle] < what)
      left = middle + 1;
    else
      right = middle - 1;
  }
  return -1;
}

// ---------------------------------------------------------------

// NB rcache allocation dependencies:
// - get_reg_arg/get_tmp_arg first (might evict other regs just allocated)
// - get_reg(..., NULL) before get_reg(..., &hr) if it might get the same reg
// - get_reg(..., RC_GR_READ/RMW, ...) before WRITE (might evict needed reg)
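// e.g. a safe ordering for a hypothetical read-modify-write sequence:
//   arg = rcache_get_tmp_arg(0);                 // args first
//   hrs = rcache_get_reg(rs, RC_GR_READ, NULL);  // reads/RMW next
//   hrd = rcache_get_reg(rd, RC_GR_WRITE, NULL); // writes last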

// register cache / constant propagation stuff
typedef enum {
  RC_GR_READ,
  RC_GR_WRITE,
  RC_GR_RMW,
} rc_gr_mode;

typedef struct {
  u32 gregs;
  u32 val;
} gconst_t;

gconst_t gconsts[ARRAY_SIZE(guest_regs)];

static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr);
static inline int rcache_is_cached(sh2_reg_e r);
static void rcache_add_vreg_alias(int x, sh2_reg_e r);
static void rcache_remove_vreg_alias(int x, sh2_reg_e r);
static void rcache_evict_vreg(int x);
static void rcache_remap_vreg(int x);

#define RCACHE_DUMP(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i; \
  printf("cache dump %s:\n",msg); \
  printf(" cache_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->type != HR_FREE || cp->gregs || (cp->flags & ~(HRF_REG|HRF_TEMP))) \
      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->ref, cp->gregs); \
  } \
  printf(" guest_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 || gp->sreg >= 0 || gp->flags) \
      printf(" %d: v=%d f=%x s=%d c=%d\n", i, gp->vreg, gp->flags, gp->sreg, gp->cnst); \
  } \
  printf(" gconsts:\n"); \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    if (gconsts[i].gregs) \
      printf(" %d: m=%x v=%x\n", i, gconsts[i].gregs, gconsts[i].val); \
  } \
}

#define RCACHE_CHECK(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i, x, d = 0; \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->type == HR_FREE || cp->type == HR_TEMP) continue; \
    /* check connectivity greg->vreg */ \
    FOR_ALL_BITS_SET_DO(cp->gregs, x, \
      if (guest_regs[x].vreg != i) \
        { d = 1; printf("cache check v=%d r=%d not connected?\n",i,x); } \
    ) \
  } \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 && !(cache_regs[gp->vreg].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d v=%d not connected?\n", i, gp->vreg); }\
    if (gp->vreg != -1 && cache_regs[gp->vreg].type != HR_STATIC && cache_regs[gp->vreg].type != HR_CACHED) \
      { d = 1; printf("cache check r=%d v=%d wrong type?\n", i, gp->vreg); }\
    if ((gp->flags & GRF_CONST) && !(gconsts[gp->cnst].gregs & (1 << i))) \
      { d = 1; printf("cache check r=%d c=%d not connected?\n", i, gp->cnst); }\
    if ((gp->flags & GRF_CDIRTY) && (gp->vreg != -1 || !(gp->flags & GRF_CONST)) )\
      { d = 1; printf("cache check r=%d CDIRTY?\n", i); } \
  } \
  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
    FOR_ALL_BITS_SET_DO(gconsts[i].gregs, x, \
      if (guest_regs[x].cnst != i || !(guest_regs[x].flags & GRF_CONST)) \
        { d = 1; printf("cache check c=%d v=%d not connected?\n",i,x); } \
    ) \
  } \
  if (d) RCACHE_DUMP(msg) \
/* else { \
    printf("locked regs %s:\n",msg); \
    for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
      cp = &cache_regs[i]; \
      if (cp->flags & HRF_LOCKED) \
        printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->ref, cp->gregs); \
    } \
  } */ \
}
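
// constant propagation: a guest reg known to hold a constant is tracked in
// gconsts[]; gregs is the set of guest regs currently sharing the constant,
// val is the value itself. GRF_CDIRTY marks a constant not yet materialized
// in a host register or in the context.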
#if PROPAGATE_CONSTANTS
static inline int gconst_alloc(sh2_reg_e r)
{
  int i, n = -1;

  for (i = 0; i < ARRAY_SIZE(gconsts); i++) {
    gconsts[i].gregs &= ~(1 << r);
    if (gconsts[i].gregs == 0 && n < 0)
      n = i;
  }
  if (n >= 0)
    gconsts[n].gregs = (1 << r);
  else {
    printf("all gconst buffers in use, aborting\n");
    exit(1); // cannot happen - more constants than guest regs?
  }
  return n;
}

static void gconst_set(sh2_reg_e r, u32 val)
{
  int i = gconst_alloc(r);

  guest_regs[r].flags |= GRF_CONST;
  guest_regs[r].cnst = i;
  gconsts[i].val = val;
}

static void gconst_new(sh2_reg_e r, u32 val)
{
  gconst_set(r, val);
  guest_regs[r].flags |= GRF_CDIRTY;

  // throw away old r that we might have cached
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
}
#endif

static int gconst_get(sh2_reg_e r, u32 *val)
{
  if (guest_regs[r].flags & GRF_CONST) {
    *val = gconsts[guest_regs[r].cnst].val;
    return 1;
  }
  *val = 0;
  return 0;
}

static int gconst_check(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    return 1;
  return 0;
}

// update hr if dirty, else do nothing
static int gconst_try_read(int vreg, sh2_reg_e r)
{
  int i, x;

  if (guest_regs[r].flags & GRF_CDIRTY) {
    x = guest_regs[r].cnst;
    emith_move_r_imm(cache_regs[vreg].hreg, gconsts[x].val);
    FOR_ALL_BITS_SET_DO(gconsts[x].gregs, i,
      {
        if (guest_regs[i].vreg >= 0 && guest_regs[i].vreg != vreg)
          rcache_remove_vreg_alias(guest_regs[i].vreg, i);
        if (guest_regs[i].vreg < 0)
          rcache_add_vreg_alias(vreg, i);
        guest_regs[i].flags &= ~GRF_CDIRTY;
        guest_regs[i].flags |= GRF_DIRTY;
      });
    if (cache_regs[vreg].type != HR_STATIC)
      cache_regs[vreg].type = HR_CACHED;
    cache_regs[vreg].flags |= HRF_DIRTY;
    return 1;
  }
  return 0;
}

static u32 gconst_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY)
      mask |= (1 << i);
  return mask;
}

static void gconst_kill(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    gconsts[guest_regs[r].cnst].gregs &= ~(1 << r);
  guest_regs[r].flags &= ~(GRF_CONST|GRF_CDIRTY);
}

static void gconst_copy(sh2_reg_e rd, sh2_reg_e rs)
{
  gconst_kill(rd);
  if (guest_regs[rs].flags & GRF_CONST) {
    guest_regs[rd].flags |= GRF_CONST;
    if (guest_regs[rd].vreg < 0)
      guest_regs[rd].flags |= GRF_CDIRTY;
    guest_regs[rd].cnst = guest_regs[rs].cnst;
    gconsts[guest_regs[rd].cnst].gregs |= (1 << rd);
  }
}

static void gconst_clean(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_CDIRTY) {
      // using RC_GR_READ here: it will call gconst_try_read,
      // cache the reg and mark it dirty.
      rcache_get_reg_(i, RC_GR_READ, 0, NULL);
    }
}

static void gconst_invalidate(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & (GRF_CONST|GRF_CDIRTY))
      gconsts[guest_regs[i].cnst].gregs &= ~(1 << i);
    guest_regs[i].flags &= ~(GRF_CONST|GRF_CDIRTY);
  }
}

static u16 rcache_counter;
// SH2 register usage bitmasks
static u32 rcache_regs_static;  // statically allocated regs
static u32 rcache_regs_now;     // regs used in current insn
static u32 rcache_regs_soon;    // regs used in the next few insns
static u32 rcache_regs_late;    // regs used in later insns
static u32 rcache_regs_discard; // regs overwritten without being used
static u32 rcache_regs_clean;   // regs needing cleaning

// combination masks XXX this seems obscure
#define rcache_regs_used      (rcache_regs_soon|rcache_regs_late|rcache_regs_clean)
#define rcache_regs_nowused   (rcache_regs_now|rcache_regs_used)
#define rcache_regs_nowsoon   (rcache_regs_now|rcache_regs_soon)
#define rcache_regs_soonclean (rcache_regs_soon|rcache_regs_clean)
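
// NB these masks drive allocation and eviction policy: "now" regs must stay
// put for the current insn, "soon"/"late" regs are worth keeping cached, and
// "discard" regs need no writeback since the guest overwrites them anyway.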

static void rcache_ref_vreg(int x)
{
  if (x >= 0) {
    cache_regs[x].ref++;
    cache_regs[x].flags |= HRF_LOCKED;
  }
}

static void rcache_unref_vreg(int x)
{
  if (x >= 0 && --cache_regs[x].ref == 0) {
    cache_regs[x].flags &= ~HRF_LOCKED;
  }
}

static void rcache_free_vreg(int x)
{
  if (cache_regs[x].type != HR_STATIC)
    cache_regs[x].type = HR_FREE;
  cache_regs[x].flags &= (HRF_REG|HRF_TEMP);
  cache_regs[x].gregs = 0;
  cache_regs[x].ref = 0;
}

static void rcache_unmap_vreg(int x)
{
  int i;

  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, i,
    if (guest_regs[i].flags & GRF_DIRTY) {
      // if a dirty reg is unmapped save its value to context
      if (~rcache_regs_discard & (1 << i))
        emith_ctx_write(cache_regs[x].hreg, i * 4);
      guest_regs[i].flags &= ~GRF_DIRTY;
    }
    guest_regs[i].vreg = -1);
  rcache_free_vreg(x);
}

static void rcache_move_vreg(int d, int x)
{
  int i;

  if (cache_regs[d].type != HR_STATIC)
    cache_regs[d].type = HR_CACHED;
  cache_regs[d].gregs = cache_regs[x].gregs;
  cache_regs[d].flags &= (HRF_TEMP|HRF_REG);
  cache_regs[d].flags |= cache_regs[x].flags & ~(HRF_TEMP|HRF_REG);
  cache_regs[d].ref = 0;
  cache_regs[d].stamp = cache_regs[x].stamp;
  emith_move_r_r(cache_regs[d].hreg, cache_regs[x].hreg);

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].vreg == x)
      guest_regs[i].vreg = d;
  rcache_free_vreg(x);
}
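
// clean a vreg: write any dirty guest regs it holds back to the context, and
// try to move a statically mapped reg back into its reserved sreg if it has
// strayed from there.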
static void rcache_clean_vreg(int x)
{
  int r;

  if (cache_regs[x].flags & HRF_DIRTY) { // writeback
    cache_regs[x].flags &= ~HRF_DIRTY;
    rcache_ref_vreg(x);
    FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, r,
      if (guest_regs[r].flags & GRF_DIRTY) {
        if (guest_regs[r].flags & GRF_STATIC) {
          if (guest_regs[r].vreg != guest_regs[r].sreg) {
            if (!(cache_regs[guest_regs[r].sreg].flags & HRF_LOCKED)) {
              // statically mapped reg not in its sreg. move back to sreg
              rcache_evict_vreg(guest_regs[r].sreg);
              emith_move_r_r(cache_regs[guest_regs[r].sreg].hreg,
                             cache_regs[guest_regs[r].vreg].hreg);
              rcache_remove_vreg_alias(x, r);
              rcache_add_vreg_alias(guest_regs[r].sreg, r);
              cache_regs[guest_regs[r].sreg].flags |= HRF_DIRTY;
            } else {
              // must evict since sreg is locked
              if (~rcache_regs_discard & (1 << r))
                emith_ctx_write(cache_regs[x].hreg, r * 4);
              guest_regs[r].flags &= ~GRF_DIRTY;
              rcache_remove_vreg_alias(x, r);
            }
          } else
            cache_regs[x].flags |= HRF_DIRTY;
        } else {
          if (~rcache_regs_discard & (1 << r))
            emith_ctx_write(cache_regs[x].hreg, r * 4);
          guest_regs[r].flags &= ~GRF_DIRTY;
        }
        rcache_regs_clean &= ~(1 << r);
      })
    rcache_unref_vreg(x);
  }
#if DRC_DEBUG & 64
  RCACHE_CHECK("after clean");
#endif
}

static void rcache_add_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs |= (1 << r);
  guest_regs[r].vreg = x;
  if (cache_regs[x].type != HR_STATIC)
    cache_regs[x].type = HR_CACHED;
}

static void rcache_remove_vreg_alias(int x, sh2_reg_e r)
{
  cache_regs[x].gregs &= ~(1 << r);
  if (!cache_regs[x].gregs)
    // no reg mapped -> free vreg
    rcache_free_vreg(x);
  guest_regs[r].vreg = -1;
}

static void rcache_evict_vreg(int x)
{
#if REMAP_REGISTER
  rcache_remap_vreg(x);
#else
  rcache_clean_vreg(x);
#endif
  rcache_unmap_vreg(x);
}

static void rcache_evict_vreg_aliases(int x, sh2_reg_e r)
{
  rcache_remove_vreg_alias(x, r);
  rcache_evict_vreg(x);
  rcache_add_vreg_alias(x, r);
}

static int rcache_allocate(int what, int minprio)
{
  // evict reg with oldest stamp (only for HRF_REG, no temps)
  int i, i_prio, oldest = -1, prio = 0;
  u16 min_stamp = (u16)-1;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    // consider only unlocked REG or non-TEMP
    if (cache_regs[i].flags == 0 || (cache_regs[i].flags & HRF_LOCKED))
      continue;
    if ((what > 0 && !(cache_regs[i].flags & HRF_REG)) ||
        (what == 0 && (cache_regs[i].flags & HRF_TEMP)) ||
        (what < 0 && !(cache_regs[i].flags & HRF_TEMP)))
      continue;
    if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP) {
      // REG is free
      prio = 6;
      oldest = i;
      break;
    }
    if (cache_regs[i].type == HR_CACHED) {
      if (rcache_regs_now & cache_regs[i].gregs)
        // REGs needed for the current insn
        i_prio = 1;
      else if (rcache_regs_soon & cache_regs[i].gregs)
        // REGs needed in the next insns
        i_prio = 2;
      else if (rcache_regs_late & cache_regs[i].gregs)
        // REGs needed in some future insn
        i_prio = 3;
      else if (!(~rcache_regs_discard & cache_regs[i].gregs))
        // REGs soon overwritten anyway
        i_prio = 4;
      else
        // REGs not needed in the foreseeable future
        i_prio = 5;

      if (prio < i_prio || (prio == i_prio && cache_regs[i].stamp < min_stamp)) {
        min_stamp = cache_regs[i].stamp;
        oldest = i;
        prio = i_prio;
      }
    }
  }

  if (prio < minprio || oldest == -1)
    return -1;

  if (cache_regs[oldest].type == HR_CACHED)
    rcache_evict_vreg(oldest);
  else
    rcache_free_vreg(oldest);

  return oldest;
}

static int rcache_allocate_vreg(int needed)
{
  int x;

  // get a free reg, but use temps only if r is not needed soon
  for (x = ARRAY_SIZE(cache_regs) - 1; x >= 0; x--) {
    if (cache_regs[x].flags && (cache_regs[x].type == HR_FREE ||
        (cache_regs[x].type == HR_TEMP && !(cache_regs[x].flags & HRF_LOCKED))) &&
        (!needed || (cache_regs[x].flags & HRF_REG)))
      break;
  }

  if (x < 0)
    x = rcache_allocate(1, 0);
  return x;
}

static int rcache_allocate_nontemp(void)
{
  int x = rcache_allocate(0, 3);
  return x;
}

static int rcache_allocate_temp(void)
{
  int x;

  // use any free reg, but prefer TEMP regs
  for (x = 0; x < ARRAY_SIZE(cache_regs); x++) {
    if (cache_regs[x].flags && (cache_regs[x].type == HR_FREE ||
        (cache_regs[x].type == HR_TEMP && !(cache_regs[x].flags & HRF_LOCKED))))
      break;
  }

  if (x >= ARRAY_SIZE(cache_regs))
    x = rcache_allocate(-1, 1);
  if (x < 0) {
    printf("no temp register available, aborting\n");
    exit(1);
  }
  return x;
}
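
// under REMAP_REGISTER, a value that already sits in some host register (e.g.
// a function's return value) can be adopted as the cache location for a guest
// reg directly, saving a register-to-register move.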
#if REMAP_REGISTER
// maps a host register to a REG
static int rcache_map_reg(sh2_reg_e r, int hr, int mode)
{
  int x, i;

  gconst_kill(r);

  // lookup the TEMP hr maps to
  i = reg_map_host[hr];
  if (i < 0) {
    // must not happen
    printf("invalid host register %d\n", hr);
    exit(1);
  }

  // deal with statically mapped regs
  if (mode == RC_GR_RMW && (guest_regs[r].flags & GRF_STATIC)) {
    x = guest_regs[r].sreg;
    if (guest_regs[r].vreg == x) {
      // STATIC in its sreg with no aliases, and some processing pending
      if (cache_regs[x].gregs == 1 << r)
        return cache_regs[x].hreg;
    } else if (cache_regs[x].type == HR_FREE ||
        (cache_regs[x].type == HR_TEMP && !(cache_regs[x].flags & HRF_LOCKED)))
      // STATIC not in its sreg, with sreg available -> move it
      i = guest_regs[r].sreg;
  }

  // remove old mappings of r and i if one exists
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);

  // set new mapping
  if (cache_regs[i].type != HR_STATIC)
    cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 1 << r;
  cache_regs[i].flags &= (HRF_TEMP|HRF_REG);
  cache_regs[i].ref = 0;
  cache_regs[i].stamp = ++rcache_counter;
  cache_regs[i].flags |= HRF_DIRTY;
  rcache_ref_vreg(i);
  guest_regs[r].flags |= GRF_DIRTY;
  guest_regs[r].vreg = i;

#if DRC_DEBUG & 64
  RCACHE_CHECK("after map");
#endif
  return cache_regs[i].hreg;
}

// remap vreg from a TEMP to a REG if it will be used (upcoming TEMP invalidation)
static void rcache_remap_vreg(int x)
{
  int d;

  // x must be a cached vreg
  if (cache_regs[x].type != HR_CACHED && cache_regs[x].type != HR_STATIC)
    return;
  // don't do it if x is already a REG or isn't used or is to be cleaned anyway
  if ((cache_regs[x].flags & HRF_REG) ||
      !(rcache_regs_used & ~rcache_regs_clean & cache_regs[x].gregs)) {
    // clean here to avoid data loss on invalidation
    rcache_clean_vreg(x);
    return;
  }

  if (cache_regs[x].flags & HRF_LOCKED) {
    printf("remap vreg %d is locked\n", x);
    exit(1);
  }

  // allocate a non-TEMP vreg
  rcache_ref_vreg(x); // lock to avoid evicting x
  d = rcache_allocate_nontemp();
  rcache_unref_vreg(x);
  if (d < 0) {
    rcache_clean_vreg(x);
    return;
  }

  // move vreg to the new location
  rcache_move_vreg(d, x);

#if DRC_DEBUG & 64
  RCACHE_CHECK("after remap");
#endif
}
#endif
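
// ALIAS_REGISTERS lets a plain register move be handled with no host code at
// all: the destination simply becomes another alias of the source's vreg, and
// the aliases are split later if one of them gets written.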
#if ALIAS_REGISTERS
static void rcache_alias_vreg(sh2_reg_e rd, sh2_reg_e rs)
{
  int x;

  // if s isn't constant, it must be in cache for aliasing
  if (!gconst_check(rs))
    rcache_get_reg_(rs, RC_GR_READ, 0, NULL);

  // if d and s are not already aliased
  x = guest_regs[rs].vreg;
  if (guest_regs[rd].vreg != x) {
    // remove possible old mapping of dst
    if (guest_regs[rd].vreg >= 0)
      rcache_remove_vreg_alias(guest_regs[rd].vreg, rd);
    // make dst an alias of src
    if (x >= 0)
      rcache_add_vreg_alias(x, rd);
    // if d is now in cache, it must be dirty
    if (guest_regs[rd].vreg >= 0) {
      x = guest_regs[rd].vreg;
      cache_regs[x].flags |= HRF_DIRTY;
      guest_regs[rd].flags |= GRF_DIRTY;
    }
  }

  gconst_copy(rd, rs);

#if DRC_DEBUG & 64
  RCACHE_CHECK("after alias");
#endif
}
#endif

// note: must not be called when doing conditional code
static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr)
{
  int src, dst, ali;
  cache_reg_t *tr;

  dst = src = guest_regs[r].vreg;
  rcache_ref_vreg(src); // lock to avoid evicting src

  // good opportunity to relocate a remapped STATIC?
  if ((guest_regs[r].flags & GRF_STATIC) && src != guest_regs[r].sreg &&
      !(cache_regs[guest_regs[r].sreg].flags & HRF_LOCKED) &&
      (src < 0 || mode != RC_GR_READ) &&
      !(rcache_regs_nowsoon & cache_regs[guest_regs[r].sreg].gregs)) {
    dst = guest_regs[r].sreg;
    rcache_evict_vreg(dst);
  } else if (dst < 0) {
    // allocate a cache register
    if ((dst = rcache_allocate_vreg(rcache_regs_nowsoon & (1 << r))) < 0) {
      printf("no registers to evict, aborting\n");
      exit(1);
    }
  }
  tr = &cache_regs[dst];
  tr->stamp = rcache_counter;
  rcache_unref_vreg(src);

  // remove r from src
  if (src >= 0 && src != dst)
    rcache_remove_vreg_alias(src, r);

  // if r has a constant it may have aliases
  if (mode != RC_GR_WRITE && gconst_try_read(dst, r))
    src = dst;

  // if r will be modified, check for aliases being needed soon
  ali = tr->gregs & ~(1 << r);
  if (mode != RC_GR_READ && src == dst && ali) {
    int x = -1;
    if (rcache_regs_nowsoon & ali) {
      if (tr->type == HR_STATIC && guest_regs[r].sreg == dst &&
          !(tr->flags & HRF_LOCKED)) {
        // split aliases if r is STATIC in sreg and dst isn't already locked
        rcache_ref_vreg(dst); // lock to avoid evicting dst
        if ((x = rcache_allocate_vreg(rcache_regs_nowsoon & ali)) >= 0) {
          src = x;
          rcache_move_vreg(src, dst);
        }
        rcache_unref_vreg(dst);
      } else {
        // split r
        rcache_ref_vreg(src); // lock to avoid evicting src
        if ((x = rcache_allocate_vreg(rcache_regs_nowsoon & (1 << r))) >= 0) {
          dst = x;
          tr = &cache_regs[dst];
          tr->stamp = rcache_counter;
        }
        rcache_unref_vreg(src);
      }
    }
    if (x < 0)
      // aliases not needed or no vreg available, remove them
      rcache_evict_vreg_aliases(dst, r);
    else if (src != dst)
      rcache_remove_vreg_alias(src, r);
  }

  // assign r to dst
  rcache_add_vreg_alias(dst, r);

  // handle dst register transfer
  if (src < 0 && mode != RC_GR_WRITE)
    emith_ctx_read(tr->hreg, r * 4);
  if (hr) {
    *hr = (src >= 0 ? cache_regs[src].hreg : tr->hreg);
    rcache_ref_vreg(reg_map_host[*hr]);
  } else if (src >= 0 && cache_regs[src].hreg != tr->hreg)
    emith_move_r_r(tr->hreg, cache_regs[src].hreg);

  // housekeeping
  if (do_locking)
    rcache_ref_vreg(dst);
  if (mode != RC_GR_READ) {
    tr->flags |= HRF_DIRTY;
    guest_regs[r].flags |= GRF_DIRTY;
    gconst_kill(r);
  }

#if DRC_DEBUG & 64
  RCACHE_CHECK("after getreg");
#endif
  return tr->hreg;
}

static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr)
{
  return rcache_get_reg_(r, mode, 1, hr);
}

static int rcache_get_tmp(void)
{
  int i;

  i = rcache_allocate_temp();
  rcache_ref_vreg(i);

  cache_regs[i].type = HR_TEMP;
  return cache_regs[i].hreg;
}

static int rcache_get_vreg_hr(int hr)
{
  int i;

  i = reg_map_host[hr];
  if (i < 0 || (cache_regs[i].flags & HRF_LOCKED)) {
    printf("host register %d is locked\n", hr);
    exit(1);
  }

  if (cache_regs[i].type == HR_CACHED)
    rcache_evict_vreg(i);
  else if (cache_regs[i].type == HR_TEMP && (cache_regs[i].flags & HRF_LOCKED)) {
    printf("host reg %d already used, aborting\n", hr);
    exit(1);
  }

  return i;
}

static int rcache_get_vreg_arg(int arg)
{
  int hr = 0;

  host_arg2reg(hr, arg);
  return rcache_get_vreg_hr(hr);
}

// get a reg to be used as function arg
static int rcache_get_tmp_arg(int arg)
{
  int x = rcache_get_vreg_arg(arg);
  cache_regs[x].type = HR_TEMP;
  rcache_ref_vreg(x);

  return cache_regs[x].hreg;
}

// ... as return value after a call
static int rcache_get_tmp_ret(void)
{
  int x = rcache_get_vreg_hr(RET_REG);
  cache_regs[x].type = HR_TEMP;
  rcache_ref_vreg(x);

  return cache_regs[x].hreg;
}

// same as above, but caches the reg if the access is readonly (announced by hr being NULL)
static int rcache_get_reg_arg(int arg, sh2_reg_e r, int *hr)
{
  int i, srcr, dstr, dstid, keep;
  u32 val;
  host_arg2reg(dstr, arg);

  i = guest_regs[r].vreg;
  if (i >= 0 && cache_regs[i].type == HR_CACHED && cache_regs[i].hreg == dstr)
    // r is already in arg, avoid evicting
    dstid = i;
  else
    dstid = rcache_get_vreg_arg(arg);
  dstr = cache_regs[dstid].hreg;

  if (rcache_is_cached(r)) {
    // r is needed later on anyway
    srcr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
    keep = 1;
  } else if ((guest_regs[r].flags & GRF_CDIRTY) && gconst_get(r, &val)) {
    // r has an uncommitted const - load into arg, but keep constant uncommitted
    srcr = dstr;
    emith_move_r_imm(srcr, val);
    keep = 0;
  } else {
    // must read from ctx
    srcr = dstr;
    emith_ctx_read(srcr, r * 4);
    keep = 1;
  }

  if (cache_regs[dstid].type == HR_CACHED)
    rcache_evict_vreg(dstid);
  cache_regs[dstid].type = HR_TEMP;

  if (hr == NULL) {
    if (dstr != srcr)
      // arg is a copy of cached r
      emith_move_r_r(dstr, srcr);
    else if (keep && guest_regs[r].vreg < 0)
      // keep arg as vreg for r
      rcache_add_vreg_alias(dstid, r);
  } else {
    *hr = srcr;
    if (dstr != srcr) // must lock srcr if not copied here
      rcache_ref_vreg(reg_map_host[srcr]);
  }

  cache_regs[dstid].stamp = ++rcache_counter;
  rcache_ref_vreg(dstid);
#if DRC_DEBUG & 64
  RCACHE_CHECK("after getarg");
#endif
  return dstr;
}

static void rcache_free_tmp(int hr)
{
  int i = reg_map_host[hr];

  if (i < 0 || cache_regs[i].type != HR_TEMP) {
    printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, cache_regs[i].type);
    exit(1);
  }

  rcache_free_vreg(i);
}

// saves temporary result either in REG or in drctmp
static int rcache_save_tmp(int hr)
{
  int i;

  // find REG, either free or unlocked temp or oldest non-hinted cached
  i = rcache_allocate_nontemp();
  if (i < 0) {
    // if none is available, store in drctmp
    emith_ctx_write(hr, offsetof(SH2, drc_tmp));
    rcache_free_tmp(hr);
    return -1;
  }

  cache_regs[i].type = HR_CACHED;
  cache_regs[i].gregs = 0; // not storing any guest register
  cache_regs[i].flags &= (HRF_TEMP|HRF_REG);
  cache_regs[i].ref = 0;
  cache_regs[i].stamp = ++rcache_counter;
  rcache_ref_vreg(i);
  emith_move_r_r(cache_regs[i].hreg, hr);
  rcache_free_tmp(hr);
  return i;
}

static int rcache_restore_tmp(int x)
{
  int hr;

  // find REG with tmp store: cached but with no gregs
  if (x >= 0) {
    if (cache_regs[x].type != HR_CACHED || cache_regs[x].gregs) {
      printf("invalid tmp storage %d\n", x);
      exit(1);
    }
    // found, transform to a TEMP
    cache_regs[x].type = HR_TEMP;
    return cache_regs[x].hreg;
  }

  // if not available, create a TEMP store and fetch from drctmp
  hr = rcache_get_tmp();
  emith_ctx_read(hr, offsetof(SH2, drc_tmp));
  return hr;
}

static void rcache_free(int hr)
{
  int x = reg_map_host[hr];

  if (cache_regs[x].type == HR_TEMP)
    rcache_free_tmp(hr);
  else
    rcache_unref_vreg(x);
}

static void rcache_unlock(int x)
{
  if (x >= 0) {
    cache_regs[x].flags &= ~HRF_LOCKED;
    cache_regs[x].ref = 0;
    // rcache_regs_now &= ~cache_regs[x].gregs;
  }
}

static void rcache_unlock_all(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    cache_regs[i].flags &= ~HRF_LOCKED;
    cache_regs[i].ref = 0;
  }
}

static inline void rcache_set_usage_now(u32 mask)
{
  rcache_regs_now = mask;
}

static inline void rcache_set_usage_soon(u32 mask)
{
  rcache_regs_soon = mask;
}

static inline void rcache_set_usage_late(u32 mask)
{
  rcache_regs_late = mask;
}

static inline void rcache_set_usage_discard(u32 mask)
{
  rcache_regs_discard = mask;
}

static inline int rcache_is_cached(sh2_reg_e r)
{
  // is r in cache or needed soon?
  return (guest_regs[r].vreg >= 0 || (rcache_regs_soonclean & (1 << r)));
}

static inline int rcache_is_hreg_used(int hr)
{
  int x = reg_map_host[hr];
  // is hr in use?
  return cache_regs[x].type != HR_FREE &&
        (cache_regs[x].type != HR_TEMP || (cache_regs[x].flags & HRF_LOCKED));
}

static inline u32 rcache_used_hregs_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if ((cache_regs[i].flags & HRF_TEMP) && cache_regs[i].type != HR_FREE &&
        (cache_regs[i].type != HR_TEMP || (cache_regs[i].flags & HRF_LOCKED)))
      mask |= 1 << cache_regs[i].hreg;

  return mask;
}

static inline u32 rcache_dirty_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_DIRTY)
      mask |= 1 << i;
  mask |= gconst_dirty_mask();

  return mask;
}

static inline u32 rcache_cached_mask(void)
{
  u32 mask = 0;
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED || cache_regs[i].type == HR_STATIC)
      mask |= cache_regs[i].gregs;

  return mask;
}
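
// "cleaning" writes dirty values back to the SH2 context while keeping them
// cached; "invalidating" drops the cache mappings without writeback, which is
// why rcache_flush() cleans first and invalidates afterwards.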

static void rcache_clean_tmp(void)
{
  int i;

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if (cache_regs[i].type == HR_CACHED && (cache_regs[i].flags & HRF_TEMP)) {
      rcache_unlock(i);
#if REMAP_REGISTER
      rcache_remap_vreg(i);
#else
      rcache_clean_vreg(i);
#endif
    }
  rcache_regs_clean = 0;
}

static void rcache_clean_masked(u32 mask)
{
  int i, r, hr;

  if (!(mask &= ~rcache_regs_static))
    return;
  rcache_regs_clean |= mask;

  // clean constants where all aliases are covered by the mask
  for (i = 0; i < ARRAY_SIZE(gconsts); i++)
    if ((gconsts[i].gregs & mask) && !(gconsts[i].gregs & ~mask)) {
      FOR_ALL_BITS_SET_DO(gconsts[i].gregs, r,
        if (guest_regs[r].flags & GRF_CDIRTY) {
          hr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
          rcache_clean_vreg(reg_map_host[hr]);
          break;
        });
    }

  // clean vregs where all aliases are covered by the mask
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    if ((cache_regs[i].type == HR_CACHED || cache_regs[i].type == HR_STATIC) &&
        (cache_regs[i].gregs & mask) && !(cache_regs[i].gregs & ~mask))
      rcache_clean_vreg(i);
}

static void rcache_clean(void)
{
  int i;

  gconst_clean();

  rcache_regs_clean = (1 << ARRAY_SIZE(guest_regs)) - 1;
  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--)
    if (cache_regs[i].type == HR_CACHED || cache_regs[i].type == HR_STATIC)
      rcache_clean_vreg(i);

  // relocate statics to their sregs (necessary before conditional jumps)
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if ((guest_regs[i].flags & GRF_STATIC) &&
        guest_regs[i].vreg != guest_regs[i].sreg) {
      rcache_ref_vreg(guest_regs[i].vreg);
      rcache_evict_vreg(guest_regs[i].sreg);
      rcache_unref_vreg(guest_regs[i].vreg);

      if (guest_regs[i].vreg < 0)
        emith_ctx_read(cache_regs[guest_regs[i].sreg].hreg, i*4);
      else
        emith_move_r_r(cache_regs[guest_regs[i].sreg].hreg,
                       cache_regs[guest_regs[i].vreg].hreg);
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }
  rcache_regs_clean = 0;
}

static void rcache_invalidate_tmp(void)
{
  int i;

  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
    if (cache_regs[i].flags & HRF_TEMP) {
      rcache_unlock(i);
      if (cache_regs[i].type == HR_CACHED)
        rcache_evict_vreg(i);
      else
        rcache_free_vreg(i);
    }
  }
}

static void rcache_invalidate(void)
{
  int i;

  gconst_invalidate();
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
    rcache_free_vreg(i);

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    guest_regs[i].flags &= GRF_STATIC;
    if (!(guest_regs[i].flags & GRF_STATIC))
      guest_regs[i].vreg = -1;
    else {
      cache_regs[guest_regs[i].sreg].gregs = 1 << i;
      cache_regs[guest_regs[i].sreg].flags |= HRF_DIRTY;
      guest_regs[i].flags |= GRF_DIRTY;
      guest_regs[i].vreg = guest_regs[i].sreg;
    }
  }

  rcache_counter = 0;
  rcache_regs_now = rcache_regs_soon = rcache_regs_late = 0;
  rcache_regs_discard = rcache_regs_clean = 0;
}

static void rcache_flush(void)
{
  rcache_unlock_all();
  rcache_clean();
  rcache_invalidate();
}

static void rcache_init(void)
{
  static int once = 1;
  int i;

  // init is executed on every rom load, but this must only be executed once...
  if (once) {
    memset(reg_map_host, -1, sizeof(reg_map_host));
    for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
      reg_map_host[cache_regs[i].hreg] = i;

    for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
      if (guest_regs[i].flags & GRF_STATIC) {
        rcache_regs_static |= (1 << i);
        guest_regs[i].sreg = reg_map_host[guest_regs[i].sreg];
        cache_regs[guest_regs[i].sreg].type = HR_STATIC;
      } else
        guest_regs[i].sreg = -1;
    once = 0;
  }

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
    if (guest_regs[i].flags & GRF_STATIC) {
      guest_regs[i].vreg = guest_regs[i].sreg;
      cache_regs[guest_regs[i].sreg].gregs = (1 << i);
    }

  rcache_invalidate();
}

// ---------------------------------------------------------------
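
// memory access emitters: when the guest address is known at translation time,
// these try to access the backing host memory directly (or even fold a ROM
// read into a constant) instead of going through a memory handler call.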

// NB may return either REG or TEMP
static int emit_get_rbase_and_offs(SH2 *sh2, sh2_reg_e r, int rmode, u32 *offs)
{
  uptr omask = emith_rw_offs_max(); // offset mask
  u32 mask = 0;
  u32 a;
  int poffs;
  int hr, hr2;
  uptr la;

  // is r constant, and does it point to a memory region?
  if (! gconst_get(r, &a))
    return -1;
  poffs = dr_ctx_get_mem_ptr(sh2, a, &mask);
  if (poffs == -1)
    return -1;

  if (mask < 0x20000) {
    // data array, BIOS, DRAM, can't safely access directly since host addr may
    // change (BIOS,da code may run on either core, DRAM may be switched)
    hr = rcache_get_tmp();
    a = (a + *offs) & mask;
    if (poffs == offsetof(SH2, p_da)) {
      // access sh2->data_array directly
      a += offsetof(SH2, data_array);
      emith_add_r_r_ptr_imm(hr, CONTEXT_REG, a & ~omask);
    } else {
      emith_ctx_read_ptr(hr, poffs);
      if (a & ~omask)
        emith_add_r_r_ptr_imm(hr, hr, a & ~omask);
    }
    *offs = a & omask;
    return hr;
  }

  // ROM, SDRAM. Host address should be mmapped to be equal to SH2 address.
  la = (uptr)*(void **)((char *)sh2 + poffs);

  // if r is in rcache or needed soon anyway, and offs is relative to region,
  // and address translation fits in add_ptr_imm (s32), then use rcached const
  if (la == (s32)la && !(*offs & ~mask) && rcache_is_cached(r)) {
    u32 odd = a & 1; // need to fix odd address for correct byte addressing
    la -= (s32)((a & ~mask) - *offs - odd); // diff between reg and memory
    hr = hr2 = rcache_get_reg(r, rmode, NULL);
    if ((la & ~omask) - odd) {
      hr = rcache_get_tmp();
      emith_add_r_r_ptr_imm(hr, hr2, (la & ~omask) - odd);
      rcache_free(hr2);
    }
    *offs = (la & omask);
  } else {
    // known fixed host address
    la += (a + *offs) & mask;
    hr = rcache_get_tmp();
    emith_move_r_ptr_imm(hr, la & ~omask);
    *offs = la & omask;
  }
  return hr;
}

// read const data from const ROM address
static int emit_get_rom_data(SH2 *sh2, sh2_reg_e r, u32 offs, int size, u32 *val)
{
  u32 a, mask;

  *val = 0;
  if (gconst_get(r, &a)) {
    a += offs;
    // check if rom is memory mapped (not bank switched), and address is in rom
    if (dr_is_rom(a) && p32x_sh2_get_mem_ptr(a, &mask, sh2) == sh2->p_rom) {
      switch (size & MF_SIZEMASK) {
      case 0:   *val = (s8)p32x_sh2_read8(a, sh2s);   break;  // 8
      case 1:   *val = (s16)p32x_sh2_read16(a, sh2s); break;  // 16
      case 2:   *val = p32x_sh2_read32(a, sh2s);      break;  // 32
      }
      return 1;
    }
  }
  return 0;
}

static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
{
#if PROPAGATE_CONSTANTS
  gconst_new(dst, imm);
#else
  int hr = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  emith_move_r_imm(hr, imm);
#endif
}

static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
{
  if (gconst_check(src) || rcache_is_cached(src)) {
#if ALIAS_REGISTERS
    rcache_alias_vreg(dst, src);
#else
    int hr_s = rcache_get_reg(src, RC_GR_READ, NULL);
    int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);

    emith_move_r_r(hr_d, hr_s);
    gconst_copy(dst, src);
#endif
  } else {
    int hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
    emith_ctx_read(hr_d, src * 4);
  }
}

static void emit_add_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_add_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val + imm);
  } else
    gconst_new(r, val + imm);
}

static void emit_sub_r_imm(sh2_reg_e r, u32 imm)
{
  u32 val;
  int isgc = gconst_get(r, &val);
  int hr, hr2;

  if (!isgc || rcache_is_cached(r)) {
    // not constant, or r is already in cache
    hr = rcache_get_reg(r, RC_GR_RMW, &hr2);
    emith_sub_r_r_imm(hr, hr2, imm);
    rcache_free(hr2);
    if (isgc)
      gconst_set(r, val - imm);
  } else
    gconst_new(r, val - imm);
}

static void emit_sync_t_to_sr(void)
{
  // avoid reloading SR from context if there's nothing to do
  if (emith_get_t_cond() >= 0) {
    int sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    emith_sync_t(sr);
  }
}
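
// NB the memhandler helpers below use the host C calling convention: address
// in arg0, value (for writes) in arg1. Cached temps must be cleaned and then
// invalidated around the call, since the handler may clobber caller-saved
// host registers.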

// rd = @(arg0)
static int emit_memhandler_read(int size)
{
  emit_sync_t_to_sr();
  rcache_clean_tmp();
#ifndef DRC_SR_REG
  // must writeback cycles for poll detection stuff
  if (guest_regs[SHR_SR].vreg != -1)
    rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
#endif
  rcache_invalidate_tmp();

  if (size & MF_POLLING)
    switch (size & MF_SIZEMASK) {
    case 0:   emith_call(sh2_drc_read8_poll);   break; // 8
    case 1:   emith_call(sh2_drc_read16_poll);  break; // 16
    case 2:   emith_call(sh2_drc_read32_poll);  break; // 32
    }
  else
    switch (size & MF_SIZEMASK) {
    case 0:   emith_call(sh2_drc_read8);        break; // 8
    case 1:   emith_call(sh2_drc_read16);       break; // 16
    case 2:   emith_call(sh2_drc_read32);       break; // 32
    }

  return rcache_get_tmp_ret();
}

// @(arg0) = arg1
static void emit_memhandler_write(int size)
{
  emit_sync_t_to_sr();
  rcache_clean_tmp();
#ifndef DRC_SR_REG
  if (guest_regs[SHR_SR].vreg != -1)
    rcache_unmap_vreg(guest_regs[SHR_SR].vreg);
#endif
  rcache_invalidate_tmp();

  switch (size & MF_SIZEMASK) {
  case 0:   emith_call(sh2_drc_write8);   break;  // 8
  case 1:   emith_call(sh2_drc_write16);  break;  // 16
  case 2:   emith_call(sh2_drc_write32);  break;  // 32
  }
}

// rd = @(Rs,#offs); rd < 0 -> return a temp
static int emit_memhandler_read_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
{
  int hr, hr2;
  u32 val;

#if PROPAGATE_CONSTANTS
  if (emit_get_rom_data(sh2, rs, offs, size, &val)) {
    if (rd == SHR_TMP) {
      hr2 = rcache_get_tmp();
      emith_move_r_imm(hr2, val);
    } else {
      emit_move_r_imm32(rd, val);
      hr2 = rcache_get_reg(rd, RC_GR_RMW, NULL);
    }
    if (size & MF_POSTINCR)
      emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
    return hr2;
  }

  val = size & MF_POSTINCR;
  hr = emit_get_rbase_and_offs(sh2, rs, val ? RC_GR_RMW : RC_GR_READ, &offs);
  if (hr != -1) {
    if (rd == SHR_TMP)
      hr2 = rcache_get_tmp();
    else
      hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
    switch (size & MF_SIZEMASK) {
    case 0: emith_read8s_r_r_offs(hr2, hr, offs ^ 1);  break; // 8
    case 1: emith_read16s_r_r_offs(hr2, hr, offs);     break; // 16
    case 2: emith_read_r_r_offs(hr2, hr, offs); emith_ror(hr2, hr2, 16); break;
    }
    rcache_free(hr);
    if (size & MF_POSTINCR)
      emit_add_r_imm(rs, 1 << (size & MF_SIZEMASK));
    return hr2;
  }
#endif

  if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
    hr = rcache_get_tmp_arg(0);
    emith_move_r_imm(hr, val + offs);
    if (size & MF_POSTINCR)
      gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  } else if (size & MF_POSTINCR) {
    hr = rcache_get_tmp_arg(0);
    hr2 = rcache_get_reg(rs, RC_GR_RMW, NULL);
    emith_add_r_r_imm(hr, hr2, offs);
    emith_add_r_imm(hr2, 1 << (size & MF_SIZEMASK));
    if (gconst_get(rs, &val))
      gconst_set(rs, val + (1 << (size & MF_SIZEMASK)));
  } else {
    hr = rcache_get_reg_arg(0, rs, &hr2);
    if (offs || hr != hr2)
      emith_add_r_r_imm(hr, hr2, offs);
  }
  hr = emit_memhandler_read(size);

  size &= MF_SIZEMASK;
  if (rd == SHR_TMP)
    hr2 = hr;
  else
#if REMAP_REGISTER
    hr2 = rcache_map_reg(rd, hr, RC_GR_WRITE);
#else
    hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
#endif

  if (hr != hr2) {
    emith_move_r_r(hr2, hr);
    rcache_free_tmp(hr);
  }
  return hr2;
}

// @(Rs,#offs) = rd; rd < 0 -> write arg1
static void emit_memhandler_write_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
{
  int hr, hr2;
  u32 val;

  if (rd == SHR_TMP) {
    host_arg2reg(hr2, 1); // already locked and prepared by caller
  } else if ((size & MF_PREDECR) && rd == rs) { // must avoid caching rd in arg1
    hr2 = rcache_get_reg_arg(1, rd, &hr);
    if (hr != hr2) {
      emith_move_r_r(hr2, hr);
      rcache_free(hr2);
    }
  } else
    hr2 = rcache_get_reg_arg(1, rd, NULL);
  if (rd != SHR_TMP)
    rcache_unlock(guest_regs[rd].vreg); // unlock in case rd is in arg0

  if (gconst_get(rs, &val) && !rcache_is_cached(rs)) {
    hr = rcache_get_tmp_arg(0);
    if (size & MF_PREDECR) {
      val -= 1 << (size & MF_SIZEMASK);
      gconst_new(rs, val);
    }
    emith_move_r_imm(hr, val + offs);
  } else if (offs || (size & MF_PREDECR)) {
    if (size & MF_PREDECR)
      emit_sub_r_imm(rs, 1 << (size & MF_SIZEMASK));
    rcache_unlock(guest_regs[rs].vreg); // unlock in case rs is in arg0
    hr = rcache_get_reg_arg(0, rs, &hr2);
    if (offs || hr != hr2)
      emith_add_r_r_imm(hr, hr2, offs);
  } else
    hr = rcache_get_reg_arg(0, rs, NULL);

  emit_memhandler_write(size);
}

// rd = @(Rx,Ry); rd < 0 -> return a temp
static int emit_indirect_indexed_read(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
{
  int hr, hr2;
  int tx, ty;
#if PROPAGATE_CONSTANTS
  u32 offs;

  // if offs is larger than 0x01000000, it's most probably the base address part
  if (gconst_get(ry, &offs) && offs < 0x01000000)
    return emit_memhandler_read_rr(sh2, rd, rx, offs, size);
  if (gconst_get(rx, &offs) && offs < 0x01000000)
    return emit_memhandler_read_rr(sh2, rd, ry, offs, size);
#endif
  hr = rcache_get_reg_arg(0, rx, &tx);
  ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  emith_add_r_r_r(hr, tx, ty);
  hr = emit_memhandler_read(size);

  size &= MF_SIZEMASK;
  if (rd == SHR_TMP)
    hr2 = hr;
  else
#if REMAP_REGISTER
    hr2 = rcache_map_reg(rd, hr, RC_GR_WRITE);
#else
    hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
#endif

  if (hr != hr2) {
    emith_move_r_r(hr2, hr);
    rcache_free_tmp(hr);
  }
  return hr2;
}

// @(Rx,Ry) = rd; rd < 0 -> write arg1
static void emit_indirect_indexed_write(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
{
  int hr, tx, ty;
#if PROPAGATE_CONSTANTS
  u32 offs;

  // if offs is larger than 0x01000000, it's most probably the base address part
  if (gconst_get(ry, &offs) && offs < 0x01000000)
    return emit_memhandler_write_rr(sh2, rd, rx, offs, size);
  if (gconst_get(rx, &offs) && offs < 0x01000000)
    return emit_memhandler_write_rr(sh2, rd, ry, offs, size);
#endif
  if (rd != SHR_TMP)
    rcache_get_reg_arg(1, rd, NULL);
  hr = rcache_get_reg_arg(0, rx, &tx);
  ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  emith_add_r_r_r(hr, tx, ty);
  emit_memhandler_write(size);
}
// @Rn+,@Rm+
static void emit_indirect_read_double(SH2 *sh2, int *rnr, int *rmr, sh2_reg_e rn, sh2_reg_e rm, int size)
{
  int tmp;

  // unlock rn, rm here to avoid REG shortage in MAC operation
  tmp = emit_memhandler_read_rr(sh2, SHR_TMP, rn, 0, size | MF_POSTINCR);
  rcache_unlock(guest_regs[rn].vreg);
  tmp = rcache_save_tmp(tmp);
  *rmr = emit_memhandler_read_rr(sh2, SHR_TMP, rm, 0, size | MF_POSTINCR);
  rcache_unlock(guest_regs[rm].vreg);
  *rnr = rcache_restore_tmp(tmp);
}
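// Usage sketch (illustrative, taken from the MAC.L handler below): both
// operands are read with post-increment; the first temp is spilled via
// rcache_save_tmp() so the second memhandler call can't clobber it:
//   emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
//   emith_sh2_macl(macl, mach, tmp, tmp2, sr);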
static void emit_do_static_regs(int is_write, int tmpr)
{
  int i, r, count;

  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
    if (guest_regs[i].flags & GRF_STATIC)
      r = cache_regs[guest_regs[i].vreg].hreg;
    else
      continue;

    for (count = 1; i < ARRAY_SIZE(guest_regs) - 1; i++, r++) {
      if ((guest_regs[i + 1].flags & GRF_STATIC) &&
          cache_regs[guest_regs[i + 1].vreg].hreg == r + 1)
        count++;
      else
        break;
    }

    if (count > 1) {
      // i, r point to last item
      if (is_write)
        emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
      else
        emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
    } else {
      if (is_write)
        emith_ctx_write(r, i * 4);
      else
        emith_ctx_read(r, i * 4);
    }
  }
}
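// Example (hypothetical static mapping): if three consecutive guest regs are
// statically allocated to consecutive host regs, the run detection above
// emits one emith_ctx_{read,write}_multiple() covering all three instead of
// three single context transfers; non-consecutive statics take the else path.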
#define DELAY_SAVE_T(sr) { \
  emith_bic_r_imm(sr, T_save); \
  emith_tst_r_imm(sr, T); \
  EMITH_SJMP_START(DCOND_EQ); \
  emith_or_r_imm_c(DCOND_NE, sr, T_save); \
  EMITH_SJMP_END(DCOND_EQ); \
}
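// DELAY_SAVE_T mirrors the live T bit into the T_save bit of SR. It is used
// when a delay slot insn overwrites a T that the branch still needs, e.g.
// "BT/BF target" with a compare in the slot; the branch test then checks
// T_save instead of T (see the delay_dep_fw handling in sh2_translate below).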
#define FLUSH_CYCLES(sr) \
  if (cycles > 0) { \
    emith_sub_r_imm(sr, cycles << 12); \
    cycles = 0; \
  }
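// The cycle budget is kept in the upper bits of the cached SR (shifted left
// by 12, above the T/T_save flag bits), so blocks can charge and test cycles
// with plain register arithmetic. A minimal sketch of the resulting code:
//   emith_sub_r_imm(sr, 3 << 12);             // FLUSH_CYCLES: charge 3 cycles
//   emith_cmp_r_imm(sr, 0);                   // budget exhausted -> SR negative
//   emith_jump_cond(DCOND_LE, sh2_drc_exit);  // as in the block entry code below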
static void *dr_get_pc_base(u32 pc, SH2 *sh2);

static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
{
  u32 branch_target_pc[MAX_LOCAL_BRANCHES];
  void *branch_target_ptr[MAX_LOCAL_BRANCHES];
  int branch_target_count = 0;
  void *branch_patch_ptr[MAX_LOCAL_BRANCHES];
  u32 branch_patch_pc[MAX_LOCAL_BRANCHES];
  int branch_patch_count = 0;
  u8 op_flags[BLOCK_INSN_LIMIT];
  struct drcf {
    int delay_reg:8;
    u32 loop_type:8;
    u32 polling:8;
    u32 test_irq:1;
    u32 pending_branch_direct:1;
    u32 pending_branch_indirect:1;
  } drcf = { 0, };
  // PC of current, first, last SH2 insn
  u32 pc, base_pc, end_pc;
  u32 base_literals, end_literals;
  void *block_entry_ptr;
  struct block_desc *block;
  struct block_entry *entry;
  u16 *dr_pc_base;
  struct op_data *opd;
  int blkid_main = 0;
  int skip_op = 0;
  int tmp, tmp2;
  int cycles;
  int i, v;
  u32 u, m1, m2;
  int op;
  u16 crc;

  base_pc = sh2->pc;

  // get base/validate PC
  dr_pc_base = dr_get_pc_base(base_pc, sh2);
  if (dr_pc_base == (void *)-1) {
    printf("invalid PC, aborting: %08x\n", base_pc);
    // FIXME: be less destructive
    exit(1);
  }
  // initial passes to disassemble and analyze the block
  crc = scan_block(base_pc, sh2->is_slave, op_flags, &end_pc, &base_literals, &end_literals);
  end_literals = dr_check_nolit(base_literals, end_literals, tcache_id);
  if (base_literals == end_literals) // map empty lit section to end of code
    base_literals = end_literals = end_pc;

  // if there is already a translated but inactive block, reuse it
  block = dr_find_inactive_block(tcache_id, crc, base_pc, end_pc - base_pc,
    base_literals, end_literals - base_literals);
  if (block) {
    // connect branches
    dbg(2, "== %csh2 reuse block %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
      base_pc, end_pc, base_literals, end_literals, block->entryp->tcache_ptr);
    for (i = 0; i < block->entry_count; i++) {
      entry = &block->entryp[i];
      add_to_hashlist(entry, tcache_id);
#if LINK_BRANCHES
      // incoming branches
      dr_link_blocks(entry, tcache_id);
      if (!tcache_id)
        dr_link_blocks(entry, sh2->is_slave ? 2 : 1);
      // outgoing branches
      dr_link_outgoing(entry, tcache_id, sh2->is_slave);
#endif
    }
    // mark memory for overwrite detection
    dr_mark_memory(1, block, tcache_id, 0);
    block->active = 1;
    return block->entryp[0].tcache_ptr;
  }

  // collect branch_targets that don't land on delay slots
  m1 = m2 = v = op = 0;
  for (pc = base_pc, i = 0; pc < end_pc; i++, pc += 2) {
    if (op_flags[i] & OF_DELAY_OP)
      op_flags[i] &= ~OF_BTARGET;
    if (op_flags[i] & OF_BTARGET)
      ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc, );
    if (ops[i].op == OP_LDC && (ops[i].dest & BITMASK1(SHR_SR)) && pc+2 < end_pc)
      op_flags[i+1] |= OF_BTARGET; // RTE entrypoint in case of SR.IMASK change
#if LOOP_DETECTION
    // loop types detected:
    // 1. target: ... BRA target -> idle loop
    // 2. target: ... delay insn ... BF target -> delay loop
    // 3. target: ... poll insn ... BF/BT target -> poll loop
    // 4. target: ... poll insn ... BF/BT exit ... BRA target, exit: -> poll
    // conditions:
    // a. no further branch targets between target and back jump.
    // b. no unconditional branch insn inside the loop.
    // c. exactly one poll or delay insn is allowed inside a delay/poll loop
    //    (scan_block marks loops only if they meet conditions a through c)
    // d. idle loops do not modify anything but PC,SR and contain no branches
    // e. delay/poll loops do not modify anything but the concerned reg,PC,SR
    // f. loading constants into registers inside the loop is allowed
    // g. a delay/poll loop must have a conditional branch somewhere
    // h. an idle loop must not have a conditional branch
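    // example of case 3 (hypothetical guest code), a typical mailbox poll:
    //   target: mov.l @(0,r1),r0  ; poll insn, writes only R0 (condition e)
    //           tst   r0,r0       ; modifies just SR
    //           bt    target      ; conditional back jump (condition g)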
    if (op_flags[i] & OF_BTARGET) {
      // possible loop entry point
      drcf.loop_type = op_flags[i] & OF_LOOP;
      drcf.pending_branch_direct = drcf.pending_branch_indirect = 0;
      op = OF_IDLE_LOOP; // loop type
      v = i;
      m1 = m2 = 0;
    }
    if (drcf.loop_type) {
      // detect loop type, and store poll/delay register
      if (op_flags[i] & OF_POLL_INSN) {
        op = OF_POLL_LOOP;
        m1 |= ops[i].dest; // loop poll/delay regs
      } else if (op_flags[i] & OF_DELAY_INSN) {
        op = OF_DELAY_LOOP;
        m1 |= ops[i].dest;
      } else if (ops[i].op != OP_LOAD_POOL && ops[i].op != OP_LOAD_CONST
              && (ops[i].op != OP_MOVE || op != OF_POLL_LOOP)) {
        // not (MOV @(PC) or MOV # or (MOV reg and poll)), condition f
        m2 |= ops[i].dest; // regs modified by other insns
      }
      // branch detector
      if (OP_ISBRAIMM(ops[i].op) && ops[i].imm == base_pc + 2*v)
        drcf.pending_branch_direct = 1; // backward branch detected
      if (OP_ISBRACND(ops[i].op))
        drcf.pending_branch_indirect = 1; // conditions g,h - cond.branch
      // poll/idle loops terminate with their backwards branch to the loop start
      if (drcf.pending_branch_direct && !(op_flags[i+1] & OF_DELAY_OP)) {
        m2 &= ~(m1 | BITMASK2(SHR_PC, SHR_SR)); // conditions d,e + g,h
        if (m2 || ((op == OF_IDLE_LOOP) == (drcf.pending_branch_indirect)))
          op = 0; // conditions not met
        op_flags[v] = (op_flags[v] & ~OF_LOOP) | op; // set loop type
        drcf.loop_type = 0;
      }
    }
#endif
  }
  if (branch_target_count > 0) {
    memset(branch_target_ptr, 0, sizeof(branch_target_ptr[0]) * branch_target_count);
  }

  tcache_ptr = dr_prepare_cache(tcache_id, (end_pc - base_pc) / 2);
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcache_id] = tcache_ptr;
#endif

  block = dr_add_block(base_pc, end_pc - base_pc, base_literals,
    end_literals - base_literals, crc, sh2->is_slave, &blkid_main);
  if (block == NULL)
    return NULL;

  block_entry_ptr = tcache_ptr;
  dbg(2, "== %csh2 block #%d,%d %08x-%08x,%08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
    tcache_id, blkid_main, base_pc, end_pc, base_literals, end_literals, block_entry_ptr);

  // clear stale state after compile errors
  rcache_invalidate();
  emith_invalidate_t();
  drcf = (struct drcf) { 0 };

  // -------------------------------------------------
  // 3rd pass: actual compilation
  pc = base_pc;
  cycles = 0;
  for (i = 0; pc < end_pc; i++)
  {
    u32 delay_dep_fw = 0, delay_dep_bk = 0;
    int tmp3, tmp4;
    int sr;

    opd = &ops[i];
    op = FETCH_OP(pc);

#if (DRC_DEBUG & 2)
    insns_compiled++;
#endif
#if (DRC_DEBUG & 4)
    DasmSH2(sh2dasm_buff, pc, op);
    if (op_flags[i] & OF_BTARGET) {
      if ((op_flags[i] & OF_LOOP) == OF_DELAY_LOOP) tmp3 = '+';
      else if ((op_flags[i] & OF_LOOP) == OF_POLL_LOOP) tmp3 = '=';
      else if ((op_flags[i] & OF_LOOP) == OF_IDLE_LOOP) tmp3 = '~';
      else tmp3 = '*';
    } else if (drcf.loop_type) tmp3 = '.';
    else tmp3 = ' ';
    printf("%c%08x %04x %s\n", tmp3, pc, op, sh2dasm_buff);
#endif

    if (op_flags[i] & OF_BTARGET)
    {
      if (pc != base_pc)
      {
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        FLUSH_CYCLES(sr);
        emith_sync_t(sr);
        rcache_flush();
        emith_flush();

        // make block entry
  2689. v = block->entry_count;
  2690. entry = &block->entryp[v];
  2691. if (v < ARRAY_SIZE(block->entryp))
  2692. {
  2693. entry = &block->entryp[v];
  2694. entry->pc = pc;
  2695. entry->tcache_ptr = tcache_ptr;
  2696. entry->links = entry->o_links = NULL;
  2697. #if (DRC_DEBUG & 2)
  2698. entry->block = block;
  2699. #endif
  2700. add_to_hashlist(entry, tcache_id);
  2701. block->entry_count++;
  2702. dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p",
  2703. sh2->is_slave ? 's' : 'm', tcache_id, blkid_main,
  2704. pc, tcache_ptr);
  2705. }
  2706. else {
  2707. dbg(1, "too many entryp for block #%d,%d pc=%08x",
  2708. tcache_id, blkid_main, pc);
  2709. break;
  2710. }
  2711. } else {
  2712. entry = block->entryp;
  2713. }
  2714. // since we made a block entry, link any other blocks that jump to it
  2715. dr_link_blocks(entry, tcache_id);
  2716. if (!tcache_id) // can safely link from cpu-local to global memory
  2717. dr_link_blocks(entry, sh2->is_slave?2:1);
  2718. v = find_in_sorted_array(branch_target_pc, branch_target_count, pc);
  2719. if (v >= 0)
  2720. branch_target_ptr[v] = tcache_ptr;
  2721. #if LOOP_DETECTION
  2722. drcf.loop_type = op_flags[i] & OF_LOOP;
  2723. drcf.delay_reg = -1;
  2724. drcf.polling = (drcf.loop_type == OF_POLL_LOOP ? MF_POLLING : 0);
  2725. #endif
  2726. rcache_clean();
  2727. #if (DRC_DEBUG & 0x10)
  2728. tmp = rcache_get_tmp_arg(0);
  2729. emith_move_r_imm(tmp, pc);
  2730. tmp = emit_memhandler_read(1);
  2731. tmp2 = rcache_get_tmp();
  2732. tmp3 = rcache_get_tmp();
  2733. emith_move_r_imm(tmp2, (s16)FETCH_OP(pc));
  2734. emith_move_r_imm(tmp3, 0);
  2735. emith_cmp_r_r(tmp, tmp2);
  2736. EMITH_SJMP_START(DCOND_EQ);
  2737. emith_read_r_r_offs_c(DCOND_NE, tmp3, tmp3, 0); // crash
  2738. EMITH_SJMP_END(DCOND_EQ);
  2739. rcache_free_tmp(tmp);
  2740. rcache_free_tmp(tmp2);
  2741. rcache_free_tmp(tmp3);
  2742. #endif
  2743. // check cycles
  2744. tmp = rcache_get_tmp_arg(0);
  2745. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  2746. emith_cmp_r_imm(sr, 0);
  2747. emith_move_r_imm_c(DCOND_LE, tmp, pc);
  2748. emith_jump_cond(DCOND_LE, sh2_drc_exit);
  2749. rcache_free_tmp(tmp);
  2750. #if (DRC_DEBUG & 32)
  2751. // block hit counter
  2752. tmp = rcache_get_tmp_arg(0);
  2753. tmp2 = rcache_get_tmp_arg(1);
  2754. emith_move_r_ptr_imm(tmp, (uptr)entry);
  2755. emith_read_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  2756. emith_add_r_imm(tmp2, 1);
  2757. emith_write_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  2758. rcache_free_tmp(tmp);
  2759. rcache_free_tmp(tmp2);
  2760. #endif
  2761. #if (DRC_DEBUG & (8|256|512|1024))
  2762. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2763. emith_sync_t(sr);
  2764. rcache_clean();
  2765. tmp = rcache_used_hregs_mask();
  2766. emith_save_caller_regs(tmp);
  2767. emit_do_static_regs(1, 0);
  2768. rcache_get_reg_arg(2, SHR_SR, NULL);
  2769. tmp2 = rcache_get_tmp_arg(0);
  2770. tmp3 = rcache_get_tmp_arg(1);
  2771. tmp4 = rcache_get_tmp_arg(3);
  2772. emith_move_r_ptr_imm(tmp2, tcache_ptr);
  2773. emith_move_r_r_ptr(tmp3, CONTEXT_REG);
  2774. emith_move_r_imm(tmp4, pc);
  2775. emith_ctx_write(tmp4, SHR_PC * 4);
  2776. rcache_invalidate_tmp();
  2777. emith_call(sh2_drc_log_entry);
  2778. emith_restore_caller_regs(tmp);
  2779. #endif
  2780. do_host_disasm(tcache_id);
  2781. rcache_unlock_all();
  2782. }
#ifdef DRC_CMP
    if (!(op_flags[i] & OF_DELAY_OP)) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      emit_move_r_imm32(SHR_PC, pc);
      rcache_clean();

      tmp = rcache_used_hregs_mask();
      emith_save_caller_regs(tmp);
      emit_do_static_regs(1, 0);
      emith_pass_arg_r(0, CONTEXT_REG);
      emith_call(do_sh2_cmp);
      emith_restore_caller_regs(tmp);
    }
#endif

    emith_pool_check();

    pc += 2;

    if (skip_op > 0) {
      skip_op--;
      continue;
    }

    if (op_flags[i] & OF_DELAY_OP)
    {
      // handle delay slot dependencies
      delay_dep_fw = opd->dest & ops[i-1].source;
      delay_dep_bk = opd->source & ops[i-1].dest;
      if (delay_dep_fw & BITMASK1(SHR_T)) {
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        DELAY_SAVE_T(sr);
      }
      if (delay_dep_bk & BITMASK1(SHR_PC)) {
        if (opd->op != OP_LOAD_POOL && opd->op != OP_MOVA) {
          // can only be those 2 really..
          elprintf_sh2(sh2, EL_ANOMALY,
            "drc: illegal slot insn %04x @ %08x?", op, pc - 2);
        }
        // store PC for MOVA/MOV @PC address calculation
        if (opd->imm != 0)
          ; // case OP_BRANCH - addr already resolved in scan_block
        else {
          switch (ops[i-1].op) {
          case OP_BRANCH:
            emit_move_r_imm32(SHR_PC, ops[i-1].imm);
            break;
          case OP_BRANCH_CT:
          case OP_BRANCH_CF:
            sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
            tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
            emith_move_r_imm(tmp, pc);
            tmp2 = emith_tst_t(sr, (ops[i-1].op == OP_BRANCH_CT));
            tmp3 = emith_invert_cond(tmp2);
            EMITH_SJMP_START(tmp3);
            emith_move_r_imm_c(tmp2, tmp, ops[i-1].imm);
            EMITH_SJMP_END(tmp3);
            break;
          case OP_BRANCH_N: // BT/BF known not to be taken
            // XXX could modify opd->imm instead?
            emit_move_r_imm32(SHR_PC, pc);
            break;
          // case OP_BRANCH_R OP_BRANCH_RF - PC already loaded
          }
        }
      }
      //if (delay_dep_fw & ~BITMASK1(SHR_T))
      //  dbg(1, "unhandled delay_dep_fw: %x", delay_dep_fw & ~BITMASK1(SHR_T));
      if (delay_dep_bk & ~BITMASK2(SHR_PC, SHR_PR))
        dbg(1, "unhandled delay_dep_bk: %x", delay_dep_bk);
    }
    // inform cache about future register usage
    u32 late = 0;  // regs read by future ops
    u32 write = 0; // regs written to (to detect write before read)
    u32 soon = 0;  // regs read soon
    for (v = 1; v <= 9; v++) {
      // no sense in looking any further than the next rcache flush
      tmp = ((op_flags[i+v] & OF_BTARGET) || (op_flags[i+v-1] & OF_DELAY_OP) ||
             (OP_ISBRACND(opd[v-1].op) && !(op_flags[i+v] & OF_DELAY_OP)));
      if (pc + 2*v <= end_pc && !tmp) { // (pc already incremented above)
        late |= opd[v].source & ~write;
        // ignore source regs after they have been written to
        write |= opd[v].dest;
        // regs needed in the next few instructions
        if (v <= 4)
          soon = late;
      } else {
        // upcoming rcache_flush, start writing back unused dirty stuff
        rcache_clean_masked(rcache_dirty_mask() & ~(write|opd[0].dest));
        break;
      }
    }
    rcache_set_usage_now(opd[0].source); // current insn
    rcache_set_usage_soon(late);         // insns 1-3
    rcache_set_usage_late(late & ~soon); // insns 4-9
    rcache_set_usage_discard(write & ~(late|soon) & ~opd[0].source);
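    // example: if a "MOV #imm,Rn" lies a few insns ahead and nothing reads Rn
    // before it (nor does the current insn), Rn ends up in the discard set and
    // a dirty cached copy of it needn't be written back to the context.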
  2877. switch (opd->op)
  2878. {
  2879. case OP_BRANCH_N:
  2880. // never taken, just use up cycles
  2881. goto end_op;
  2882. case OP_BRANCH:
  2883. case OP_BRANCH_CT:
  2884. case OP_BRANCH_CF:
  2885. if (opd->dest & BITMASK1(SHR_PR))
  2886. emit_move_r_imm32(SHR_PR, pc + 2);
  2887. drcf.pending_branch_direct = 1;
  2888. goto end_op;
  2889. case OP_BRANCH_R:
  2890. if (opd->dest & BITMASK1(SHR_PR))
  2891. emit_move_r_imm32(SHR_PR, pc + 2);
  2892. emit_move_r_r(SHR_PC, opd->rm);
  2893. drcf.pending_branch_indirect = 1;
  2894. goto end_op;
  2895. case OP_BRANCH_RF:
  2896. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2897. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
  2898. emith_move_r_imm(tmp, pc + 2);
  2899. if (opd->dest & BITMASK1(SHR_PR)) {
  2900. tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE, NULL);
  2901. emith_move_r_r(tmp3, tmp);
  2902. }
  2903. emith_add_r_r(tmp, tmp2);
  2904. if (gconst_get(GET_Rn(), &u))
  2905. gconst_set(SHR_PC, pc + 2 + u);
  2906. drcf.pending_branch_indirect = 1;
  2907. goto end_op;
  2908. case OP_SLEEP: // SLEEP 0000000000011011
  2909. printf("TODO sleep\n");
  2910. goto end_op;
  2911. case OP_RTE: // RTE 0000000000101011
  2912. emith_invalidate_t();
  2913. // pop PC
  2914. tmp = emit_memhandler_read_rr(sh2, SHR_PC, SHR_SP, 0, 2 | MF_POSTINCR);
  2915. rcache_free(tmp);
  2916. // pop SR
  2917. tmp = emit_memhandler_read_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_POSTINCR);
  2918. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2919. emith_write_sr(sr, tmp);
  2920. rcache_free_tmp(tmp);
  2921. drcf.test_irq = 1;
  2922. drcf.pending_branch_indirect = 1;
  2923. goto end_op;
  2924. case OP_UNDEFINED:
  2925. elprintf_sh2(sh2, EL_ANOMALY, "drc: unhandled op %04x @ %08x", op, pc-2);
  2926. opd->imm = (op_flags[i] & OF_B_IN_DS) ? 6 : 4;
  2927. // fallthrough
  2928. case OP_TRAPA: // TRAPA #imm 11000011iiiiiiii
  2929. // push SR
  2930. tmp = rcache_get_reg_arg(1, SHR_SR, &tmp2);
  2931. emith_sync_t(tmp2);
  2932. emith_clear_msb(tmp, tmp2, 22);
  2933. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  2934. // push PC
  2935. if (opd->op == OP_TRAPA) {
  2936. tmp = rcache_get_tmp_arg(1);
  2937. emith_move_r_imm(tmp, pc);
  2938. } else if (drcf.pending_branch_indirect) {
  2939. tmp = rcache_get_reg_arg(1, SHR_PC, NULL);
  2940. } else {
  2941. tmp = rcache_get_tmp_arg(1);
  2942. emith_move_r_imm(tmp, pc - 2);
  2943. }
  2944. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  2945. // obtain new PC
  2946. emit_memhandler_read_rr(sh2, SHR_PC, SHR_VBR, opd->imm * 4, 2);
  2947. // indirect jump -> back to dispatcher
  2948. drcf.pending_branch_indirect = 1;
  2949. goto end_op;
  2950. case OP_LOAD_POOL:
  2951. #if PROPAGATE_CONSTANTS
  2952. if ((opd->imm && opd->imm >= base_pc && opd->imm < end_literals) ||
  2953. dr_is_rom(opd->imm))
  2954. {
  2955. if (opd->size == 2)
  2956. u = FETCH32(opd->imm);
  2957. else
  2958. u = (s16)FETCH_OP(opd->imm);
  2959. // tweak for Blackthorne: avoid stack overwriting
  2960. if (GET_Rn() == SHR_SP && u == 0x0603f800) u = 0x0603f880;
  2961. gconst_new(GET_Rn(), u);
  2962. }
  2963. else
  2964. #endif
  2965. {
  2966. if (opd->imm != 0) {
  2967. tmp = rcache_get_tmp_arg(0);
  2968. emith_move_r_imm(tmp, opd->imm);
  2969. } else {
  2970. // have to calculate read addr from PC for delay slot
  2971. tmp = rcache_get_reg_arg(0, SHR_PC, &tmp2);
  2972. if (opd->size == 2) {
  2973. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  2974. emith_bic_r_imm(tmp, 3);
  2975. }
  2976. else
  2977. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 2);
  2978. }
  2979. tmp2 = emit_memhandler_read(opd->size);
  2980. #if REMAP_REGISTER
  2981. tmp3 = rcache_map_reg(GET_Rn(), tmp2, RC_GR_WRITE);
  2982. #else
  2983. tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  2984. #endif
  2985. if (tmp3 != tmp2) {
  2986. emith_move_r_r(tmp3, tmp2);
  2987. rcache_free_tmp(tmp2);
  2988. }
  2989. }
  2990. goto end_op;
  2991. case OP_MOVA: // MOVA @(disp,PC),R0 11000111dddddddd
  2992. if (opd->imm != 0)
  2993. emit_move_r_imm32(SHR_R0, opd->imm);
  2994. else {
  2995. // have to calculate addr from PC for delay slot
  2996. tmp2 = rcache_get_reg(SHR_PC, RC_GR_READ, NULL);
  2997. tmp = rcache_get_reg(SHR_R0, RC_GR_WRITE, NULL);
  2998. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  2999. emith_bic_r_imm(tmp, 3);
  3000. }
  3001. goto end_op;
  3002. }
  3003. switch ((op >> 12) & 0x0f)
  3004. {
  3005. /////////////////////////////////////////////
  3006. case 0x00:
  3007. switch (op & 0x0f)
  3008. {
  3009. case 0x02:
  3010. switch (GET_Fx())
  3011. {
  3012. case 0: // STC SR,Rn 0000nnnn00000010
  3013. tmp2 = SHR_SR;
  3014. break;
  3015. case 1: // STC GBR,Rn 0000nnnn00010010
  3016. tmp2 = SHR_GBR;
  3017. break;
  3018. case 2: // STC VBR,Rn 0000nnnn00100010
  3019. tmp2 = SHR_VBR;
  3020. break;
  3021. default:
  3022. goto default_;
  3023. }
  3024. if (tmp2 == SHR_SR) {
  3025. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3026. emith_sync_t(sr);
  3027. tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3028. emith_clear_msb(tmp, sr, 22); // reserved bits defined by ISA as 0
  3029. } else
  3030. emit_move_r_r(GET_Rn(), tmp2);
  3031. goto end_op;
  3032. case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
  3033. case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
  3034. case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
  3035. emit_indirect_indexed_write(sh2, GET_Rm(), SHR_R0, GET_Rn(), op & 3);
  3036. goto end_op;
  3037. case 0x07: // MUL.L Rm,Rn 0000nnnnmmmm0111
  3038. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3039. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3040. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  3041. emith_mul(tmp3, tmp2, tmp);
  3042. goto end_op;
  3043. case 0x08:
  3044. switch (GET_Fx())
  3045. {
  3046. case 0: // CLRT 0000000000001000
  3047. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3048. emith_set_t(sr, 0);
  3049. break;
  3050. case 1: // SETT 0000000000011000
  3051. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3052. emith_set_t(sr, 1);
  3053. break;
  3054. case 2: // CLRMAC 0000000000101000
  3055. emit_move_r_imm32(SHR_MACL, 0);
  3056. emit_move_r_imm32(SHR_MACH, 0);
  3057. break;
  3058. default:
  3059. goto default_;
  3060. }
  3061. goto end_op;
  3062. case 0x09:
  3063. switch (GET_Fx())
  3064. {
  3065. case 0: // NOP 0000000000001001
  3066. break;
  3067. case 1: // DIV0U 0000000000011001
  3068. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3069. emith_invalidate_t();
  3070. emith_bic_r_imm(sr, M|Q|T);
  3071. break;
  3072. case 2: // MOVT Rn 0000nnnn00101001
  3073. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3074. emith_sync_t(sr);
  3075. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3076. emith_clear_msb(tmp2, sr, 31);
  3077. break;
  3078. default:
  3079. goto default_;
  3080. }
  3081. goto end_op;
  3082. case 0x0a:
  3083. switch (GET_Fx())
  3084. {
  3085. case 0: // STS MACH,Rn 0000nnnn00001010
  3086. tmp2 = SHR_MACH;
  3087. break;
  3088. case 1: // STS MACL,Rn 0000nnnn00011010
  3089. tmp2 = SHR_MACL;
  3090. break;
  3091. case 2: // STS PR,Rn 0000nnnn00101010
  3092. tmp2 = SHR_PR;
  3093. break;
  3094. default:
  3095. goto default_;
  3096. }
  3097. emit_move_r_r(GET_Rn(), tmp2);
  3098. goto end_op;
  3099. case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
  3100. case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
  3101. case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
  3102. emit_indirect_indexed_read(sh2, GET_Rn(), SHR_R0, GET_Rm(), (op & 3) | drcf.polling);
  3103. goto end_op;
  3104. case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
  3105. emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
  3106. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3107. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
  3108. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
  3109. emith_sh2_macl(tmp3, tmp4, tmp, tmp2, sr);
  3110. rcache_free_tmp(tmp2);
  3111. rcache_free_tmp(tmp);
  3112. goto end_op;
  3113. }
  3114. goto default_;
    /////////////////////////////////////////////
    case 0x01: // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), (op & 0x0f) * 4, 2);
      goto end_op;

    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
        emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, op & 3);
        goto end_op;
      case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
        emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, (op & 3) | MF_PREDECR);
        goto end_op;
      case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_invalidate_t();
        emith_bic_r_imm(sr, M|Q|T);
        emith_tst_r_imm(tmp2, (1<<31));
        EMITH_SJMP_START(DCOND_EQ);
        emith_or_r_imm_c(DCOND_NE, sr, Q);
        EMITH_SJMP_END(DCOND_EQ);
        emith_tst_r_imm(tmp3, (1<<31));
        EMITH_SJMP_START(DCOND_EQ);
        emith_or_r_imm_c(DCOND_NE, sr, M);
        EMITH_SJMP_END(DCOND_EQ);
        emith_teq_r_r(tmp2, tmp3);
        EMITH_SJMP_START(DCOND_PL);
        emith_or_r_imm_c(DCOND_MI, sr, T);
        EMITH_SJMP_END(DCOND_PL);
        goto end_op;
      case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_r(tmp2, tmp3);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
        if (GET_Rm() != GET_Rn()) {
          tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
          emith_and_r_r_r(tmp, tmp3, tmp2);
        }
        goto end_op;
      case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        emith_eor_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
        if (GET_Rm() != GET_Rn()) {
          tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
          emith_or_r_r_r(tmp, tmp3, tmp2);
        }
        goto end_op;
      case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
        tmp = rcache_get_tmp();
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_eor_r_r_r(tmp, tmp2, tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, 0x000000ff);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0x0000ff00);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0x00ff0000);
        EMITH_SJMP_START(DCOND_EQ);
        emith_tst_r_imm_c(DCOND_NE, tmp, 0xff000000);
        EMITH_SJMP_END(DCOND_EQ);
        EMITH_SJMP_END(DCOND_EQ);
        EMITH_SJMP_END(DCOND_EQ);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        emith_lsr(tmp, tmp3, 16);
        emith_or_r_r_lsl(tmp, tmp2, 16);
        goto end_op;
      case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        if (op & 1) {
          emith_sext(tmp, tmp2, 16);
        } else
          emith_clear_msb(tmp, tmp2, 16);
        tmp2 = rcache_get_tmp();
        if (op & 1) {
          emith_sext(tmp2, tmp3, 16);
        } else
          emith_clear_msb(tmp2, tmp3, 16);
        emith_mul(tmp, tmp, tmp2);
        rcache_free_tmp(tmp2);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        emith_clr_t_cond(sr);
        emith_cmp_r_r(tmp2, tmp3);
        switch (op & 0x07)
        {
        case 0x00: // CMP/EQ
          emith_set_t_cond(sr, DCOND_EQ);
          break;
        case 0x02: // CMP/HS
          emith_set_t_cond(sr, DCOND_HS);
          break;
        case 0x03: // CMP/GE
          emith_set_t_cond(sr, DCOND_GE);
          break;
        case 0x06: // CMP/HI
          emith_set_t_cond(sr, DCOND_HI);
          break;
        case 0x07: // CMP/GT
          emith_set_t_cond(sr, DCOND_GT);
          break;
        }
        goto end_op;
      case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
        // Q1 = carry(Rn = (Rn << 1) | T)
        // if Q ^ M
        //   Q2 = carry(Rn += Rm)
        // else
        //   Q2 = carry(Rn -= Rm)
        // Q = M ^ Q1 ^ Q2
        // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
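        // consistency check for the identity used here: the new Q is
        // M ^ Q1 ^ Q2, so Q ^ M = Q1 ^ Q2 and T = !(Q ^ M) = !(Q1 ^ Q2);
        // that is why Q1 can be parked in T, Q2 folded in via the carry of
        // the add/sub step, and T simply inverted at the end.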
        tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        emith_tpop_carry(sr, 0);
        emith_adcf_r_r_r(tmp2, tmp, tmp);
        emith_tpush_carry(sr, 0); // keep Q1 in T for now
        tmp4 = rcache_get_tmp();
        emith_and_r_r_imm(tmp4, sr, M);
        emith_eor_r_r_lsr(sr, tmp4, M_SHIFT - Q_SHIFT); // Q ^= M
        rcache_free_tmp(tmp4);
        // add or sub, invert T if carry to get Q1 ^ Q2
        // in: (Q ^ M) passed in Q, Q1 in T
        emith_sh2_div1_step(tmp2, tmp3, sr);
        emith_bic_r_imm(sr, Q);
        emith_tst_r_imm(sr, M);
        EMITH_SJMP_START(DCOND_EQ);
        emith_or_r_imm_c(DCOND_NE, sr, Q); // Q = M
        EMITH_SJMP_END(DCOND_EQ);
        emith_tst_r_imm(sr, T);
        EMITH_SJMP_START(DCOND_EQ);
        emith_eor_r_imm_c(DCOND_NE, sr, Q); // Q = M ^ Q1 ^ Q2
        EMITH_SJMP_END(DCOND_EQ);
        emith_eor_r_imm(sr, T); // T = !(Q1 ^ Q2)
        goto end_op;
      case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_u64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
#if PROPAGATE_CONSTANTS
        if (GET_Rn() == GET_Rm()) {
          gconst_new(GET_Rn(), 0);
          goto end_op;
        }
#endif
      case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 4) {
          emith_add_r_r_r(tmp, tmp3, tmp2);
        } else
          emith_sub_r_r_r(tmp, tmp3, tmp2);
        goto end_op;
      case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_sync_t(sr);
        if (op & 4) { // adc
          emith_tpop_carry(sr, 0);
          emith_adcf_r_r_r(tmp, tmp3, tmp2);
          emith_tpush_carry(sr, 0);
        } else {
          emith_tpop_carry(sr, 1);
          emith_sbcf_r_r_r(tmp, tmp3, tmp2);
          emith_tpush_carry(sr, 1);
        }
        goto end_op;
      case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        if (op & 4) {
          emith_addf_r_r_r(tmp, tmp3, tmp2);
        } else
          emith_subf_r_r_r(tmp, tmp3, tmp2);
        emith_set_t_cond(sr, DCOND_VS);
        goto end_op;
      case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
        tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
        emith_mul_s64(tmp3, tmp4, tmp, tmp2);
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn 0100nnnn00000000
        case 2: // SHAL Rn 0100nnnn00100000
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
          emith_tpop_carry(sr, 0); // dummy
          emith_lslf(tmp, tmp2, 1);
          emith_tpush_carry(sr, 0);
          goto end_op;
        case 1: // DT Rn 0100nnnn00010000
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
#if LOOP_DETECTION
          if (drcf.loop_type == OF_DELAY_LOOP) {
            if (drcf.delay_reg == -1)
              drcf.delay_reg = GET_Rn();
            else
              drcf.polling = drcf.loop_type = 0;
          }
#endif
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          emith_clr_t_cond(sr);
          emith_subf_r_r_imm(tmp, tmp2, 1);
          emith_set_t_cond(sr, DCOND_EQ);
          goto end_op;
        }
        goto default_;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn 0100nnnn00000001
        case 2: // SHAR Rn 0100nnnn00100001
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
          emith_tpop_carry(sr, 0); // dummy
          if (op & 0x20) {
            emith_asrf(tmp, tmp2, 1);
          } else
            emith_lsrf(tmp, tmp2, 1);
          emith_tpush_carry(sr, 0);
          goto end_op;
        case 1: // CMP/PZ Rn 0100nnnn00010001
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_GE);
          goto end_op;
        }
        goto default_;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
          tmp = SHR_MACH;
          break;
        case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
          tmp = SHR_MACL;
          break;
        case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
          tmp = SHR_PR;
          break;
        case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
          tmp = SHR_SR;
          break;
        case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
          tmp = SHR_GBR;
          break;
        case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
          tmp = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp == SHR_SR) {
          tmp3 = rcache_get_reg_arg(1, tmp, &tmp4);
          emith_sync_t(tmp4);
          emith_clear_msb(tmp3, tmp4, 22); // reserved bits defined by ISA as 0
        } else
          tmp3 = rcache_get_reg_arg(1, tmp, NULL);
        emit_memhandler_write_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_PREDECR);
        goto end_op;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn 0100nnnn00000100
        case 0x05: // ROTR Rn 0100nnnn00000101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
          emith_tpop_carry(sr, 0); // dummy
          if (op & 1) {
            emith_rorf(tmp, tmp2, 1);
          } else
            emith_rolf(tmp, tmp2, 1);
          emith_tpush_carry(sr, 0);
          goto end_op;
        case 0x24: // ROTCL Rn 0100nnnn00100100
        case 0x25: // ROTCR Rn 0100nnnn00100101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
          emith_tpop_carry(sr, 0);
          if (op & 1) {
            emith_rorcf(tmp);
          } else
            emith_rolcf(tmp);
          emith_tpush_carry(sr, 0);
          goto end_op;
        case 0x15: // CMP/PL Rn 0100nnnn00010101
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_GT);
          goto end_op;
        }
        goto default_;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
          tmp = SHR_MACH;
          break;
        case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
          tmp = SHR_MACL;
          break;
        case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
          tmp = SHR_PR;
          break;
        case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
          tmp = SHR_SR;
          break;
        case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
          tmp = SHR_GBR;
          break;
        case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
          tmp = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp == SHR_SR) {
          emith_invalidate_t();
          tmp2 = emit_memhandler_read_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_POSTINCR);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_write_sr(sr, tmp2);
          rcache_free_tmp(tmp2);
          drcf.test_irq = 1;
        } else
          emit_memhandler_read_rr(sh2, tmp, GET_Rn(), 0, 2 | MF_POSTINCR);
        goto end_op;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // SHLL2 Rn 0100nnnn00001000
                // SHLR2 Rn 0100nnnn00001001
          tmp = 2;
          break;
        case 1: // SHLL8 Rn 0100nnnn00011000
                // SHLR8 Rn 0100nnnn00011001
          tmp = 8;
          break;
        case 2: // SHLL16 Rn 0100nnnn00101000
                // SHLR16 Rn 0100nnnn00101001
          tmp = 16;
          break;
        default:
          goto default_;
        }
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
        if (op & 1) {
          emith_lsr(tmp2, tmp3, tmp);
        } else
          emith_lsl(tmp2, tmp3, tmp);
        goto end_op;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp2 = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp2 = SHR_MACL;
          break;
        case 2: // LDS Rm,PR 0100mmmm00101010
          tmp2 = SHR_PR;
          break;
        default:
          goto default_;
        }
        emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0b:
        switch (GET_Fx())
        {
        case 1: // TAS.B @Rn 0100nnnn00011011
          // XXX: is TAS working on 32X?
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          tmp = emit_memhandler_read(0);
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_clr_t_cond(sr);
          emith_cmp_r_imm(tmp, 0);
          emith_set_t_cond(sr, DCOND_EQ);
          emith_or_r_imm(tmp, 0x80);
          tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
          emith_move_r_r(tmp2, tmp);
          rcache_free_tmp(tmp);
          rcache_get_reg_arg(0, GET_Rn(), NULL);
          emit_memhandler_write(0);
          break;
        default:
          goto default_;
        }
        goto end_op;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR 0100mmmm00001110
          tmp2 = SHR_SR;
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp2 = SHR_GBR;
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp2 = SHR_VBR;
          break;
        default:
          goto default_;
        }
        if (tmp2 == SHR_SR) {
          emith_invalidate_t();
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
          emith_write_sr(sr, tmp);
          drcf.test_irq = 1;
        } else
          emit_move_r_r(tmp2, GET_Rn());
        goto end_op;
      case 0x0f: // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
        emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
        sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
        tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
        tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
        emith_sh2_macw(tmp3, tmp4, tmp, tmp2, sr);
        rcache_free_tmp(tmp2);
        rcache_free_tmp(tmp);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x05: // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2 | drcf.polling);
      goto end_op;

    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
      case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
        tmp = ((op & 7) >= 4 && GET_Rn() != GET_Rm()) ? MF_POSTINCR : drcf.polling;
        emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), 0, (op & 3) | tmp);
        goto end_op;
      case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
        emit_move_r_r(GET_Rn(), GET_Rm());
        goto end_op;
      case 0x07 ... 0x0f:
        tmp = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
        tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
        switch (op & 0x0f)
        {
        case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
          emith_mvn_r_r(tmp2, tmp);
          break;
        case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
          tmp3 = tmp2;
          if (tmp == tmp2)
            tmp3 = rcache_get_tmp();
          tmp4 = rcache_get_tmp();
          emith_lsr(tmp3, tmp, 16);
          emith_or_r_r_lsl(tmp3, tmp, 24);
          emith_and_r_r_imm(tmp4, tmp, 0xff00);
          emith_or_r_r_lsl(tmp3, tmp4, 8);
          emith_rol(tmp2, tmp3, 16);
          rcache_free_tmp(tmp4);
          if (tmp == tmp2)
            rcache_free_tmp(tmp3);
          break;
        case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
          emith_rol(tmp2, tmp, 16);
          break;
        case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
          sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
          emith_sync_t(sr);
          emith_tpop_carry(sr, 1);
          emith_negcf_r_r(tmp2, tmp);
          emith_tpush_carry(sr, 1);
          break;
        case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
          emith_neg_r_r(tmp2, tmp);
          break;
        case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
          emith_clear_msb(tmp2, tmp, 24);
          break;
        case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
          emith_clear_msb(tmp2, tmp, 16);
          break;
        case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
          emith_sext(tmp2, tmp, 8);
          break;
        case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
          emith_sext(tmp2, tmp, 16);
          break;
        }
        goto end_op;
      }
      goto default_;
    /////////////////////////////////////////////
    case 0x07: // ADD #imm,Rn 0111nnnniiiiiiii
      if (op & 0x80) // adding negative
        emit_sub_r_imm(GET_Rn(), (u8)-op);
      else
        emit_add_r_imm(GET_Rn(), (u8)op);
      goto end_op;

    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
      case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
      case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
        tmp = (op & 0x100) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
        tmp2 = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_cmp_r_imm(tmp2, (s8)(op & 0xff));
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_write_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
        goto end_op;
      case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
        tmp = (op & 0x300) >> 8;
        emit_memhandler_read_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp | drcf.polling);
        goto end_op;
      case 0x0800: // TST #imm,R0 11001000iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        goto end_op;
      case 0x0900: // AND #imm,R0 11001001iiiiiiii
        tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
        emith_and_r_r_imm(tmp, tmp2, (op & 0xff));
        goto end_op;
      case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_eor_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0b00: // OR #imm,R0 11001011iiiiiiii
        if (op & 0xff) {
          tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
          emith_or_r_r_imm(tmp, tmp2, (op & 0xff));
        }
        goto end_op;
      case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0 | drcf.polling);
        sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
        emith_clr_t_cond(sr);
        emith_tst_r_imm(tmp, op & 0xff);
        emith_set_t_cond(sr, DCOND_EQ);
        rcache_free_tmp(tmp);
        goto end_op;
      case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_and_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_eor_r_r_imm(tmp2, tmp, (op & 0xff));
        goto end_rmw_op;
      case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
        tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        tmp2 = rcache_get_tmp_arg(1);
        emith_or_r_r_imm(tmp2, tmp, (op & 0xff));
      end_rmw_op:
        rcache_free_tmp(tmp);
        emit_indirect_indexed_write(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
        goto end_op;
      }
      goto default_;

    /////////////////////////////////////////////
    case 0x0e: // MOV #imm,Rn 1110nnnniiiiiiii
      emit_move_r_imm32(GET_Rn(), (s8)op);
      goto end_op;

    default:
    default_:
      if (!(op_flags[i] & OF_B_IN_DS)) {
        elprintf_sh2(sh2, EL_ANOMALY,
          "drc: illegal op %04x @ %08x", op, pc - 2);
        exit(1);
      }
    }
end_op:
    rcache_unlock_all();
#if DRC_DEBUG & 64
    RCACHE_CHECK("after insn");
#endif

    cycles += opd->cycles;

    if (op_flags[i+1] & OF_DELAY_OP) {
      do_host_disasm(tcache_id);
      continue;
    }

    // test irq?
    if (drcf.test_irq && !drcf.pending_branch_direct) {
      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      if (!drcf.pending_branch_indirect)
        emit_move_r_imm32(SHR_PC, pc);
      rcache_flush();
      emith_call(sh2_drc_test_irq);
      drcf.test_irq = 0;
    }

    // branch handling
    if (drcf.pending_branch_direct)
    {
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      u32 target_pc = opd_b->imm;
      int cond = -1;
      int ctaken = 0;
      void *target = NULL;
      int patchable = 0;

      if (OP_ISBRACND(opd_b->op))
        ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
      cycles += ctaken; // assume branch taken

#if LOOP_DETECTION
      if ((drcf.loop_type == OF_IDLE_LOOP ||
          (drcf.loop_type == OF_DELAY_LOOP && drcf.delay_reg >= 0)))
      {
        // idle or delay loop
        emit_sync_t_to_sr();
        emith_sh2_delay_loop(cycles, drcf.delay_reg);
        rcache_unlock_all(); // may lock delay_reg
        drcf.polling = drcf.loop_type = 0;
      }
#endif

      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      rcache_clean();

      // emit condition test for conditional branch
      if (OP_ISBRACND(opd_b->op)) {
        cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
        if (delay_dep_fw & BITMASK1(SHR_T)) {
          emith_sync_t(sr);
          emith_tst_r_imm(sr, T_save);
        } else {
          cond = emith_tst_t(sr, (opd_b->op == OP_BRANCH_CT));
          if (emith_get_t_cond() >= 0) {
            if (opd_b->op == OP_BRANCH_CT)
              emith_or_r_imm_c(cond, sr, T);
            else
              emith_bic_r_imm_c(cond, sr, T);
          }
        }
      } else
        emith_sync_t(sr);
      // no modification of host status/flags between here and branching!

#if LINK_BRANCHES
      v = find_in_sorted_array(branch_target_pc, branch_target_count, target_pc);
      if (v >= 0)
      {
        // local branch
        if (branch_target_ptr[v]) {
          // jumps back can be linked here since host PC is already known
          target = branch_target_ptr[v];
        } else if (branch_patch_count < MAX_LOCAL_BRANCHES) {
          target = tcache_ptr;
          branch_patch_pc[branch_patch_count] = target_pc;
          branch_patch_ptr[branch_patch_count] = target;
          branch_patch_count++;
          patchable = 1;
        } else
          dbg(1, "warning: too many local branches");
      }
#endif
      if (target == NULL)
      {
        // can't resolve branch locally, make a block exit
        rcache_clean();
        tmp = rcache_get_tmp_arg(0);
        emith_move_r_imm(tmp, target_pc);
        rcache_free_tmp(tmp);

#if CALL_STACK
        if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
          // BSR
          tmp = rcache_get_tmp_arg(1);
          emith_call_link(tmp, sh2_drc_dispatcher_call);
          rcache_free_tmp(tmp);
        } else
#endif
          target = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        patchable = 1;
      }

      // create branch
      if (patchable) {
        if (cond != -1)
          emith_jump_cond_patchable(cond, target);
        else if (target != NULL) {
          rcache_invalidate();
          emith_jump_patchable(target);
        }
      } else {
        if (cond != -1)
          emith_jump_cond(cond, target);
        else if (target != NULL) {
          rcache_invalidate();
          emith_jump(target);
        }
      }

      // branch not taken, correct cycle count
      if (ctaken)
        emith_add_r_imm(sr, ctaken << 12);
      // set T bit to reflect branch not taken for OP_BRANCH_CT/CF
      if (emith_get_t_cond() >= 0) // T is synced for all other cases
        emith_set_t(sr, opd_b->op == OP_BRANCH_CF);

      drcf.pending_branch_direct = 0;
      if (target_pc >= base_pc && target_pc < pc)
        drcf.polling = drcf.loop_type = 0;
    }
    else if (drcf.pending_branch_indirect) {
      void *target;
      u32 target_pc;

      sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
      FLUSH_CYCLES(sr);
      emith_sync_t(sr);
      rcache_clean();

      tmp = rcache_get_reg_arg(0, SHR_PC, NULL);
      rcache_invalidate();
#if CALL_STACK
      struct op_data *opd_b = (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
      if (opd_b->rm == SHR_PR) {
        // RTS
        emith_jump(sh2_drc_dispatcher_return);
      } else if ((opd_b->dest & BITMASK1(SHR_PR)) && pc+2 < end_pc) {
        // JSR/BSRF
        tmp = rcache_get_tmp_arg(1);
        emith_call_link(tmp, sh2_drc_dispatcher_call);
        rcache_free(tmp);
      } else
#endif
      if (gconst_get(SHR_PC, &target_pc)) {
        // JMP const, treat like unconditional direct branch
        target = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
        emith_jump_patchable(target);
      } else {
        // JMP
        emith_jump(sh2_drc_dispatcher);
      }

      drcf.pending_branch_indirect = 0;
      drcf.polling = drcf.loop_type = 0;
    }

    do_host_disasm(tcache_id);
  }
  // check the last op
  if (op_flags[i-1] & OF_DELAY_OP)
    opd = &ops[i-2];
  else
    opd = &ops[i-1];

  if (! OP_ISBRAUC(opd->op))
  {
    void *target;

    tmp = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
    FLUSH_CYCLES(tmp);
    emith_sync_t(tmp);

    rcache_clean();
    tmp = rcache_get_tmp_arg(0);
    emith_move_r_imm(tmp, pc);
    target = dr_prepare_ext_branch(block->entryp, pc, sh2->is_slave, tcache_id);
    if (target == NULL)
      return NULL;
    rcache_invalidate();
    emith_jump_patchable(target);
  } else
    rcache_flush();
  emith_flush();

  // link local branches
  for (i = 0; i < branch_patch_count; i++) {
    void *target;
    int t;
    t = find_in_sorted_array(branch_target_pc, branch_target_count, branch_patch_pc[i]);
    target = branch_target_ptr[t];
    if (target == NULL) {
      // flush pc and go back to dispatcher (this should no longer happen)
      dbg(1, "stray branch to %08x %p", branch_patch_pc[i], tcache_ptr);
      target = tcache_ptr;
      tmp = rcache_get_tmp_arg(0);
      emith_move_r_imm(tmp, branch_patch_pc[i]);
      rcache_flush();
      emith_jump(sh2_drc_dispatcher);
    }
    emith_jump_patch(branch_patch_ptr[i], target);
  }

  emith_pool_commit(0);

  dr_mark_memory(1, block, tcache_id, 0);

  tcache_ptrs[tcache_id] = tcache_ptr;

  host_instructions_updated(block_entry_ptr, tcache_ptr);

  do_host_disasm(tcache_id);

  dbg(2, " block #%d,%d -> %p tcache %d/%d, insns %d -> %d %.3f",
    tcache_id, blkid_main, tcache_ptr,
    tcache_ptr - tcache_bases[tcache_id], tcache_sizes[tcache_id],
    insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);

  if ((sh2->pc & 0xc6000000) == 0x02000000) { // ROM
    dbg(2, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]);
    Pico32x.emu_flags |= P32XF_DRC_ROM_C;
  }
/*
  printf("~~~\n");
  tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
  do_host_disasm(tcache_id);
  printf("~~~\n");
*/

#if (DRC_DEBUG)
  fflush(stdout);
#endif

  return block_entry_ptr;
}
  4019. static void sh2_generate_utils(void)
  4020. {
  4021. int arg0, arg1, arg2, arg3, sr, tmp, tmp2;
  4022. host_arg2reg(arg0, 0);
  4023. host_arg2reg(arg1, 1);
  4024. host_arg2reg(arg2, 2);
  4025. host_arg2reg(arg3, 3);
  4026. emith_move_r_r(arg0, arg0); // nop
  4027. emith_move_r_r(arg1, arg1); // nop
  4028. emith_move_r_r(arg2, arg2); // nop
  4029. emith_move_r_r(arg3, arg3); // nop
  4030. emith_flush();
  4031. // sh2_drc_write8(u32 a, u32 d)
  4032. sh2_drc_write8 = (void *)tcache_ptr;
  4033. emith_ctx_read_ptr(arg2, offsetof(SH2, write8_tab));
  4034. emith_sh2_wcall(arg0, arg1, arg2, arg3);
  4035. emith_flush();
  4036. // sh2_drc_write16(u32 a, u32 d)
  4037. sh2_drc_write16 = (void *)tcache_ptr;
  4038. emith_ctx_read_ptr(arg2, offsetof(SH2, write16_tab));
  4039. emith_sh2_wcall(arg0, arg1, arg2, arg3);
  4040. emith_flush();
  4041. // sh2_drc_write32(u32 a, u32 d)
  4042. sh2_drc_write32 = (void *)tcache_ptr;
  4043. emith_ctx_read_ptr(arg2, offsetof(SH2, write32_tab));
  4044. emith_sh2_wcall(arg0, arg1, arg2, arg3);
  4045. emith_flush();
  4046. // d = sh2_drc_read8(u32 a)
  4047. sh2_drc_read8 = (void *)tcache_ptr;
  4048. emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  4049. emith_sh2_rcall(arg0, arg1, arg2, arg3);
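  // after rcall: carry clear means the address hit a directly mapped area,
  // so mask it with arg3, read from the base in arg2 and return; carry set
  // means there is no direct mapping, so tail-call the handler left in arg2.
  // byte accesses XOR the address with 1 since memory is kept as
  // byte-swapped 16-bit units (on little-endian hosts).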
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_eor_r_imm_ptr_c(DCOND_CC, arg0, 1);
  emith_read8s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_jump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read16(u32 a)
  sh2_drc_read16 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read16s_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_jump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read32(u32 a)
  sh2_drc_read32 = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CS);
  emith_and_r_r_c(DCOND_CC, arg0, arg3);
  emith_read_r_r_r_c(DCOND_CC, RET_REG, arg2, arg0);
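  // the two 16-bit halves of a 32-bit value sit swapped in memory,
  // so rotate by 16 to restore their order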
  emith_ror_c(DCOND_CC, RET_REG, RET_REG, 16);
  emith_ret_c(DCOND_CC);
  EMITH_SJMP_END(DCOND_CS);
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_jump_reg(arg2);
  emith_flush();

  // d = sh2_drc_read8_poll(u32 a)
  sh2_drc_read8_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_jump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_eor_r_imm_ptr(arg1, 1);
  emith_read8s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_call(p32x_sh2_poll_memory8);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read16_poll(u32 a)
  sh2_drc_read16_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_jump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read16s_r_r_r(arg1, arg2, arg1);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_call(p32x_sh2_poll_memory16);
  emith_pop_and_ret(arg1);
  emith_flush();

  // d = sh2_drc_read32_poll(u32 a)
  sh2_drc_read32_poll = (void *)tcache_ptr;
  emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  emith_sh2_rcall(arg0, arg1, arg2, arg3);
  EMITH_SJMP_START(DCOND_CC);
  emith_move_r_r_ptr_c(DCOND_CS, arg1, CONTEXT_REG);
  emith_jump_reg_c(DCOND_CS, arg2);
  EMITH_SJMP_END(DCOND_CC);
  emith_and_r_r_r(arg1, arg0, arg3);
  emith_read_r_r_r(arg1, arg2, arg1);
  emith_ror(arg1, arg1, 16);
  emith_push_ret(arg1);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  emith_call(p32x_sh2_poll_memory32);
  emith_pop_and_ret(arg1);
  emith_flush();

  // sh2_drc_exit(u32 pc)
  sh2_drc_exit = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
  emit_do_static_regs(1, arg2);
  emith_sh2_drc_exit();
  emith_flush();

#if CALL_STACK
  // sh2_drc_dispatcher_call(u32 pc, uptr host_pr)
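  // pushes a (PR, host return address) pair onto the rts_cache ring buffer,
  // so the matching RTS can resume in host code without a block lookup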
  sh2_drc_dispatcher_call = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_imm(arg2, 2*sizeof(void *));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_ptr_imm(arg3, CONTEXT_REG, offsetof(SH2, rts_cache) + sizeof(void *));
  emith_write_r_r_r_ptr_wb(arg1, arg2, arg3);
  emith_ctx_read(arg3, SHR_PR * 4);
  emith_write_r_r_offs(arg3, arg2, (s8)-sizeof(void *));
  emith_flush();
  // FALLTHROUGH
#endif
  // sh2_drc_dispatcher(u32 pc)
  sh2_drc_dispatcher = (void *)tcache_ptr;
  emith_ctx_write(arg0, SHR_PC * 4);
#if BRANCH_CACHE
  // check if PC is in branch target cache
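  // the branch cache is direct-mapped: the PC's low bits index an array of
  // (pc, host pointer) pairs, and the stored pc is compared on lookup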
  emith_and_r_r_imm(arg1, arg0, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_read_r_r_offs(arg2, arg1, offsetof(SH2, branch_cache));
  emith_cmp_r_r(arg2, arg0);
  EMITH_SJMP_START(DCOND_NE);
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bchit);
  emith_read_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_EQ, arg3, 1);
  emith_write_r_r_offs_c(DCOND_EQ, arg3, arg2, 0);
#endif
  emith_read_r_r_offs_ptr_c(DCOND_EQ, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  emith_jump_reg_c(DCOND_EQ, RET_REG);
  EMITH_SJMP_END(DCOND_NE);
#endif
  emith_move_r_r_ptr(arg1, CONTEXT_REG);
  emith_add_r_r_ptr_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  emith_call(dr_lookup_block);
  // store PC and block entry ptr (in arg0) in branch target cache
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
#if BRANCH_CACHE
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&bcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
  emith_add_r_imm_c(DCOND_NE, arg3, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg3, arg2, 0);
#endif
  emith_ctx_read_c(DCOND_NE, arg2, SHR_PC * 4);
  emith_and_r_r_imm(arg1, arg2, (ARRAY_SIZE(sh2s->branch_cache)-1)*8);
  emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 1 : 0);
  emith_write_r_r_offs_c(DCOND_NE, arg2, arg1, offsetof(SH2, branch_cache));
  emith_write_r_r_offs_ptr_c(DCOND_NE, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
#endif
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // lookup failed, call sh2_translate()
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  emith_call(sh2_translate);
  // jump to the address returned by sh2_translate, if it succeeded
  emith_tst_r_r_ptr(RET_REG, RET_REG);
  EMITH_SJMP_START(DCOND_EQ);
  emith_jump_reg_c(DCOND_NE, RET_REG);
  EMITH_SJMP_END(DCOND_EQ);
  // XXX: can't translate, fail
  emith_call(dr_failure);
  emith_flush();

#if CALL_STACK
  // sh2_drc_dispatcher_return(u32 pc)
  sh2_drc_dispatcher_return = (void *)tcache_ptr;
  emith_ctx_read(arg2, offsetof(SH2, rts_cache_idx));
  emith_add_r_r_ptr_imm(arg1, CONTEXT_REG, offsetof(SH2, rts_cache));
  emith_read_r_r_r_wb(arg3, arg1, arg2);
  emith_cmp_r_r(arg0, arg3);
#if (DRC_DEBUG & 128)
  EMITH_SJMP_START(DCOND_EQ);
  emith_move_r_ptr_imm(arg2, (uptr)&rcmiss);
  emith_read_r_r_offs_c(DCOND_NE, arg1, arg2, 0);
  emith_add_r_imm_c(DCOND_NE, arg1, 1);
  emith_write_r_r_offs_c(DCOND_NE, arg1, arg2, 0);
  EMITH_SJMP_END(DCOND_EQ);
#endif
  emith_jump_cond(DCOND_NE, sh2_drc_dispatcher);
  emith_read_r_r_offs_ptr(arg0, arg1, sizeof(void *));
  emith_sub_r_imm(arg2, 2*sizeof(void *));
  emith_and_r_imm(arg2, (ARRAY_SIZE(sh2s->rts_cache)-1) * 2*sizeof(void *));
  emith_ctx_write(arg2, offsetof(SH2, rts_cache_idx));
#if (DRC_DEBUG & 128)
  emith_move_r_ptr_imm(arg2, (uptr)&rchit);
  emith_read_r_r_offs(arg1, arg2, 0);
  emith_add_r_imm(arg1, 1);
  emith_write_r_r_offs(arg1, arg2, 0);
#endif
  emith_jump_reg(arg0);
  emith_flush();
#endif

  // sh2_drc_test_irq(void)
  // assumes it's called from main function (may jump to dispatcher)
  sh2_drc_test_irq = (void *)tcache_ptr;
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  emith_lsr(arg0, sr, I_SHIFT);
  emith_and_r_imm(arg0, 0x0f);
  emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  EMITH_SJMP_START(DCOND_GT);
  emith_ret_c(DCOND_LE); // nope, return
  EMITH_SJMP_END(DCOND_GT);
  // adjust SP
  tmp = rcache_get_reg(SHR_SP, RC_GR_RMW, NULL);
  emith_sub_r_imm(tmp, 4*2);
  rcache_clean();
  // push SR
  tmp = rcache_get_reg_arg(0, SHR_SP, &tmp2);
  emith_add_r_r_imm(tmp, tmp2, 4);
  tmp = rcache_get_reg_arg(1, SHR_SR, NULL);
  emith_clear_msb(tmp, tmp, 22);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_call(p32x_sh2_write32); // XXX: use sh2_drc_write32?
  // push PC
  rcache_get_reg_arg(0, SHR_SP, NULL);
  emith_ctx_read(arg1, SHR_PC * 4);
  emith_move_r_r_ptr(arg2, CONTEXT_REG);
  rcache_invalidate_tmp();
  emith_call(p32x_sh2_write32);
  // update I, cycles, do callback
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  emith_bic_r_imm(sr, I);
  emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  rcache_flush();
  emith_move_r_r_ptr(arg0, CONTEXT_REG);
  emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
  // obtain new PC
  emith_ctx_read(arg1, SHR_VBR * 4);
  emith_add_r_r_r_lsl(arg0, arg1, RET_REG, 2);
  emith_call(sh2_drc_read32);
  if (arg0 != RET_REG)
    emith_move_r_r(arg0, RET_REG);
  emith_call_cleanup();
  rcache_invalidate();
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

  // sh2_drc_entry(SH2 *sh2)
  sh2_drc_entry = (void *)tcache_ptr;
  emith_sh2_drc_entry();
  emith_move_r_r_ptr(CONTEXT_REG, arg0); // move ctx, arg0
  emit_do_static_regs(0, arg2);
  emith_call(sh2_drc_test_irq);
  emith_ctx_read(arg0, SHR_PC * 4);
  emith_jump(sh2_drc_dispatcher);
  emith_flush();

#ifdef PDB_NET
  // debug
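  // these wrappers fold every value passing through the memory handlers
  // into a running checksum in the SH2 context (pdb_io_csum), which can
  // then be compared between runs when hunting hard-to-find desyncs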
#define MAKE_READ_WRAPPER(func) { \
  void *tmp = (void *)tcache_ptr; \
  emith_push_ret(); \
  emith_call(func); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_addf_r_r(arg2, arg0); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_adc_r_imm(arg2, 0x01000000); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_pop_and_ret(); \
  emith_flush(); \
  func = tmp; \
}
#define MAKE_WRITE_WRAPPER(func) { \
  void *tmp = (void *)tcache_ptr; \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_addf_r_r(arg2, arg1); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_adc_r_imm(arg2, 0x01000000); \
  emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  emith_move_r_r_ptr(arg2, CONTEXT_REG); \
  emith_jump(func); \
  emith_flush(); \
  func = tmp; \
}
  MAKE_READ_WRAPPER(sh2_drc_read8);
  MAKE_READ_WRAPPER(sh2_drc_read16);
  MAKE_READ_WRAPPER(sh2_drc_read32);
  MAKE_WRITE_WRAPPER(sh2_drc_write8);
  MAKE_WRITE_WRAPPER(sh2_drc_write16);
  MAKE_WRITE_WRAPPER(sh2_drc_write32);
  MAKE_READ_WRAPPER(sh2_drc_read8_poll);
  MAKE_READ_WRAPPER(sh2_drc_read16_poll);
  MAKE_READ_WRAPPER(sh2_drc_read32_poll);
#endif

  emith_pool_commit(0);
  rcache_invalidate();
#if (DRC_DEBUG & 4)
  host_dasm_new_symbol(sh2_drc_entry);
  host_dasm_new_symbol(sh2_drc_dispatcher);
#if CALL_STACK
  host_dasm_new_symbol(sh2_drc_dispatcher_call);
  host_dasm_new_symbol(sh2_drc_dispatcher_return);
#endif
  host_dasm_new_symbol(sh2_drc_exit);
  host_dasm_new_symbol(sh2_drc_test_irq);
  host_dasm_new_symbol(sh2_drc_write8);
  host_dasm_new_symbol(sh2_drc_write16);
  host_dasm_new_symbol(sh2_drc_write32);
  host_dasm_new_symbol(sh2_drc_read8);
  host_dasm_new_symbol(sh2_drc_read16);
  host_dasm_new_symbol(sh2_drc_read32);
  host_dasm_new_symbol(sh2_drc_read8_poll);
  host_dasm_new_symbol(sh2_drc_read16_poll);
  host_dasm_new_symbol(sh2_drc_read32_poll);
#endif
}

static void sh2_smc_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit, int free)
{
  struct block_link *bl;
  u32 i;

  free = free || nolit; // block is invalid if literals are overwritten
  dbg(2, " %sing block %08x-%08x,%08x-%08x, blkid %d,%d", free ? "delet" : "disabl",
    bd->addr, bd->addr + bd->size, bd->addr_lit, bd->addr_lit + bd->size_lit,
    tcache_id, bd - block_tables[tcache_id]);
  if (bd->addr == 0 || bd->entry_count == 0) {
    dbg(1, " killing dead block!? %08x", bd->addr);
    return;
  }
#if LINK_BRANCHES
  // remove from hash table, make incoming links unresolved
  if (bd->active) {
    for (i = 0; i < bd->entry_count; i++) {
      rm_from_hashlist(&bd->entryp[i], tcache_id);
      while ((bl = bd->entryp[i].links) != NULL) {
        dr_block_unlink(bl, 1);
        add_to_hashlist_unresolved(bl, tcache_id);
      }
    }
    dr_mark_memory(-1, bd, tcache_id, nolit);
    add_to_block_list(&inactive_blocks[tcache_id], bd);
  }
  bd->active = 0;
#endif
  if (free) {
#if LINK_BRANCHES
    // revoke outgoing links
    for (bl = bd->entryp[0].o_links; bl != NULL; bl = bl->o_next) {
      if (bl->target)
        dr_block_unlink(bl, 0);
      else
        rm_from_hashlist_unresolved(bl, tcache_id);
      bl->jump = NULL;
      bl->next = blink_free[bl->tcache_id];
      blink_free[bl->tcache_id] = bl;
    }
    bd->entryp[0].o_links = NULL;
#endif
    // invalidate block
    rm_from_block_lists(bd);
    bd->addr = bd->size = bd->addr_lit = bd->size_lit = 0;
    bd->entry_count = 0;
  }
}

static void sh2_smc_rm_blocks(u32 a, int tcache_id, u32 shift)
{
  struct block_list **blist, *entry, *next;
  u32 mask = RAM_SIZE(tcache_id) - 1;
  u32 wtmask = ~0x20000000; // writethrough area mask
  u32 start_addr, end_addr;
  u32 start_lit, end_lit;
  struct block_desc *block;
#if (DRC_DEBUG & 2)
  int removed = 0;
#endif

  // ignore cache-through
  a &= wtmask;
  blist = &inval_lookup[tcache_id][(a & mask) / INVAL_PAGE_SIZE];
  entry = *blist;
  // go through the block list for this range
  while (entry != NULL) {
    next = entry->next;
    block = entry->block;
    start_addr = block->addr & wtmask;
    end_addr = start_addr + block->size;
    start_lit = block->addr_lit & wtmask;
    end_lit = start_lit + block->size_lit;
    // disable/delete block if it covers the modified address
    if ((start_addr <= a && a < end_addr) ||
        (start_lit <= a && a < end_lit))
    {
      dbg(2, "smc remove @%08x", a);
      end_addr = (start_lit <= a && block->size_lit ? a : 0);
      sh2_smc_rm_block_entry(block, tcache_id, end_addr, 0);
#if (DRC_DEBUG & 2)
      removed = 1;
#endif
    }
    entry = next;
  }
#if (DRC_DEBUG & 2)
  if (!removed)
    dbg(2, "rm_blocks called @%08x, no work?", a);
#endif
#if BRANCH_CACHE
  if (tcache_id)
    memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  else {
    memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
  }
#endif
#if CALL_STACK
  if (tcache_id) {
    memset32(sh2s[tcache_id-1].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    sh2s[tcache_id-1].rts_cache_idx = 0;
  } else {
    memset32(sh2s[0].rts_cache, -1, sizeof(sh2s[0].rts_cache)/4);
    memset32(sh2s[1].rts_cache, -1, sizeof(sh2s[1].rts_cache)/4);
    sh2s[0].rts_cache_idx = sh2s[1].rts_cache_idx = 0;
  }
#endif
}

void sh2_drc_wcheck_ram(unsigned int a, int val, SH2 *sh2)
{
  dbg(2, "%csh2 smc check @%08x v=%d", sh2->is_slave ? 's' : 'm', a, val);
  sh2_smc_rm_blocks(a, 0, SH2_DRCBLK_RAM_SHIFT);
}

void sh2_drc_wcheck_da(unsigned int a, int val, SH2 *sh2)
{
  int cpuid = sh2->is_slave;
  dbg(2, "%csh2 smc check @%08x v=%d", cpuid ? 's' : 'm', a, val);
  sh2_smc_rm_blocks(a, 1 + cpuid, SH2_DRCBLK_DA_SHIFT);
}

int sh2_execute_drc(SH2 *sh2c, int cycles)
{
  int ret_cycles;

  // cycles are kept in SHR_SR unused bits (upper 20)
  // bit11 contains T saved for delay slot
  // others are usual SH2 flags
  sh2c->sr &= 0x3f3;
  sh2c->sr |= cycles << 12;
  sh2_drc_entry(sh2c);

  // TODO: irq cycles
  ret_cycles = (signed int)sh2c->sr >> 12;
  if (ret_cycles > 0)
    dbg(1, "warning: drc returned with cycles: %d", ret_cycles);

  sh2c->sr &= 0x3f3;
  return ret_cycles;
}
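
/* A minimal usage sketch (hypothetical caller; the real scheduler in the
 * core interleaves both SH2s with bus events):
 *
 *   int budget = 64;                              // cycles to run
 *   int left = sh2_execute_drc(&sh2s[0], budget); // normally <= 0
 *   int done = budget - left;                     // cycles actually executed
 */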
static void block_stats(void)
{
#if (DRC_DEBUG & 2)
  int c, b, i;
  long total = 0;

  printf("block stats:\n");
  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = 0; i < block_counts[b]; i++)
      if (block_tables[b][i].addr != 0)
        total += block_tables[b][i].refcount;
    for (i = block_limit[b]; i < BLOCK_MAX_COUNT(b); i++)
      if (block_tables[b][i].addr != 0)
        total += block_tables[b][i].refcount;
  }
  printf("total: %ld\n", total);

  for (c = 0; c < 20; c++) {
    struct block_desc *blk, *maxb = NULL;
    int max = 0;
    for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
      for (i = 0; i < block_counts[b]; i++) {
        blk = &block_tables[b][i];
        if (blk->addr != 0 && blk->refcount > max) {
          max = blk->refcount;
          maxb = blk;
        }
      }
      for (i = block_limit[b]; i < BLOCK_MAX_COUNT(b); i++) {
        blk = &block_tables[b][i];
        if (blk->addr != 0 && blk->refcount > max) {
          max = blk->refcount;
          maxb = blk;
        }
      }
    }
    if (maxb == NULL)
      break;
    printf("%08x %p %9d %2.3f%%\n", maxb->addr, maxb->tcache_ptr, maxb->refcount,
      (double)maxb->refcount / total * 100.0);
    maxb->refcount = 0;
  }

  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = 0; i < block_counts[b]; i++)
      block_tables[b][i].refcount = 0;
    for (i = block_limit[b]; i < BLOCK_MAX_COUNT(b); i++)
      block_tables[b][i].refcount = 0;
  }
#endif
}

void entry_stats(void)
{
#if (DRC_DEBUG & 32)
  int c, b, i, j;
  long total = 0;

  printf("block entry stats:\n");
  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = 0; i < block_counts[b]; i++)
      for (j = 0; j < block_tables[b][i].entry_count; j++)
        total += block_tables[b][i].entryp[j].entry_count;
    for (i = block_limit[b]; i < BLOCK_MAX_COUNT(b); i++)
      for (j = 0; j < block_tables[b][i].entry_count; j++)
        total += block_tables[b][i].entryp[j].entry_count;
  }
  printf("total: %ld\n", total);

  for (c = 0; c < 20; c++) {
    struct block_desc *blk;
    struct block_entry *maxb = NULL;
    int max = 0;
    for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
      for (i = 0; i < block_counts[b]; i++) {
        blk = &block_tables[b][i];
        for (j = 0; j < blk->entry_count; j++)
          if (blk->entryp[j].entry_count > max) {
            max = blk->entryp[j].entry_count;
            maxb = &blk->entryp[j];
          }
      }
      for (i = block_limit[b]; i < BLOCK_MAX_COUNT(b); i++) {
        blk = &block_tables[b][i];
        for (j = 0; j < blk->entry_count; j++)
          if (blk->entryp[j].entry_count > max) {
            max = blk->entryp[j].entry_count;
            maxb = &blk->entryp[j];
          }
      }
    }
    if (maxb == NULL)
      break;
    printf("%08x %p %9d %2.3f%%\n", maxb->pc, maxb->tcache_ptr, maxb->entry_count,
      (double)100 * maxb->entry_count / total);
    maxb->entry_count = 0;
  }

  for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
    for (i = 0; i < block_counts[b]; i++)
      for (j = 0; j < block_tables[b][i].entry_count; j++)
        block_tables[b][i].entryp[j].entry_count = 0;
    for (i = block_limit[b]; i < BLOCK_MAX_COUNT(b); i++)
      for (j = 0; j < block_tables[b][i].entry_count; j++)
        block_tables[b][i].entryp[j].entry_count = 0;
  }
#endif
}

static void backtrace(void)
{
#if (DRC_DEBUG & 1024)
  int i;
  printf("backtrace master:\n");
  for (i = 0; i < ARRAY_SIZE(csh2[0]); i++)
    SH2_DUMP(&csh2[0][i], "bt msh2");
  printf("backtrace slave:\n");
  for (i = 0; i < ARRAY_SIZE(csh2[1]); i++)
    SH2_DUMP(&csh2[1][i], "bt ssh2");
#endif
}

static void state_dump(void)
{
#if (DRC_DEBUG & 2048)
  int i;

  SH2_DUMP(&sh2s[0], "master");
  printf("VBR msh2: %x\n", sh2s[0].vbr);
  for (i = 0; i < 0x60; i++) {
    printf("%08x ", p32x_sh2_read32(sh2s[0].vbr + i*4, &sh2s[0]));
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("stack msh2: %x\n", sh2s[0].r[15]);
  for (i = -0x30; i < 0x30; i++) {
    printf("%08x ", p32x_sh2_read32(sh2s[0].r[15] + i*4, &sh2s[0]));
    if ((i+1) % 8 == 0) printf("\n");
  }
  SH2_DUMP(&sh2s[1], "slave");
  printf("VBR ssh2: %x\n", sh2s[1].vbr);
  for (i = 0; i < 0x60; i++) {
    printf("%08x ", p32x_sh2_read32(sh2s[1].vbr + i*4, &sh2s[1]));
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("stack ssh2: %x\n", sh2s[1].r[15]);
  for (i = -0x30; i < 0x30; i++) {
    printf("%08x ", p32x_sh2_read32(sh2s[1].r[15] + i*4, &sh2s[1]));
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
}

static void bcache_stats(void)
{
#if (DRC_DEBUG & 128)
  int i;
#if CALL_STACK
  for (i = 1; i < ARRAY_SIZE(sh2s->rts_cache); i++)
    if (sh2s[0].rts_cache[i].pc == -1 && sh2s[1].rts_cache[i].pc == -1)
      break;
  printf("return cache hits:%d misses:%d depth: %d index: %d/%d\n",
    rchit, rcmiss, i, sh2s[0].rts_cache_idx, sh2s[1].rts_cache_idx);
  for (i = 0; i < ARRAY_SIZE(sh2s[0].rts_cache); i++) {
    printf("%08x ", sh2s[0].rts_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
  for (i = 0; i < ARRAY_SIZE(sh2s[1].rts_cache); i++) {
    printf("%08x ", sh2s[1].rts_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
#if BRANCH_CACHE
  printf("branch cache hits:%d misses:%d\n", bchit, bcmiss);
  printf("branch cache master:\n");
  for (i = 0; i < ARRAY_SIZE(sh2s[0].branch_cache); i++) {
    printf("%08x ", sh2s[0].branch_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
  printf("branch cache slave:\n");
  for (i = 0; i < ARRAY_SIZE(sh2s[1].branch_cache); i++) {
    printf("%08x ", sh2s[1].branch_cache[i].pc);
    if ((i+1) % 8 == 0) printf("\n");
  }
#endif
#endif
}

void sh2_drc_flush_all(void)
{
  backtrace();
  state_dump();
  block_stats();
  entry_stats();
  bcache_stats();
  flush_tcache(0);
  flush_tcache(1);
  flush_tcache(2);
  Pico32x.emu_flags &= ~P32XF_DRC_ROM_C;
}

void sh2_drc_mem_setup(SH2 *sh2)
{
  // fill the DRC-only convenience pointers
  sh2->p_drcblk_da = Pico32xMem->drcblk_da[!!sh2->is_slave];
  sh2->p_drcblk_ram = Pico32xMem->drcblk_ram;
}

void sh2_drc_frame(void)
{
}

int sh2_drc_init(SH2 *sh2)
{
  int i;

  if (block_tables[0] == NULL)
  {
    for (i = 0; i < TCACHE_BUFFERS; i++) {
      block_tables[i] = calloc(BLOCK_MAX_COUNT(i), sizeof(*block_tables[0]));
      if (block_tables[i] == NULL)
        goto fail;
      // max 2 block links (exits) per block
      block_link_pool[i] = calloc(BLOCK_LINK_MAX_COUNT(i),
          sizeof(*block_link_pool[0]));
      if (block_link_pool[i] == NULL)
        goto fail;

      inval_lookup[i] = calloc(RAM_SIZE(i) / INVAL_PAGE_SIZE,
          sizeof(inval_lookup[0]));
      if (inval_lookup[i] == NULL)
        goto fail;

      hash_tables[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*hash_tables[0]));
      if (hash_tables[i] == NULL)
        goto fail;

      unresolved_links[i] = calloc(HASH_TABLE_SIZE(i), sizeof(*unresolved_links[0]));
      if (unresolved_links[i] == NULL)
        goto fail;
    }
    memset(block_counts, 0, sizeof(block_counts));
    for (i = 0; i < ARRAY_SIZE(block_counts); i++) {
      block_limit[i] = BLOCK_MAX_COUNT(i) - 1;
    }
    memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
    for (i = 0; i < ARRAY_SIZE(blink_free); i++) {
      blink_free[i] = NULL;
    }

    drc_cmn_init();
    rcache_init();
    tcache_ptr = tcache;
    sh2_generate_utils();
    host_instructions_updated(tcache, tcache_ptr);

    tcache_bases[0] = tcache_ptrs[0] = tcache_ptr;
    tcache_limit[0] = tcache_bases[0] + tcache_sizes[0] - (tcache_ptr-tcache);
    for (i = 1; i < ARRAY_SIZE(tcache_bases); i++) {
      tcache_bases[i] = tcache_ptrs[i] = tcache_bases[i - 1] + tcache_sizes[i - 1];
      tcache_limit[i] = tcache_bases[i] + tcache_sizes[i];
    }

#if (DRC_DEBUG & 4)
    for (i = 0; i < ARRAY_SIZE(block_tables); i++)
      tcache_dsm_ptrs[i] = tcache_bases[i];
    // disasm the utils
    tcache_dsm_ptrs[0] = tcache;
    do_host_disasm(0);
    fflush(stdout);
#endif
#if (DRC_DEBUG & 1)
    hash_collisions = 0;
#endif
  }
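  // per-CPU lookup caches start out invalidated; -1 is an odd value and can
  // never match a real instruction address, so entries stay dead until filled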
  memset(sh2->branch_cache, -1, sizeof(sh2->branch_cache));
  memset(sh2->rts_cache, -1, sizeof(sh2->rts_cache));
  sh2->rts_cache_idx = 0;

  return 0;

fail:
  sh2_drc_finish(sh2);
  return -1;
}

void sh2_drc_finish(SH2 *sh2)
{
  struct block_list *bl, *bn;
  int i;

  if (block_tables[0] == NULL)
    return;

  sh2_drc_flush_all();

  for (i = 0; i < TCACHE_BUFFERS; i++) {
#if (DRC_DEBUG & 4)
    printf("~~~ tcache %d\n", i);
#if 0
    tcache_dsm_ptrs[i] = tcache_bases[i];
    tcache_ptr = tcache_ptrs[i];
    do_host_disasm(i);
    if (tcache_limit[i] < tcache_bases[i] + tcache_sizes[i]) {
      tcache_dsm_ptrs[i] = tcache_limit[i];
      tcache_ptr = tcache_bases[i] + tcache_sizes[i];
      do_host_disasm(i);
    }
#endif
    printf("max links: %d\n", block_link_pool_counts[i]);
#endif

    if (block_tables[i] != NULL)
      free(block_tables[i]);
    block_tables[i] = NULL;
    if (block_link_pool[i] != NULL)
      free(block_link_pool[i]);
    block_link_pool[i] = NULL;
    blink_free[i] = NULL;

    if (inval_lookup[i] != NULL)
      free(inval_lookup[i]);
    inval_lookup[i] = NULL;

    if (hash_tables[i] != NULL) {
      free(hash_tables[i]);
      hash_tables[i] = NULL;
    }
  }

  for (bl = blist_free; bl; bl = bn) {
    bn = bl->next;
    free(bl);
  }
  blist_free = NULL;

  drc_cmn_cleanup();
}

#endif /* DRC_SH2 */

static void *dr_get_pc_base(u32 pc, SH2 *sh2)
{
  void *ret;
  u32 mask = 0;

  ret = p32x_sh2_get_mem_ptr(pc, &mask, sh2);
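  // mask is (size of the memory area containing pc) - 1; biasing the base
  // pointer by -(pc & ~mask) lets callers index it with the full PC value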
  if (ret == (void *)-1)
    return ret;

  return (char *)ret - (pc & ~mask);
}

u16 scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc_out,
  u32 *base_literals_out, u32 *end_literals_out)
{
  u16 *dr_pc_base;
  u32 pc, op, tmp;
  u32 end_pc, end_literals = 0;
  u32 lowest_literal = 0;
  u32 lowest_mova = 0;
  struct op_data *opd;
  int next_is_delay = 0;
  int end_block = 0;
  int i, i_end;
  u32 crc = 0;
  // 2nd pass stuff
  int last_btarget; // loop detector
  enum { T_UNKNOWN, T_CLEAR, T_SET } t; // T propagation state

  memset(op_flags, 0, sizeof(*op_flags) * BLOCK_INSN_LIMIT);
  op_flags[0] |= OF_BTARGET; // block start is always a target

  dr_pc_base = dr_get_pc_base(base_pc, &sh2s[!!is_slave]);

  // 1st pass: disassemble
  for (i = 0, pc = base_pc; ; i++, pc += 2) {
    // we need an ops[] entry after the last one initialized,
    // so do it before end_block checks
    opd = &ops[i];
    opd->op = OP_UNHANDLED;
    opd->rm = -1;
    opd->source = opd->dest = 0;
    opd->cycles = 1;
    opd->imm = 0;

    if (next_is_delay) {
      op_flags[i] |= OF_DELAY_OP;
      next_is_delay = 0;
    }
    else if (end_block || i >= BLOCK_INSN_LIMIT - 2)
      break;
    else if ((lowest_mova && lowest_mova <= pc) ||
             (lowest_literal && lowest_literal <= pc))
      break; // text area collides with data area

    op = FETCH_OP(pc);
    switch ((op & 0xf000) >> 12)
    {
    /////////////////////////////////////////////
    case 0x00:
      switch (op & 0x0f)
      {
      case 0x02:
        switch (GET_Fx())
        {
        case 0: // STC SR,Rn 0000nnnn00000010
          tmp = SHR_SR;
          break;
        case 1: // STC GBR,Rn 0000nnnn00010010
          tmp = SHR_GBR;
          break;
        case 2: // STC VBR,Rn 0000nnnn00100010
          tmp = SHR_VBR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(tmp);
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x03:
        CHECK_UNHANDLED_BITS(0xd0, undefined);
        // BRAF Rm 0000mmmm00100011
        // BSRF Rm 0000mmmm00000011
        opd->op = OP_BRANCH_RF;
        opd->rm = GET_Rn();
        opd->source = BITMASK2(SHR_PC, opd->rm);
        opd->dest = BITMASK1(SHR_PC);
        if (!(op & 0x20))
          opd->dest |= BITMASK1(SHR_PR);
        opd->cycles = 2;
        next_is_delay = 1;
        if (!(opd->dest & BITMASK1(SHR_PR)))
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        else
          op_flags[i+1+next_is_delay] |= OF_BTARGET;
        break;
      case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
      case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
      case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x07:
        // MUL.L Rm,Rn 0000nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        opd->cycles = 2;
        break;
      case 0x08:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // CLRT 0000000000001000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 0;
          break;
        case 1: // SETT 0000000000011000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 1;
          break;
        case 2: // CLRMAC 0000000000101000
          opd->dest = BITMASK3(SHR_T, SHR_MACL, SHR_MACH);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // NOP 0000000000001001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          break;
        case 1: // DIV0U 0000000000011001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          opd->dest = BITMASK2(SHR_SR, SHR_T);
          break;
        case 2: // MOVT Rn 0000nnnn00101001
          opd->source = BITMASK1(SHR_T);
          opd->dest = BITMASK1(GET_Rn());
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // STS MACH,Rn 0000nnnn00001010
          tmp = SHR_MACH;
          break;
        case 1: // STS MACL,Rn 0000nnnn00011010
          tmp = SHR_MACL;
          break;
        case 2: // STS PR,Rn 0000nnnn00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(tmp);
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0b:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // RTS 0000000000001011
          opd->op = OP_BRANCH_R;
          opd->rm = SHR_PR;
          opd->source = BITMASK1(opd->rm);
          opd->dest = BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        case 1: // SLEEP 0000000000011011
          opd->op = OP_SLEEP;
          end_block = 1;
          break;
        case 2: // RTE 0000000000101011
          opd->op = OP_RTE;
          opd->source = BITMASK1(SHR_SP);
          opd->dest = BITMASK3(SHR_SP, SHR_SR, SHR_PC);
          opd->cycles = 4;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(GET_Rn());
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x01:
      // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), GET_Rn());
      opd->dest = BITMASK1(SHR_MEM);
      opd->imm = (op & 0x0f) * 4;
      break;

    /////////////////////////////////////////////
    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_SR);
        break;
      case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
      case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
      case 0x0b: // OR  Rm,Rn 0010nnnnmmmm1011
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
        opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_SR);
        opd->dest = BITMASK2(GET_Rn(), SHR_SR);
        break;
      case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
      case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
        opd->cycles = 2;
        break;
      case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
      case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn 0100nnnn00000000
        case 2: // SHAL Rn 0100nnnn00100000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // DT Rn 0100nnnn00010000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          op_flags[i] |= OF_DELAY_INSN;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn 0100nnnn00000001
        case 2: // SHAR Rn 0100nnnn00100001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // CMP/PZ Rn 0100nnnn00010001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
          tmp = SHR_MACH;
          break;
        case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
          tmp = SHR_MACL;
          break;
        case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
          tmp = SHR_PR;
          break;
        case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
          tmp = SHR_SR;
          opd->cycles = 2;
          break;
        case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
          tmp = SHR_GBR;
          opd->cycles = 2;
          break;
        case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
          tmp = SHR_VBR;
          opd->cycles = 2;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), tmp);
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn 0100nnnn00000100
        case 0x05: // ROTR Rn 0100nnnn00000101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x24: // ROTCL Rn 0100nnnn00100100
        case 0x25: // ROTCR Rn 0100nnnn00100101
          opd->source = BITMASK2(GET_Rn(), SHR_T);
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x15: // CMP/PL Rn 0100nnnn00010101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
          tmp = SHR_MACH;
          break;
        case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
          tmp = SHR_MACL;
          break;
        case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
          tmp = SHR_PR;
          break;
        case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
          tmp = SHR_SR;
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
          tmp = SHR_GBR;
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
          tmp = SHR_VBR;
          opd->op = OP_LDC;
          opd->cycles = 3;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), SHR_MEM);
        opd->dest = BITMASK2(GET_Rn(), tmp);
        break;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0:
          // SHLL2 Rn 0100nnnn00001000
          // SHLR2 Rn 0100nnnn00001001
          break;
        case 1:
          // SHLL8 Rn 0100nnnn00011000
          // SHLR8 Rn 0100nnnn00011001
          break;
        case 2:
          // SHLL16 Rn 0100nnnn00101000
          // SHLR16 Rn 0100nnnn00101001
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp = SHR_MACL;
          break;
        case 2: // LDS Rm,PR 0100mmmm00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0b:
        switch (GET_Fx())
        {
        case 0: // JSR @Rm 0100mmmm00001011
          opd->dest = BITMASK1(SHR_PR);
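          // fallthrough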
        case 2: // JMP @Rm 0100mmmm00101011
          opd->op = OP_BRANCH_R;
          opd->rm = GET_Rn();
          opd->source = BITMASK1(opd->rm);
          opd->dest |= BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          if (!(opd->dest & BITMASK1(SHR_PR)))
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          else
            op_flags[i+1+next_is_delay] |= OF_BTARGET;
          break;
        case 1: // TAS.B @Rn 0100nnnn00011011
          opd->source = BITMASK2(GET_Rn(), SHR_MEM);
          opd->dest = BITMASK2(SHR_T, SHR_MEM);
          opd->cycles = 4;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR 0100mmmm00001110
          tmp = SHR_SR;
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp = SHR_GBR;
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp = SHR_VBR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_LDC;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0f:
        // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x05:
      // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (op & 0x0f) * 4;
      op_flags[i] |= OF_POLL_INSN;
      break;

    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
        opd->dest = BITMASK2(GET_Rm(), GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        break;
      case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
        opd->dest = BITMASK1(GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
        opd->source = BITMASK2(GET_Rm(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
        opd->op = OP_MOVE;
        goto arith_rmrn;
      case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
      case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
      case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
      case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
      case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
      case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
      case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
      case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
      arith_rmrn:
        opd->source = BITMASK1(GET_Rm());
        opd->dest = BITMASK1(GET_Rn());
        break;
      }
      break;

    /////////////////////////////////////////////
    case 0x07:
      // ADD #imm,Rn 0111nnnniiiiiiii
      opd->source = opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f);
        break;
      case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f) * 2;
        break;
      case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f);
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f) * 2;
        op_flags[i] |= OF_POLL_INSN;
        break;
      case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = (s8)op;
        break;
      case 0x0d00: // BT/S label 10001101dddddddd
      case 0x0f00: // BF/S label 10001111dddddddd
        next_is_delay = 1;
        // fallthrough
      case 0x0900: // BT label 10001001dddddddd
      case 0x0b00: // BF label 10001011dddddddd
        opd->op = (op & 0x0200) ? OP_BRANCH_CF : OP_BRANCH_CT;
        opd->source = BITMASK2(SHR_PC, SHR_T);
        opd->dest = BITMASK1(SHR_PC);
        opd->imm = ((signed int)(op << 24) >> 23);
        opd->imm += pc + 4;
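        // 8-bit displacement, sign-extended and doubled; branch targets
        // are relative to pc + 4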
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x09:
      // MOV.W @(disp,PC),Rn 1001nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = tmp + 2 + (op & 0xff) * 2;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 1;
      break;

    /////////////////////////////////////////////
    case 0x0b:
      // BSR label 1011dddddddddddd
      opd->dest = BITMASK1(SHR_PR);
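      // fallthrough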
    case 0x0a:
      // BRA label 1010dddddddddddd
      opd->op = OP_BRANCH;
      opd->source = BITMASK1(SHR_PC);
      opd->dest |= BITMASK1(SHR_PC);
      opd->imm = ((signed int)(op << 20) >> 19);
      opd->imm += pc + 4;
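      // 12-bit displacement, sign-extended and doubled, relative to pc + 4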
  5439. opd->cycles = 2;
  5440. next_is_delay = 1;
  5441. if (!(opd->dest & BITMASK1(SHR_PR))) {
  5442. if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2) {
  5443. op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
  5444. if (opd->imm <= pc)
  5445. end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
  5446. } else
  5447. end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
  5448. } else
  5449. op_flags[i+1+next_is_delay] |= OF_BTARGET;
  5450. break;
  5451. /////////////////////////////////////////////
  5452. case 0x0c:
  5453. switch (op & 0x0f00)
  5454. {
  5455. case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
  5456. case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
  5457. case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
  5458. opd->source = BITMASK2(SHR_GBR, SHR_R0);
  5459. opd->dest = BITMASK1(SHR_MEM);
  5460. opd->size = (op & 0x300) >> 8;
  5461. opd->imm = (op & 0xff) << opd->size;
  5462. break;
  5463. case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
  5464. case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
  5465. case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
  5466. opd->source = BITMASK2(SHR_GBR, SHR_MEM);
  5467. opd->dest = BITMASK1(SHR_R0);
  5468. opd->size = (op & 0x300) >> 8;
  5469. opd->imm = (op & 0xff) << opd->size;
  5470. op_flags[i] |= OF_POLL_INSN;
  5471. break;
  5472. case 0x0300: // TRAPA #imm 11000011iiiiiiii
  5473. opd->op = OP_TRAPA;
  5474. opd->source = BITMASK3(SHR_SP, SHR_PC, SHR_SR);
  5475. opd->dest = BITMASK2(SHR_SP, SHR_PC);
  5476. opd->imm = (op & 0xff);
  5477. opd->cycles = 8;
  5478. op_flags[i+1] |= OF_BTARGET;
  5479. break;
  5480. case 0x0700: // MOVA @(disp,PC),R0 11000111dddddddd
  5481. opd->op = OP_MOVA;
  5482. tmp = pc + 2;
  5483. if (op_flags[i] & OF_DELAY_OP) {
  5484. if (ops[i-1].op == OP_BRANCH)
  5485. tmp = ops[i-1].imm;
  5486. else if (ops[i-1].op != OP_BRANCH_N)
  5487. tmp = 0;
  5488. }
  5489. opd->dest = BITMASK1(SHR_R0);
  5490. if (tmp) {
  5491. opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
  5492. if (opd->imm >= base_pc) {
  5493. if (lowest_mova == 0 || opd->imm < lowest_mova)
  5494. lowest_mova = opd->imm;
  5495. }
  5496. }
  5497. break;
  5498. case 0x0800: // TST #imm,R0 11001000iiiiiiii
  5499. opd->source = BITMASK1(SHR_R0);
  5500. opd->dest = BITMASK1(SHR_T);
  5501. opd->imm = op & 0xff;
  5502. break;
  5503. case 0x0900: // AND #imm,R0 11001001iiiiiiii
  5504. opd->source = opd->dest = BITMASK1(SHR_R0);
  5505. opd->imm = op & 0xff;
  5506. break;
  5507. case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
  5508. opd->source = opd->dest = BITMASK1(SHR_R0);
  5509. opd->imm = op & 0xff;
  5510. break;
  5511. case 0x0b00: // OR #imm,R0 11001011iiiiiiii
  5512. opd->source = opd->dest = BITMASK1(SHR_R0);
  5513. opd->imm = op & 0xff;
  5514. break;
  5515. case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
  5516. opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
  5517. opd->dest = BITMASK1(SHR_T);
  5518. opd->imm = op & 0xff;
  5519. op_flags[i] |= OF_POLL_INSN;
  5520. opd->cycles = 3;
  5521. break;
  5522. case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
  5523. case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
  5524. case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
  5525. opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
  5526. opd->dest = BITMASK1(SHR_MEM);
  5527. opd->imm = op & 0xff;
  5528. opd->cycles = 3;
  5529. break;
  5530. default:
  5531. goto undefined;
  5532. }
  5533. break;
  5534. /////////////////////////////////////////////
  5535. case 0x0d:
  5536. // MOV.L @(disp,PC),Rn 1101nnnndddddddd
  5537. opd->op = OP_LOAD_POOL;
  5538. tmp = pc + 2;
  5539. if (op_flags[i] & OF_DELAY_OP) {
  5540. if (ops[i-1].op == OP_BRANCH)
  5541. tmp = ops[i-1].imm;
  5542. else if (ops[i-1].op != OP_BRANCH_N)
  5543. tmp = 0;
  5544. }
  5545. opd->source = BITMASK2(SHR_PC, SHR_MEM);
  5546. opd->dest = BITMASK1(GET_Rn());
  5547. if (tmp) {
  5548. opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
  5549. if (lowest_literal == 0 || opd->imm < lowest_literal)
  5550. lowest_literal = opd->imm;
  5551. }
  5552. opd->size = 2;
  5553. break;
  5554. /////////////////////////////////////////////
  5555. case 0x0e:
  5556. // MOV #imm,Rn 1110nnnniiiiiiii
  5557. opd->op = OP_LOAD_CONST;
  5558. opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;
    default:
    undefined:
      opd->op = OP_UNDEFINED;
      // an unhandled instruction is probably not code if it's not the 1st insn
      if (!(op_flags[i] & OF_DELAY_OP) && pc != base_pc)
        goto end;
      break;
    }
    if (op_flags[i] & OF_DELAY_OP) {
      switch (opd->op) {
      case OP_BRANCH:
      case OP_BRANCH_N:
      case OP_BRANCH_CT:
      case OP_BRANCH_CF:
      case OP_BRANCH_R:
      case OP_BRANCH_RF:
        elprintf(EL_ANOMALY, "%csh2 drc: branch in DS @ %08x",
          is_slave ? 's' : 'm', pc);
        opd->op = OP_UNDEFINED;
        op_flags[i] |= OF_B_IN_DS;
        next_is_delay = 0;
        break;
      }
    }
  }
end:
  i_end = i;
  end_pc = pc;
  // 2nd pass: some analysis
  lowest_literal = end_literals = lowest_mova = 0;
  t = T_UNKNOWN;
  last_btarget = 0;
  op = 0; // delay/poll insns counter
  for (i = 0, pc = base_pc; i < i_end; i++, pc += 2) {
    opd = &ops[i];
    crc += FETCH_OP(pc);
    // propagate T (TODO: DIV0U)
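    // while the state of T is statically known, BT/BF can be resolved at
    // translation time: promoted to an always-taken or never-taken branch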
    if ((op_flags[i] & OF_BTARGET) || (opd->dest & BITMASK1(SHR_T)))
      t = T_UNKNOWN;
    if ((opd->op == OP_BRANCH_CT && t == T_SET) ||
        (opd->op == OP_BRANCH_CF && t == T_CLEAR)) {
      opd->op = OP_BRANCH;
      opd->cycles = (op_flags[i + 1] & OF_DELAY_OP) ? 2 : 3;
    } else if ((opd->op == OP_BRANCH_CT && t == T_CLEAR) ||
               (opd->op == OP_BRANCH_CF && t == T_SET))
      opd->op = OP_BRANCH_N;
    else if ((opd->op == OP_SETCLRT && !opd->imm) || opd->op == OP_BRANCH_CT)
      t = T_CLEAR;
    else if ((opd->op == OP_SETCLRT && opd->imm) || opd->op == OP_BRANCH_CF)
      t = T_SET;
    // "overscan" detection: unreachable code after unconditional branch
    // this can happen if the insn after a forward branch isn't a local target
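    // a branch with a delay slot still executes insn i+1, so the block can
    // only be trimmed after i+2; without one it can be trimmed after i+1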
    if (OP_ISBRAUC(opd->op)) {
      if (op_flags[i + 1] & OF_DELAY_OP) {
        if (i_end > i + 2 && !(op_flags[i + 2] & OF_BTARGET))
          i_end = i + 2;
      } else {
        if (i_end > i + 1 && !(op_flags[i + 1] & OF_BTARGET))
          i_end = i + 1;
      }
    }
    // literal pool size detection
    if (opd->op == OP_MOVA && opd->imm >= base_pc)
      if (lowest_mova == 0 || opd->imm < lowest_mova)
        lowest_mova = opd->imm;
    if (opd->op == OP_LOAD_POOL) {
      if (opd->imm >= base_pc && opd->imm < end_pc + MAX_LITERAL_OFFSET) {
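        // opd->size is 1 (word) or 2 (long) for pool loads, so size*2 is
        // the literal's size in bytes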
        if (end_literals < opd->imm + opd->size * 2)
          end_literals = opd->imm + opd->size * 2;
        if (lowest_literal == 0 || lowest_literal > opd->imm)
          lowest_literal = opd->imm;
        if (opd->size == 2) {
          // tweak for NFL: treat a 32bit literal as an address and check if it
          // points to the literal space. In that case handle it like MOVA.
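          // bit 29 presumably selects the SH-2 cache-through mirror of the
          // same memory, so mask it off before the range check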
          tmp = FETCH32(opd->imm) & ~0x20000000; // MUST ignore wt bit here
          if (tmp >= end_pc && tmp < end_pc + MAX_LITERAL_OFFSET)
            if (lowest_mova == 0 || tmp < lowest_mova)
              lowest_mova = tmp;
        }
      }
    }
#if LOOP_DETECTION
    // inner loop detection
    // 1. a loop always starts with a branch target (for the backwards jump)
    // 2. it doesn't contain more than one polling and/or delaying insn
    // 3. it doesn't contain unconditional jumps
    // 4. no overlapping of loops
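    // e.g. a typical poll loop that qualifies:
    //   loop: mov.l @Rm,R0    ; polling insn
    //         tst   R0,R0
    //         bt    loop      ; backwards branch to the block-local target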
    if (op_flags[i] & OF_BTARGET) {
      last_btarget = i; // possible loop starting point
      op = 0;
    }
    // XXX let's hope nobody is putting a delay or poll insn in a delay slot :-/
    if (OP_ISBRAIMM(opd->op)) {
      // BSR, BRA, BT, BF with immediate target
      int i_tmp = (opd->imm - base_pc) / 2; // branch target, index in ops
      if (i_tmp == last_btarget && op <= 1) {
        op_flags[i_tmp] |= OF_LOOP; // conditions met -> mark loop
        last_btarget = i+1; // condition 4
      } else if (opd->op == OP_BRANCH)
        last_btarget = i+1; // condition 3
    }
    else if (OP_ISBRAIND(opd->op))
      // BRAF, BSRF, JMP, JSR: register-indirect, treat it as an off-limits jump
      last_btarget = i+1; // condition 3
    else if (op_flags[i] & (OF_POLL_INSN|OF_DELAY_INSN))
      op++; // condition 2
#endif
  }
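  // recompute the block end: overscan detection may have trimmed i_end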
  end_pc = base_pc + i_end * 2;
  // end_literals is used to decide whether to inline a literal or not
  // XXX: need better detection of whether this is actually used in a write
  if (lowest_literal >= base_pc) {
    if (lowest_literal < end_pc) {
      dbg(1, "warning: lowest_literal=%08x < end_pc=%08x", lowest_literal, end_pc);
      // TODO: does this always mean end_pc covers data?
    }
  }
  if (lowest_mova >= base_pc) {
    if (lowest_mova < end_literals) {
      dbg(1, "warning: mova=%08x < end_literals=%08x", lowest_mova, end_literals);
      end_literals = lowest_mova;
    }
    if (lowest_mova < end_pc) {
      dbg(1, "warning: mova=%08x < end_pc=%08x", lowest_mova, end_pc);
      end_literals = end_pc;
    }
  }
  if (lowest_literal >= end_literals)
    lowest_literal = end_literals;
  if (lowest_literal && end_literals)
    for (pc = lowest_literal; pc < end_literals; pc += 2)
      crc += FETCH_OP(pc);
  *end_pc_out = end_pc;
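  // "x ?: y" below is the GNU "elvis" extension: yields x if nonzero, else y;
  // 0 means no literals were found, so fall back to end_pc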
  if (base_literals_out != NULL)
    *base_literals_out = (lowest_literal ?: end_pc);
  if (end_literals_out != NULL)
    *end_literals_out = (end_literals ?: end_pc);
  // crc overflow handling, twice to collect all overflows
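  // folds the 32-bit sum into 16 bits like the IP checksum; one fold can
  // itself carry out (e.g. 0x0001ffff -> 0x10000 -> 0x0001), hence twice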
  crc = (crc & 0xffff) + (crc >> 16);
  crc = (crc & 0xffff) + (crc >> 16);
  return crc;
}

// vim:shiftwidth=2:ts=2:expandtab