/*
 * SH2 recompiler
 * (C) notaz, 2009,2010,2013
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 *
 * notes:
 * - tcache, block descriptor, link buffer overflows result in sh2_translate()
 *   failure, followed by full tcache invalidation for that region
 * - jumps between blocks are tracked for SMC handling (in block_entry->links),
 *   except jumps between different tcaches
 *
 * implemented:
 * - static register allocation
 * - remaining register caching and tracking in temporaries
 * - block-local branch linking
 * - block linking (except between tcaches)
 * - some constant propagation
 *
 * TODO:
 * - better constant propagation
 * - stack caching?
 * - bug fixing
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "../../pico/pico_int.h"
#include "../../pico/arm_features.h"
#include "sh2.h"
#include "compiler.h"
#include "../drc/cmn.h"
#include "../debug.h"

// features
#define PROPAGATE_CONSTANTS 1
#define LINK_BRANCHES       1
#define BRANCH_CACHE        1
#define ALIAS_REGISTERS     1
#define REMAP_REGISTER      1

// limits (per block)
#define MAX_BLOCK_SIZE      (BLOCK_INSN_LIMIT * 6 * 6)

// max literal offset from the block end
#define MAX_LITERAL_OFFSET  0x200 // max. MOVA, MOV @(PC) offset
#define MAX_LITERALS        (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES  (BLOCK_INSN_LIMIT / 4)

// debug stuff
// 01  - warnings/errors
// 02  - block info/smc
// 04  - asm
// 08  - runtime block entry log
// 10  - smc self-check
// 20  - runtime block entry counter
// 100 - write trace
// 200 - compare trace
// 400 - block entry backtrace on exit
// 800 - state dump on exit
// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0
#endif

#if DRC_DEBUG
#define dbg(l,...) { \
  if ((l) & DRC_DEBUG) \
    elprintf(EL_STATUS, ##__VA_ARGS__); \
}
#include "mame/sh2dasm.h"
#include <platform/libpicofe/linux/host_dasm.h>
static int insns_compiled, hash_collisions, host_insn_count;
#define COUNT_OP \
  host_insn_count++
#else // !DRC_DEBUG
#define COUNT_OP
#define dbg(...)
#endif

///

#define FETCH_OP(pc) \
  dr_pc_base[(pc) / 2]

#define FETCH32(a) \
  ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
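// note (illustrative): opcodes are fetched as big-endian 16-bit words via the
// dr_pc_base pointer set up by the translator; e.g. if the words at (a) and
// (a)+2 are 0x1234 and 0x5678, FETCH32(a) yields 0x12345678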
#define CHECK_UNHANDLED_BITS(mask, label) { \
  if ((op & (mask)) != 0) \
    goto label; \
}

#define GET_Fx() \
  ((op >> 4) & 0x0f)
#define GET_Rm GET_Fx
#define GET_Rn() \
  ((op >> 8) & 0x0f)

#define BITMASK1(v0) (1 << (v0))
#define BITMASK2(v0,v1) ((1 << (v0)) | (1 << (v1)))
#define BITMASK3(v0,v1,v2) (BITMASK2(v0,v1) | (1 << (v2)))
#define BITMASK4(v0,v1,v2,v3) (BITMASK3(v0,v1,v2) | (1 << (v3)))
#define BITMASK5(v0,v1,v2,v3,v4) (BITMASK4(v0,v1,v2,v3) | (1 << (v4)))
#define BITMASK6(v0,v1,v2,v3,v4,v5) (BITMASK5(v0,v1,v2,v3,v4) | (1 << (v5)))
#define BITRANGE(v0,v1) (BITMASK1(v1+1)-BITMASK1(v0)) // set with v0..v1
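// e.g. BITRANGE(2,4) == BITMASK1(5) - BITMASK1(2) == 0x20 - 0x04 == 0x1c,
// i.e. bits 2..4 set; note v1 must be below 31 for the subtraction trick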
#define SHR_T   SHR_SR // might make them separate someday
#define SHR_MEM 31
#define SHR_TMP -1

static struct op_data {
  u8 op;
  u8 cycles;
  u8 size;     // 0, 1, 2 - byte, word, long
  s8 rm;       // branch or load/store data reg
  u32 source;  // bitmask of src regs
  u32 dest;    // bitmask of dest regs
  u32 imm;     // immediate/io address/branch target
               // (for literal - address, not value)
} ops[BLOCK_INSN_LIMIT];

enum op_types {
  OP_UNHANDLED = 0,
  OP_BRANCH,
  OP_BRANCH_N,  // conditional known not to be taken
  OP_BRANCH_CT, // conditional, branch if T set
  OP_BRANCH_CF, // conditional, branch if T clear
  OP_BRANCH_R,  // indirect
  OP_BRANCH_RF, // indirect far (PC + Rm)
  OP_SETCLRT,   // T flag set/clear
  OP_MOVE,      // register move
  OP_LOAD_POOL, // literal pool load, imm is address
  OP_MOVA,
  OP_SLEEP,
  OP_RTE,
  OP_TRAPA,
  OP_UNDEFINED,
};

#define OP_ISBRANCH(op) (BITRANGE(OP_BRANCH, OP_BRANCH_RF) & BITMASK1(op))
#define OP_ISBRAUC(op) (BITMASK4(OP_BRANCH, OP_BRANCH_R, OP_BRANCH_RF, OP_RTE) \
                        & BITMASK1(op))
#define OP_ISBRACND(op) (BITMASK2(OP_BRANCH_CT, OP_BRANCH_CF) & BITMASK1(op))
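// these classify an op_types value with bitmask tests, e.g.
// OP_ISBRANCH(OP_BRANCH_CT) is nonzero (any branch kind matches), while
// OP_ISBRAUC() matches only unconditional flow changes (incl. RTE) and
// OP_ISBRACND() only the two conditional forms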
#ifdef DRC_SH2

#if (DRC_DEBUG & 4)
static u8 *tcache_dsm_ptrs[3];
static char sh2dasm_buff[64];
#define do_host_disasm(tcid) \
  host_dasm(tcache_dsm_ptrs[tcid], tcache_ptr - tcache_dsm_ptrs[tcid]); \
  tcache_dsm_ptrs[tcid] = tcache_ptr
#else
#define do_host_disasm(x)
#endif

#define SH2_DUMP(sh2, reason) { \
  char ms = (sh2)->is_slave ? 's' : 'm'; \
  printf("%csh2 %s %08x\n", ms, reason, (sh2)->pc); \
  printf("%csh2 r0-7  %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[0], (sh2)->r[1], (sh2)->r[2], (sh2)->r[3], \
    (sh2)->r[4], (sh2)->r[5], (sh2)->r[6], (sh2)->r[7]); \
  printf("%csh2 r8-15 %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->r[8], (sh2)->r[9], (sh2)->r[10], (sh2)->r[11], \
    (sh2)->r[12], (sh2)->r[13], (sh2)->r[14], (sh2)->r[15]); \
  printf("%csh2 pc-ml %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->pc, (sh2)->ppc, (sh2)->pr, (sh2)->sr&0x3ff, \
    (sh2)->gbr, (sh2)->vbr, (sh2)->mach, (sh2)->macl); \
  printf("%csh2 tmp-p %08x %08x %08x %08x %08x %08x %08x %08x\n", ms, \
    (sh2)->drc_tmp, (sh2)->irq_cycles, \
    (sh2)->pdb_io_csum[0], (sh2)->pdb_io_csum[1], (sh2)->state, \
    (sh2)->poll_addr, (sh2)->poll_cycles, (sh2)->poll_cnt); \
}

#if (DRC_DEBUG & (8|256|512|1024)) || defined(PDB)
static SH2 csh2[2][8];
static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
{
  if (block != NULL) {
    dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
      sh2->pc, block, (signed int)sr >> 12);
#if defined PDB
    pdb_step(sh2, sh2->pc);
#elif (DRC_DEBUG & 256)
    {
      static FILE *trace[2];
      int idx = sh2->is_slave;
      if (!trace[0]) {
        truncate("pico.trace", 0);
        trace[0] = fopen("pico.trace0", "wb");
        trace[1] = fopen("pico.trace1", "wb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        fwrite(sh2, offsetof(SH2, read8_map), 1, trace[idx]);
        fwrite(&sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx]);
        memcpy(&csh2[idx][0], sh2, offsetof(SH2, poll_cnt)+4);
        csh2[idx][0].is_slave = idx;
      }
    }
#elif (DRC_DEBUG & 512)
    {
      static FILE *trace[2];
      static SH2 fsh2;
      int idx = sh2->is_slave;
      if (!trace[0]) {
        trace[0] = fopen("pico.trace0", "rb");
        trace[1] = fopen("pico.trace1", "rb");
      }
      if (csh2[idx][0].pc != sh2->pc) {
        if (!fread(&fsh2, offsetof(SH2, read8_map), 1, trace[idx]) ||
            !fread(&fsh2.pdb_io_csum, sizeof(sh2->pdb_io_csum), 1, trace[idx])) {
          printf("trace eof at %08lx\n", ftell(trace[idx]));
          exit(1);
        }
        fsh2.sr = (fsh2.sr & 0xfff) | (sh2->sr & ~0xfff);
        fsh2.is_slave = idx;
        if (memcmp(&fsh2, sh2, offsetof(SH2, read8_map)) ||
            0)//memcmp(&fsh2.pdb_io_csum, &sh2->pdb_io_csum, sizeof(sh2->pdb_io_csum)))
        {
          printf("difference at %08lx!\n", ftell(trace[idx]));
          SH2_DUMP(&fsh2, "file");
          SH2_DUMP(sh2, "current");
          SH2_DUMP(&csh2[idx][0], "previous");
          exit(1);
        }
        csh2[idx][0] = fsh2;
      }
    }
#elif (DRC_DEBUG & 1024)
    {
      int x = sh2->is_slave, i;
      for (i = 0; i < ARRAY_SIZE(csh2[x])-1; i++)
        memcpy(&csh2[x][i], &csh2[x][i+1], offsetof(SH2, poll_cnt)+4);
      memcpy(&csh2[x][ARRAY_SIZE(csh2[x])-1], sh2, offsetof(SH2, poll_cnt)+4);
      csh2[x][0].is_slave = x;
    }
#endif
  }
  return block;
}
#endif
// } debug

#define TCACHE_BUFFERS 3
// we have 3 translation cache buffers, split from one drc/cmn buffer.
// BIOS shares tcache with data array because it's only used for init
// and can be discarded early
// XXX: need to tune sizes
static const int tcache_sizes[TCACHE_BUFFERS] = {
  DRC_TCACHE_SIZE * 14 / 16, // ROM (rarely used), DRAM
  DRC_TCACHE_SIZE / 16, // BIOS, data array in master sh2
  DRC_TCACHE_SIZE / 16, // ... slave
};

static u8 *tcache_bases[TCACHE_BUFFERS];
static u8 *tcache_ptrs[TCACHE_BUFFERS];
static u8 *tcache_limit[TCACHE_BUFFERS];
// ptr for code emitters
static u8 *tcache_ptr;

#define MAX_BLOCK_ENTRIES (BLOCK_INSN_LIMIT / 8)

struct block_link {
  u32 target_pc;
  void *jump;                // insn address
  struct block_link *next;   // either in block_entry->links or unresolved
  struct block_link *o_next; // ...in block_entry->o_links
  struct block_link *prev;
  struct block_link *o_prev;
  struct block_entry *target;// target block this is linked in (be->links)
  int tcache_id;
};

struct block_entry {
  u32 pc;
  u8 *tcache_ptr;            // translated block for above PC
  struct block_entry *next;  // chain in hash_table with same pc hash
  struct block_entry *prev;
  struct block_link *links;  // incoming links to this entry
  struct block_link *o_links;// outgoing links from this entry
#if (DRC_DEBUG & 2)
  struct block_desc *block;
#endif
#if (DRC_DEBUG & 32)
  int entry_count;
#endif
};

struct block_desc {
  u32 addr;       // block start SH2 PC address
  u32 addr_lit;   // block start SH2 literal pool addr
  int size;       // ..of recompiled insns
  int size_lit;   // ..of (insns+)literal pool
  u8 *tcache_ptr; // start address of block in cache
  u16 active;     // actively used or deactivated?
  struct block_list *list;
#if (DRC_DEBUG & 2)
  int refcount;
#endif
  int entry_count;
  struct block_entry entryp[MAX_BLOCK_ENTRIES];
};

static const int block_max_counts[TCACHE_BUFFERS] = {
  4*1024,
  256,
  256,
};
static struct block_desc *block_tables[TCACHE_BUFFERS];
static int block_counts[TCACHE_BUFFERS];
static int block_limit[TCACHE_BUFFERS];

// we have block_link_pool to avoid using mallocs
static const int block_link_pool_max_counts[TCACHE_BUFFERS] = {
  16*1024,
  4*256,
  4*256,
};
static struct block_link *block_link_pool[TCACHE_BUFFERS];
static int block_link_pool_counts[TCACHE_BUFFERS];
static struct block_link **unresolved_links[TCACHE_BUFFERS];
static struct block_link *blink_free[TCACHE_BUFFERS];

// used for invalidation
static const int ram_sizes[TCACHE_BUFFERS] = {
  0x40000,
  0x1000,
  0x1000,
};
#define INVAL_PAGE_SIZE 0x100

struct block_list {
  struct block_desc *block;
  struct block_list *next;
  struct block_list *prev;
  struct block_list **head;
  struct block_list *l_next;
};
struct block_list *blist_free;

// array of pointers to block_lists for RAM and 2 data arrays
// each array has len: sizeof(mem) / INVAL_PAGE_SIZE
static struct block_list **inval_lookup[TCACHE_BUFFERS];
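// e.g. SDRAM (0x40000 bytes) gets 0x40000 / 0x100 = 0x400 list heads, one
// per 256-byte page, so invalidation only needs to walk one page's list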
static const int hash_table_sizes[TCACHE_BUFFERS] = {
  0x4000,
  0x100,
  0x100,
};
static struct block_entry **hash_tables[TCACHE_BUFFERS];

#define HASH_FUNC(hash_tab, addr, mask) \
  (hash_tab)[(((addr) >> 20) ^ ((addr) >> 2)) & (mask)]
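// the macro yields an lvalue, so it serves both lookup and insertion, e.g.:
//   struct block_entry *be = HASH_FUNC(hash_tables[tcid], pc, mask);
//   for (; be != NULL; be = be->next)
//     if (be->pc == pc) ...
// where mask must be hash_table_sizes[tcid] - 1 (sizes are powers of 2)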
// host register tracking
enum {
  HR_FREE,
  HR_STATIC, // vreg has a static mapping
  HR_CACHED, // vreg has sh2_reg_e
  HR_TEMP,   // reg used for temp storage
} cache_reg_type;
enum {
  HRF_DIRTY  = 1 << 0, // has "dirty" value to be written to ctx
  HRF_LOCKED = 1 << 1, // can't be evicted
  HRF_TEMP   = 1 << 2, // is for temps and args
  HRF_REG    = 1 << 3, // is for sh2 regs
} cache_reg_flags;

typedef struct {
  u8 hreg;    // "host" reg
  u8 flags:4; // TEMP or REG?
  u8 type:4;
  u16 stamp;  // kind of a timestamp
  u32 gregs;  // "guest" reg mask
} cache_reg_t;

// guest register tracking
enum {
  GRF_DIRTY  = 1 << 0, // reg has "dirty" value to be written to ctx
  GRF_CONST  = 1 << 1, // reg has a constant
  GRF_CDIRTY = 1 << 2, // constant not yet written to ctx
  GRF_STATIC = 1 << 3, // reg has static mapping to vreg
} guest_reg_flags;

typedef struct {
  u16 flags; // guest flags: is constant, is dirty?
  s8 sreg;   // cache reg for static mapping
  s8 vreg;   // cache_reg this is currently mapped to, -1 if not mapped
  u32 val;   // value if this is constant
} guest_reg_t;

// note: cache_regs[] must have at least the amount of
// HRF_REG registers used by handlers in worst case (currently 4)
#ifdef __arm__
#include "../drc/emit_arm.c"
// register assignment goes by ABI convention: all caller-save registers are
// TEMP, the others are either static or REG. SR must be static, and R0 is
// strongly recommended to be static as well
static guest_reg_t guest_regs[] = {
  // SHR_R0 .. SHR_SP
#ifndef __MACH__ // no r9..
  { GRF_STATIC, 8 }, { GRF_STATIC, 9 }, { 0 } , { 0 } ,
#else
  { GRF_STATIC, 8 }, { 0 } , { 0 } , { 0 } ,
#endif
  { 0 } , { 0 } , { 0 } , { 0 } ,
  { 0 } , { 0 } , { 0 } , { 0 } ,
  { 0 } , { 0 } , { 0 } , { 0 } ,
  // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
  // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
  { 0 } , { 0 } , { 0 } , { GRF_STATIC, 10 },
  { 0 } , { 0 } , { 0 } , { 0 } ,
};

// NB first TEMP, then REG. alloc/evict algorithm depends on this
static cache_reg_t cache_regs[] = {
  { 12, HRF_TEMP },
  { 14, HRF_TEMP },
  {  0, HRF_TEMP },
  {  1, HRF_TEMP },
  {  2, HRF_TEMP },
  {  3, HRF_TEMP },
  {  8, HRF_LOCKED },
#ifndef __MACH__ // no r9..
  {  9, HRF_LOCKED },
#endif
  { 10, HRF_LOCKED },
  {  4, HRF_REG },
  {  5, HRF_REG },
  {  6, HRF_REG },
  {  7, HRF_REG },
};

#elif defined(__i386__)
#include "../drc/emit_x86.c"

static guest_reg_t guest_regs[] = {
  // SHR_R0 .. SHR_SP
  {GRF_STATIC, xSI}, { 0 } , { 0 } , { 0 } ,
  { 0 } , { 0 } , { 0 } , { 0 } ,
  { 0 } , { 0 } , { 0 } , { 0 } ,
  { 0 } , { 0 } , { 0 } , { 0 } ,
  // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
  // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
  { 0 } , { 0 } , { 0 } , {GRF_STATIC, xDI},
  { 0 } , { 0 } , { 0 } , { 0 } ,
};

// ax, cx, dx are usually temporaries by convention
static cache_reg_t cache_regs[] = {
  { xBX, HRF_REG|HRF_TEMP },
  { xCX, HRF_REG|HRF_TEMP },
  { xDX, HRF_REG|HRF_TEMP },
  { xAX, HRF_REG|HRF_TEMP },
  { xSI, HRF_LOCKED },
  { xDI, HRF_LOCKED },
};

#elif defined(__x86_64__)
#include "../drc/emit_x86.c"

static guest_reg_t guest_regs[] = {
  // SHR_R0 .. SHR_SP
#ifndef _WIN32
  { 0 } , { 0 } , { 0 } , { 0 } ,
#else
  {GRF_STATIC, xDI}, { 0 } , { 0 } , { 0 } ,
#endif
  { 0 } , { 0 } , { 0 } , { 0 } ,
  { 0 } , { 0 } , { 0 } , { 0 } ,
  { 0 } , { 0 } , { 0 } , { 0 } ,
  // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
  // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
  { 0 } , { 0 } , { 0 } , {GRF_STATIC, xBX},
  { 0 } , { 0 } , { 0 } , { 0 } ,
};

// ax, cx, dx are usually temporaries by convention
static cache_reg_t cache_regs[] = {
  { xCX, HRF_REG|HRF_TEMP },
  { xDX, HRF_REG|HRF_TEMP },
  { xAX, HRF_REG|HRF_TEMP },
  { xSI, HRF_REG|HRF_TEMP },
#ifndef _WIN32
  { xDI, HRF_REG|HRF_TEMP },
#else
  { xDI, HRF_LOCKED },
#endif
  { xBX, HRF_LOCKED },
};

#else
#error unsupported arch
#endif

static signed char reg_map_host[HOST_REGS];
// SH2 SR bit layout: T bit 0, S bit 1, I3-I0 bits 4-7, Q bit 8, M bit 9;
// T_save (bit 11) is a DRC-internal scratch bit, not an architectural SR bit
#define T      0x00000001
#define S      0x00000002
#define I      0x000000f0
#define Q      0x00000100
#define M      0x00000200
#define T_save 0x00000800

#define I_SHIFT 4
#define Q_SHIFT 8
#define M_SHIFT 9
static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void (*sh2_drc_dispatcher)(void);
static void (*sh2_drc_exit)(void);
static void (*sh2_drc_test_irq)(void);

static u32  REGPARM(1) (*sh2_drc_read8)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read16)(u32 a);
static u32  REGPARM(1) (*sh2_drc_read32)(u32 a);
static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write32)(u32 a, u32 d);

// flags for memory access
#define MF_SIZEMASK 0x03 // size of access
#define MF_POSTINCR 0x10 // post increment (for read_rr)
#define MF_PREDECR  MF_POSTINCR // pre decrement (for write_rr)

// address space stuff
static int dr_is_rom(u32 a)
{
  // tweak for WWF Raw which writes data to some high ROM addresses
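  // 0x02000000 is the ROM area (the 0xc6000000 mask also catches mirrors);
  // the top 0x20000 of the 4MB window is excluded so such writes can work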
  return (a & 0xc6000000) == 0x02000000 && (a & 0x3f0000) < 0x3e0000;
}

static int dr_ctx_get_mem_ptr(u32 a, u32 *mask)
{
  int poffs = -1;

  if ((a & ~0x7ff) == 0) {
    // BIOS
    poffs = offsetof(SH2, p_bios);
    *mask = 0x7ff;
  }
  else if ((a & 0xfffff000) == 0xc0000000) {
    // data array
    // FIXME: access sh2->data_array instead
    poffs = offsetof(SH2, p_da);
    *mask = 0xfff;
  }
  else if ((a & 0xc6000000) == 0x06000000) {
    // SDRAM
    poffs = offsetof(SH2, p_sdram);
    *mask = 0x03ffff;
  }
  else if ((a & 0xc6000000) == 0x02000000) {
    // ROM
    poffs = offsetof(SH2, p_rom);
    *mask = 0x3fffff;
  }

  return poffs;
}

static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
{
  struct block_entry *be;
  u32 tcid = 0, mask;

  // data arrays have their own caches
  if ((pc & 0xe0000000) == 0xc0000000 || (pc & ~0xfff) == 0)
    tcid = 1 + is_slave;

  *tcache_id = tcid;

  mask = hash_table_sizes[tcid] - 1;
  be = HASH_FUNC(hash_tables[tcid], pc, mask);
  for (; be != NULL; be = be->next)
    if (be->pc == pc)
      return be;

  return NULL;
}

// ---------------------------------------------------------------

// block management
static void add_to_block_list(struct block_list **blist, struct block_desc *block)
{
  struct block_list *added;

  if (blist_free) {
    added = blist_free;
    blist_free = added->next;
  } else {
    added = malloc(sizeof(*added));
  }
  if (!added) {
    elprintf(EL_ANOMALY, "drc OOM (1)");
    return;
  }
  added->block = block;
  added->l_next = block->list;
  block->list = added;
  added->head = blist;

  added->prev = NULL;
  if (*blist)
    (*blist)->prev = added;
  added->next = *blist;
  *blist = added;
}

static void rm_from_block_lists(struct block_desc *block)
{
  struct block_list *entry;

  entry = block->list;
  while (entry != NULL) {
    if (entry->prev != NULL)
      entry->prev->next = entry->next;
    else
      *(entry->head) = entry->next;
    if (entry->next != NULL)
      entry->next->prev = entry->prev;

    entry->next = blist_free;
    blist_free = entry;

    entry = entry->l_next;
  }
  block->list = NULL;
}

static void rm_block_list(struct block_list **blist)
{
  struct block_list *next, *current = *blist;
  while (current != NULL) {
    next = current->next;
    current->next = blist_free;
    blist_free = current;
    current = next;
  }
  *blist = NULL;
}

static void REGPARM(1) flush_tcache(int tcid)
{
  int i;
#if (DRC_DEBUG & 1)
  int tc_used, bl_used;

  tc_used = tcache_sizes[tcid] - (tcache_limit[tcid] - tcache_ptrs[tcid]);
  bl_used = block_max_counts[tcid] - (block_limit[tcid] - block_counts[tcid]);
  elprintf(EL_STATUS, "tcache #%d flush! (%d/%d, bds %d/%d)", tcid, tc_used,
    tcache_sizes[tcid], bl_used, block_max_counts[tcid]);
#endif

  block_counts[tcid] = 0;
  block_limit[tcid] = block_max_counts[tcid] - 1;
  block_link_pool_counts[tcid] = 0;
  blink_free[tcid] = NULL;
  memset(unresolved_links[tcid], 0, sizeof(*unresolved_links[0]) * hash_table_sizes[tcid]);
  memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * hash_table_sizes[tcid]);
  tcache_ptrs[tcid] = tcache_bases[tcid];
  tcache_limit[tcid] = tcache_bases[tcid] + tcache_sizes[tcid];
  if (Pico32xMem->sdram != NULL) {
    if (tcid == 0) { // ROM, RAM
      memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
      memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
      memset(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache));
      memset(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache));
    } else {
      memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
      memset(Pico32xMem->drclit_ram, 0, sizeof(Pico32xMem->drclit_ram));
      memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[tcid - 1]));
      memset(Pico32xMem->drclit_da[tcid - 1], 0, sizeof(Pico32xMem->drclit_da[tcid - 1]));
      memset(sh2s[tcid - 1].branch_cache, -1, sizeof(sh2s[0].branch_cache));
    }
  }
#if (DRC_DEBUG & 4)
  tcache_dsm_ptrs[tcid] = tcache_bases[tcid];
#endif

  for (i = 0; i < ram_sizes[tcid] / INVAL_PAGE_SIZE; i++)
    rm_block_list(&inval_lookup[tcid][i]);
}

static void add_to_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = hash_table_sizes[tcache_id] - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

  be->prev = NULL;
  if (*head)
    (*head)->prev = be;
  be->next = *head;
  *head = be;

#if (DRC_DEBUG & 2)
  if (be->next != NULL) {
    printf(" %08x: entry hash collision with %08x\n",
      be->pc, be->next->pc);
    hash_collisions++;
  }
#endif
}

static void rm_from_hashlist(struct block_entry *be, int tcache_id)
{
  u32 tcmask = hash_table_sizes[tcache_id] - 1;
  struct block_entry **head = &HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);

#if DRC_DEBUG & 1
  struct block_entry *current = be;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist @%p: be %p %08x missing?", head, be, be->pc);
#endif

  if (be->prev != NULL)
    be->prev->next = be->next;
  else
    *head = be->next;
  if (be->next != NULL)
    be->next->prev = be->prev;
}

static void add_to_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = hash_table_sizes[tcache_id] - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = *head;
  while (current != NULL && current != bl)
    current = current->next;
  if (current == bl)
    dbg(1, "add_to_hashlist_unresolved @%p: bl %p %p %08x already in?", head, bl, bl->target, bl->target_pc);
#endif

  bl->target = NULL; // marker for not resolved
  bl->prev = NULL;
  if (*head)
    (*head)->prev = bl;
  bl->next = *head;
  *head = bl;
}

static void rm_from_hashlist_unresolved(struct block_link *bl, int tcache_id)
{
  u32 tcmask = hash_table_sizes[tcache_id] - 1;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], bl->target_pc, tcmask);

#if DRC_DEBUG & 1
  struct block_link *current = bl;
  while (current->prev != NULL)
    current = current->prev;
  if (current != *head)
    dbg(1, "rm_from_hashlist_unresolved @%p: bl %p %p %08x missing?", head, bl, bl->target, bl->target_pc);
#endif

  if (bl->prev != NULL)
    bl->prev->next = bl->next;
  else
    *head = bl->next;
  if (bl->next != NULL)
    bl->next->prev = bl->prev;
}

static void sh2_smc_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit);

static void dr_free_oldest_block(int tcache_id)
{
  struct block_desc *bd;

  if (block_limit[tcache_id] >= block_max_counts[tcache_id]) {
    // block desc wrap around
    block_limit[tcache_id] = 0;
  }
  bd = &block_tables[tcache_id][block_limit[tcache_id]];

  if (bd->tcache_ptr && bd->tcache_ptr < tcache_ptrs[tcache_id]) {
    // cache wrap around
    tcache_ptrs[tcache_id] = bd->tcache_ptr;
  }

  if (bd->addr && bd->entry_count)
    sh2_smc_rm_block_entry(bd, tcache_id, 0);

  block_limit[tcache_id]++;
  if (block_limit[tcache_id] >= block_max_counts[tcache_id])
    block_limit[tcache_id] = 0;
  bd = &block_tables[tcache_id][block_limit[tcache_id]];
  if (bd->tcache_ptr >= tcache_ptrs[tcache_id])
    tcache_limit[tcache_id] = bd->tcache_ptr;
  else
    tcache_limit[tcache_id] = tcache_bases[tcache_id] + tcache_sizes[tcache_id];
}

static u8 *dr_prepare_cache(int tcache_id, int insn_count)
{
#if BRANCH_CACHE
  u8 *limit = tcache_limit[tcache_id];
#endif

  // if no block desc available
  if (block_counts[tcache_id] == block_limit[tcache_id])
    dr_free_oldest_block(tcache_id);

  // while not enough cache space left (limit - tcache_ptr < max space needed)
  while (tcache_limit[tcache_id] - tcache_ptrs[tcache_id] < insn_count * 128)
    dr_free_oldest_block(tcache_id);

#if BRANCH_CACHE
  if (limit != tcache_limit[tcache_id]) {
    if (tcache_id)
      memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
    else {
      memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
      memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
    }
  }
#endif

  return (u8 *)tcache_ptrs[tcache_id];
}

static void dr_mark_memory(int mark, struct block_desc *block, int tcache_id, u32 nolit)
{
  u8 *drc_ram_blk = NULL, *lit_ram_blk = NULL;
  u32 addr, end, mask = 0, shift = 0, idx;

  // mark memory blocks as containing compiled code
  if ((block->addr & 0xc7fc0000) == 0x06000000
      || (block->addr & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      drc_ram_blk = Pico32xMem->drcblk_da[tcache_id-1];
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      drc_ram_blk = Pico32xMem->drcblk_ram;
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = ram_sizes[tcache_id] - 1;

    // mark recompiled insns
    addr = block->addr & ~((1 << shift) - 1);
    end = block->addr + block->size;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark literal pool
    if (addr < (block->addr_lit & ~((1 << shift) - 1)))
      addr = block->addr_lit & ~((1 << shift) - 1);
    end = block->addr_lit + block->size_lit;
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      drc_ram_blk[idx++] += mark;

    // mark for literals disabled
    if (nolit) {
      addr = nolit & ~((1 << shift) - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
        lit_ram_blk[idx++] = 1;
    }

    if (mark < 0)
      rm_from_block_lists(block);
    else {
      // add to invalidation lookup lists
      addr = block->addr & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr + block->size;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);

      if (addr < (block->addr_lit & ~(INVAL_PAGE_SIZE - 1)))
        addr = block->addr_lit & ~(INVAL_PAGE_SIZE - 1);
      end = block->addr_lit + block->size_lit;
      for (idx = (addr & mask) / INVAL_PAGE_SIZE; addr < end; addr += INVAL_PAGE_SIZE)
        add_to_block_list(&inval_lookup[tcache_id][idx++], block);
    }
  }
}

static u32 dr_check_nolit(u32 start, u32 end, int tcache_id)
{
  u8 *lit_ram_blk = NULL;
  u32 mask = 0, shift = 0, addr, idx;

  if ((start & 0xc7fc0000) == 0x06000000
      || (start & 0xfffff000) == 0xc0000000)
  {
    if (tcache_id != 0) {
      // data array
      lit_ram_blk = Pico32xMem->drclit_da[tcache_id-1];
      shift = SH2_DRCBLK_DA_SHIFT;
    }
    else {
      // SDRAM
      lit_ram_blk = Pico32xMem->drclit_ram;
      shift = SH2_DRCBLK_RAM_SHIFT;
    }
    mask = ram_sizes[tcache_id] - 1;

    addr = start & ~((1 << shift) - 1);
    for (idx = (addr & mask) >> shift; addr < end; addr += (1 << shift))
      if (lit_ram_blk[idx++])
        break;

    return (addr < start ? start : addr > end ? end : addr);
  }

  return end;
}

static struct block_desc *dr_add_block(u32 addr, int size,
  u32 addr_lit, int size_lit, int is_slave, int *blk_id)
{
  struct block_entry *be;
  struct block_desc *bd;
  int tcache_id;
  int *bcount;
  // do a lookup to get the tcache_id, and check for an overridden block
  be = dr_get_entry(addr, is_slave, &tcache_id);
  if (be != NULL)
    dbg(1, "block override for %08x", addr);

  bcount = &block_counts[tcache_id];
  if (*bcount == block_limit[tcache_id]) {
    dbg(1, "bd overflow for tcache %d", tcache_id);
    return NULL;
  }

  bd = &block_tables[tcache_id][*bcount];
  bd->addr = addr;
  bd->size = size;
  bd->addr_lit = addr_lit;
  bd->size_lit = size_lit;
  bd->tcache_ptr = tcache_ptr;
  bd->active = 1;

  bd->entry_count = 1;
  bd->entryp[0].pc = addr;
  bd->entryp[0].tcache_ptr = tcache_ptr;
  bd->entryp[0].links = bd->entryp[0].o_links = NULL;
#if (DRC_DEBUG & 2)
  bd->entryp[0].block = bd;
  bd->refcount = 0;
#endif
  add_to_hashlist(&bd->entryp[0], tcache_id);

  *blk_id = *bcount;
  (*bcount)++;
  if (*bcount >= block_max_counts[tcache_id])
    *bcount = 0;

  return bd;
}

static void REGPARM(3) *dr_lookup_block(u32 pc, int is_slave, int *tcache_id)
{
  struct block_entry *be = NULL;
  void *block = NULL;

  be = dr_get_entry(pc, is_slave, tcache_id);
  if (be != NULL)
    block = be->tcache_ptr;

#if (DRC_DEBUG & 2)
  if (be != NULL)
    be->block->refcount++;
#endif
  return block;
}

static void *dr_failure(void)
{
  lprintf("recompilation failed\n");
  exit(1);
}

static void *dr_prepare_ext_branch(struct block_entry *owner, u32 pc, int is_slave, int tcache_id)
{
#if LINK_BRANCHES
  struct block_link *bl = block_link_pool[tcache_id];
  int cnt = block_link_pool_counts[tcache_id];
  struct block_entry *be = NULL;
  int target_tcache_id;

  // get the target block entry
  be = dr_get_entry(pc, is_slave, &target_tcache_id);
  if (target_tcache_id && target_tcache_id != tcache_id)
    return sh2_drc_dispatcher;

  // get a block link
  if (blink_free[tcache_id] != NULL) {
    bl = blink_free[tcache_id];
    blink_free[tcache_id] = bl->next;
  } else if (cnt >= block_link_pool_max_counts[tcache_id]) {
    dbg(1, "bl overflow for tcache %d", tcache_id);
    return sh2_drc_dispatcher;
  } else {
    bl += cnt;
    block_link_pool_counts[tcache_id] = cnt+1;
  }
  // prepare link and add to outgoing list of owner
  bl->tcache_id = tcache_id;
  bl->target_pc = pc;
  bl->jump = tcache_ptr;
  bl->o_next = owner->o_links;
  owner->o_links = bl;

  if (be != NULL) {
    dbg(2, "- early link from %p to pc %08x entry %p", bl->jump, pc, be->tcache_ptr);
    bl->target = be;
    bl->prev = NULL;
    if (be->links)
      be->links->prev = bl;
    bl->next = be->links;
    be->links = bl;
    return be->tcache_ptr;
  }
  else {
    add_to_hashlist_unresolved(bl, tcache_id);
    return sh2_drc_dispatcher;
  }
#else
  return sh2_drc_dispatcher;
#endif
}

static void dr_link_blocks(struct block_entry *be, int tcache_id)
{
#if LINK_BRANCHES
  u32 tcmask = hash_table_sizes[tcache_id] - 1;
  u32 pc = be->pc;
  struct block_link **head = &HASH_FUNC(unresolved_links[tcache_id], pc, tcmask);
  struct block_link *bl = *head, *next;

  while (bl != NULL) {
    next = bl->next;
    if (bl->target_pc == pc) {
      dbg(2, "- link from %p to pc %08x entry %p", bl->jump, pc, be->tcache_ptr);
      // move bl from unresolved_links to block_entry
      rm_from_hashlist_unresolved(bl, tcache_id);
      emith_jump_patch(bl->jump, be->tcache_ptr);

      bl->target = be;
      bl->prev = NULL;
      if (be->links)
        be->links->prev = bl;
      bl->next = be->links;
      be->links = bl;
    }
    bl = next;
  }
  // could sync arm caches here, but that's unnecessary
#endif
}

#define ADD_TO_ARRAY(array, count, item, failcode) { \
  if (count >= ARRAY_SIZE(array)) { \
    dbg(1, "warning: " #array " overflow"); \
    failcode; \
  } else \
    array[count++] = item; \
}

static int find_in_array(u32 *array, size_t size, u32 what)
{
  size_t i;
  for (i = 0; i < size; i++)
    if (what == array[i])
      return i;

  return -1;
}

// ---------------------------------------------------------------

// NB rcache allocation dependencies:
// - get_reg_arg/get_tmp_arg first (might evict other regs just allocated)
// - get_reg(..., NULL) before get_reg(..., &x) if it might get the same reg
// - get_reg(..., RC_GR_READ/RMW, ...) before WRITE (might evict needed reg)
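// illustrative ordering sketch (shorthand names as above, argument lists
// abbreviated):
//   get_reg_arg(0, Rn);             // 1. argument regs first
//   src = get_reg(Rm, RC_GR_READ);  // 2. reads/RMWs next
//   dst = get_reg(Rn, RC_GR_WRITE); // 3. write-only targets last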
// register cache / constant propagation stuff
typedef enum {
  RC_GR_READ,
  RC_GR_WRITE,
  RC_GR_RMW,
} rc_gr_mode;

static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr);
static void rcache_remove_vreg_alias(int x, sh2_reg_e r);

#define RCACHE_DUMP(msg) { \
  cache_reg_t *cp; \
  guest_reg_t *gp; \
  int i; \
  printf("cache dump %s:\n", msg); \
  printf("cache_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
    cp = &cache_regs[i]; \
    if (cp->type != HR_FREE || cp->gregs) \
      printf("%d: hr=%d t=%d f=%x m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->gregs); \
  } \
  printf("guest_regs:\n"); \
  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
    gp = &guest_regs[i]; \
    if (gp->vreg != -1 || gp->sreg >= 0) \
      printf("%d: v=%d f=%x s=%d\n", i, gp->vreg, gp->flags, gp->sreg); \
  } \
}

#if PROPAGATE_CONSTANTS
static void gconst_set(sh2_reg_e r, u32 val)
{
  guest_regs[r].flags |= GRF_CONST;
  guest_regs[r].val = val;
}

static void gconst_new(sh2_reg_e r, u32 val)
{
  gconst_set(r, val);
  guest_regs[r].flags |= GRF_CDIRTY;

  // throw away old r that we might have cached
  if (guest_regs[r].vreg >= 0)
    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
}

static void gconst_copy(sh2_reg_e rd, sh2_reg_e rs)
{
  guest_regs[rd].flags &= ~(GRF_CONST|GRF_CDIRTY);
  if (guest_regs[rs].flags & GRF_CONST)
    gconst_set(rd, guest_regs[rs].val);
}
#endif

static int gconst_get(sh2_reg_e r, u32 *val)
{
  if (guest_regs[r].flags & GRF_CONST) {
    *val = guest_regs[r].val;
    return 1;
  }
  return 0;
}

static int gconst_check(sh2_reg_e r)
{
  if (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY))
    return 1;
  return 0;
}
  1022. // update hr if dirty, else do nothing
  1023. static int gconst_try_read(int hr, sh2_reg_e r)
  1024. {
  1025. if (guest_regs[r].flags & GRF_CDIRTY) {
  1026. emith_move_r_imm(hr, guest_regs[r].val);
  1027. guest_regs[r].flags &= ~GRF_CDIRTY;
  1028. return 1;
  1029. }
  1030. return 0;
  1031. }
  1032. static u32 gconst_dirty_mask(void)
  1033. {
  1034. u32 mask = 0;
  1035. int i;
  1036. for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
  1037. if (guest_regs[i].flags & GRF_CDIRTY)
  1038. mask |= (1 << i);
  1039. return mask;
  1040. }
  1041. static void gconst_kill(sh2_reg_e r)
  1042. {
  1043. guest_regs[r].flags &= ~(GRF_CONST|GRF_CDIRTY);
  1044. }
  1045. static void gconst_clean(void)
  1046. {
  1047. int i;
  1048. for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
  1049. if (guest_regs[i].flags & GRF_CDIRTY) {
  1050. // using RC_GR_READ here: it will call gconst_try_read,
  1051. // cache the reg and mark it dirty.
  1052. rcache_get_reg_(i, RC_GR_READ, 0, NULL);
  1053. }
  1054. }
  1055. static void gconst_invalidate(void)
  1056. {
  1057. int i;
  1058. for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
  1059. guest_regs[i].flags &= ~(GRF_CONST|GRF_CDIRTY);
  1060. }
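// Constant propagation lifecycle (a minimal sketch, assuming
// PROPAGATE_CONSTANTS is enabled; SHR_R0 is just an example reg):
// a constant stays virtual (GRF_CONST|GRF_CDIRTY) until some consumer
// forces it into a host reg or back into the context.
#if 0
u32 v;
gconst_new(SHR_R0, 1);        // R0 == 1, exists only in the const table
if (gconst_get(SHR_R0, &v))   // v == 1, no host code emitted yet
  gconst_new(SHR_R0, v + 1);  // folded to R0 == 2 at translation time
gconst_clean();               // emits "mov hr, #2" via gconst_try_read
#endif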
  1061. static u16 rcache_counter;
  1062. static u32 rcache_static;
  1063. static u32 rcache_locked;
  1064. static u32 rcache_hint_soon;
  1065. static u32 rcache_hint_late;
  1066. #define rcache_hint (rcache_hint_soon|rcache_hint_late)
  1067. // binary search approach, since we don't have CLZ on ARM920T
  1068. #define FOR_ALL_BITS_SET_DO(mask, bit, code) { \
  1069. u32 __mask = mask; \
  1070. for (bit = 31; bit >= 0 && mask; bit--, __mask <<= 1) { \
  1071. if (!(__mask & (0xffff << 16))) \
  1072. bit -= 16, __mask <<= 16; \
  1073. if (!(__mask & (0xff << 24))) \
  1074. bit -= 8, __mask <<= 8; \
  1075. if (!(__mask & (0xf << 28))) \
  1076. bit -= 4, __mask <<= 4; \
  1077. if (!(__mask & (0x3 << 30))) \
  1078. bit -= 2, __mask <<= 2; \
  1079. if (!(__mask & (0x1 << 31))) \
  1080. bit -= 1, __mask <<= 1; \
  1081. if (__mask & (0x1 << 31)) { \
  1082. code; \
  1083. } \
  1084. } \
  1085. }
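// Usage sketch (illustration only): visit every guest reg aliased to a
// cache reg. Each iteration narrows in on the top set bit with at most
// five shift/test steps instead of a bit-by-bit scan.
#if 0
int r;
FOR_ALL_BITS_SET_DO(cache_regs[0].gregs, r,
  printf("cache reg 0 aliases guest reg %d\n", r));
#endif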
  1086. static void rcache_unmap_vreg(int x)
  1087. {
  1088. int i;
  1089. FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, i,
  1090. guest_regs[i].vreg = -1);
  1091. if (cache_regs[x].type != HR_STATIC)
  1092. cache_regs[x].type = HR_FREE;
  1093. cache_regs[x].gregs = 0;
  1094. cache_regs[x].flags &= (HRF_REG|HRF_TEMP);
  1095. }
  1096. static void rcache_clean_vreg(int x)
  1097. {
  1098. int r;
  1099. if (cache_regs[x].flags & HRF_DIRTY) { // writeback
  1100. cache_regs[x].flags &= ~HRF_DIRTY;
  1101. FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, r,
  1102. if (guest_regs[r].flags & GRF_DIRTY) {
  1103. if (guest_regs[r].flags & GRF_STATIC) {
  1104. if (guest_regs[r].vreg != guest_regs[r].sreg) {
  1105. if (!(cache_regs[guest_regs[r].sreg].flags & HRF_LOCKED)) {
  1106. // statically mapped reg not in its sreg. move back to sreg
  1107. rcache_clean_vreg(guest_regs[r].sreg);
  1108. rcache_unmap_vreg(guest_regs[r].sreg);
  1109. emith_move_r_r(cache_regs[guest_regs[r].sreg].hreg, cache_regs[guest_regs[r].vreg].hreg);
  1110. rcache_remove_vreg_alias(x, r);
  1111. cache_regs[guest_regs[r].sreg].gregs = (1 << r);
  1112. guest_regs[r].vreg = guest_regs[r].sreg;
  1113. } else {
  1114. // must evict since sreg is locked
  1115. emith_ctx_write(cache_regs[x].hreg, r * 4);
  1116. guest_regs[r].vreg = -1;
  1117. }
  1118. }
  1119. } else
  1120. emith_ctx_write(cache_regs[x].hreg, r * 4);
  1121. }
  1122. guest_regs[r].flags &= ~GRF_DIRTY;)
  1123. }
  1124. }
  1125. static void rcache_remove_vreg_alias(int x, sh2_reg_e r)
  1126. {
  1127. cache_regs[x].gregs &= ~(1 << r);
  1128. if (!cache_regs[x].gregs) {
  1129. // no reg mapped -> free vreg
  1130. if (cache_regs[x].type != HR_STATIC)
  1131. cache_regs[x].type = HR_FREE;
  1132. cache_regs[x].flags &= (HRF_REG|HRF_TEMP);
  1133. }
  1134. guest_regs[r].vreg = -1;
  1135. }
  1136. static void rcache_evict_vreg(int x)
  1137. {
  1138. rcache_clean_vreg(x);
  1139. rcache_unmap_vreg(x);
  1140. }
  1141. static void rcache_evict_vreg_aliases(int x, sh2_reg_e r)
  1142. {
  1143. cache_regs[x].gregs &= ~(1 << r);
  1144. rcache_evict_vreg(x);
  1145. cache_regs[x].gregs = (1 << r);
  1146. if (cache_regs[x].type != HR_STATIC)
  1147. cache_regs[x].type = HR_CACHED;
  1148. if (guest_regs[r].flags & GRF_DIRTY)
  1149. cache_regs[x].flags |= HRF_DIRTY;
  1150. }
  1151. static cache_reg_t *rcache_evict(void)
  1152. {
  1153. // evict reg with oldest stamp (only for HRF_REG, no temps)
  1154. int i, i_prio, oldest = -1, prio = 0;
  1155. u16 min_stamp = (u16)-1;
  1156. for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
  1157. // consider only unlocked REG
  1158. if (!(cache_regs[i].flags & HRF_REG) || (cache_regs[i].flags & HRF_LOCKED))
  1159. continue;
  1160. if (cache_regs[i].type == HR_FREE || (cache_regs[i].type == HR_TEMP)) {
  1161. oldest = i;
  1162. break;
  1163. }
  1164. if (cache_regs[i].type == HR_CACHED) {
  1165. if (rcache_locked & cache_regs[i].gregs)
  1166. // REGs needed for the current insn
  1167. i_prio = 1;
1168. else if (rcache_hint_soon & cache_regs[i].gregs)
1169. // REGs needed in the next few insns
1170. i_prio = 2;
1171. else if (rcache_hint_late & cache_regs[i].gregs)
1172. // REGs needed in some later insn
1173. i_prio = 3;
  1174. else
  1175. // REGs not needed soon
  1176. i_prio = 4;
  1177. if (prio < i_prio || (prio == i_prio && cache_regs[i].stamp < min_stamp)) {
  1178. min_stamp = cache_regs[i].stamp;
  1179. oldest = i;
  1180. prio = i_prio;
  1181. }
  1182. }
  1183. }
  1184. if (oldest == -1) {
  1185. printf("no registers to evict, aborting\n");
  1186. exit(1);
  1187. }
  1188. if (cache_regs[oldest].type == HR_CACHED)
  1189. rcache_evict_vreg(oldest);
  1190. cache_regs[oldest].type = HR_FREE;
  1191. cache_regs[oldest].flags &= (HRF_TEMP|HRF_REG);
  1192. cache_regs[oldest].gregs = 0;
  1193. return &cache_regs[oldest];
  1194. }
  1195. #if REMAP_REGISTER
  1196. // maps a host register to a REG
  1197. static int rcache_map_reg(sh2_reg_e r, int hr, int mode)
  1198. {
  1199. int i;
  1200. gconst_kill(r);
  1201. // lookup the TEMP hr maps to
  1202. i = reg_map_host[hr];
  1203. if (i < 0) {
  1204. // must not happen
  1205. printf("invalid host register %d\n", hr);
  1206. exit(1);
  1207. }
  1208. // deal with statically mapped regs
  1209. if (mode == RC_GR_RMW && (guest_regs[r].flags & GRF_STATIC)) {
  1210. if (guest_regs[r].vreg == guest_regs[r].sreg) {
  1211. // STATIC in its sreg with no aliases, and some processing pending
  1212. if (cache_regs[guest_regs[r].vreg].gregs == 1 << r)
  1213. return cache_regs[guest_regs[r].vreg].hreg;
  1214. } else if (!cache_regs[guest_regs[r].sreg].gregs)
  1215. // STATIC not in its sreg, with sreg available -> move it
  1216. i = guest_regs[r].sreg;
  1217. }
  1218. // remove old mappings of r and i if one exists
  1219. if (guest_regs[r].vreg >= 0)
  1220. rcache_remove_vreg_alias(guest_regs[r].vreg, r);
  1221. if (cache_regs[i].type == HR_CACHED)
  1222. rcache_unmap_vreg(i);
1223. // set new mapping
  1224. if (cache_regs[i].type != HR_STATIC)
  1225. cache_regs[i].type = HR_CACHED;
  1226. cache_regs[i].gregs = 1 << r;
  1227. cache_regs[i].flags &= (HRF_TEMP|HRF_REG);
  1228. cache_regs[i].stamp = ++rcache_counter;
  1229. cache_regs[i].flags |= HRF_DIRTY|HRF_LOCKED;
  1230. guest_regs[r].flags |= GRF_DIRTY;
  1231. guest_regs[r].vreg = i;
  1232. return cache_regs[i].hreg;
  1233. }
  1234. // remap vreg from a TEMP to a REG if it is hinted (upcoming TEMP invalidation)
  1235. static void rcache_remap_vreg(int r)
  1236. {
  1237. int i, j, free = -1, cached = -1, hinted = -1;
1238. u16 min_stamp_cached = (u16)-1, min_stamp_hinted = (u16)-1;
  1239. // r must be a vreg
  1240. if (cache_regs[r].type != HR_CACHED)
  1241. return;
  1242. // if r is already a REG or isn't used, clean here to avoid data loss on inval
  1243. if ((cache_regs[r].flags & HRF_REG) || !(rcache_hint & cache_regs[r].gregs)) {
  1244. rcache_clean_vreg(r);
  1245. return;
  1246. }
  1247. // find REG, either free or unused temp or oldest cached
  1248. for (i = 0; i < ARRAY_SIZE(cache_regs) && free < 0; i++) {
  1249. if ((cache_regs[i].flags & HRF_TEMP) || (cache_regs[i].flags & HRF_LOCKED))
  1250. continue;
  1251. if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP)
  1252. free = i;
  1253. if (cache_regs[i].type == HR_CACHED && !(rcache_hint & cache_regs[i].gregs)) {
  1254. if (cache_regs[i].stamp < min_stamp_cached) {
  1255. min_stamp_cached = cache_regs[i].stamp;
  1256. cached = i;
  1257. }
  1258. }
  1259. if (cache_regs[i].type == HR_CACHED && !(rcache_hint_soon & cache_regs[i].gregs)
  1260. && (rcache_hint_soon & cache_regs[r].gregs))
  1261. if (cache_regs[i].stamp < min_stamp_hinted) {
  1262. min_stamp_hinted = cache_regs[i].stamp;
  1263. hinted = i;
  1264. }
  1265. }
  1266. if (free >= 0) {
  1267. i = free;
  1268. } else if (cached >= 0 && cached != r) {
  1269. i = cached;
  1270. rcache_evict_vreg(i);
  1271. } else if (hinted >= 0 && hinted != r) {
  1272. i = hinted;
  1273. rcache_evict_vreg(i);
  1274. } else {
  1275. rcache_clean_vreg(r);
  1276. return;
  1277. }
  1278. // set new mapping and remove old one
  1279. cache_regs[i].type = HR_CACHED;
  1280. cache_regs[i].gregs = cache_regs[r].gregs;
  1281. cache_regs[i].flags &= (HRF_TEMP|HRF_REG);
  1282. cache_regs[i].flags |= cache_regs[r].flags & ~(HRF_TEMP|HRF_REG);
  1283. cache_regs[i].stamp = cache_regs[r].stamp;
  1284. emith_move_r_r(cache_regs[i].hreg, cache_regs[r].hreg);
  1285. for (j = 0; j < ARRAY_SIZE(guest_regs); j++)
  1286. if (guest_regs[j].vreg == r)
  1287. guest_regs[j].vreg = i;
  1288. cache_regs[r].type = HR_FREE;
  1289. cache_regs[r].flags &= (HRF_TEMP|HRF_REG);
  1290. cache_regs[r].gregs = 0;
  1291. }
  1292. #endif
  1293. // note: must not be called when doing conditional code
  1294. static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr)
  1295. {
  1296. cache_reg_t *tr = NULL;
  1297. int i, h, split = -1;
  1298. rcache_counter++;
  1299. // maybe already cached?
  1300. // if so, prefer against gconst (they must be in sync)
  1301. i = guest_regs[r].vreg;
  1302. if ((guest_regs[r].flags & GRF_STATIC) && i != guest_regs[r].sreg &&
  1303. !(cache_regs[guest_regs[r].sreg].flags & HRF_LOCKED) &&
  1304. (i < 0 || mode != RC_GR_READ) &&
  1305. !((rcache_hint_soon|rcache_locked) & cache_regs[guest_regs[r].sreg].gregs)) {
  1306. // good opportunity to relocate a remapped STATIC
  1307. h = guest_regs[r].sreg;
  1308. rcache_evict_vreg(h);
  1309. tr = &cache_regs[h];
  1310. if (i >= 0) {
  1311. if (mode != RC_GR_WRITE) {
  1312. if (hr)
  1313. *hr = cache_regs[i].hreg;
  1314. else
  1315. emith_move_r_r(cache_regs[h].hreg, cache_regs[i].hreg);
  1316. hr = NULL;
  1317. }
  1318. rcache_remove_vreg_alias(guest_regs[r].vreg, r);
  1319. } else if (mode != RC_GR_WRITE) {
  1320. if (gconst_try_read(tr->hreg, r)) {
  1321. tr->flags |= HRF_DIRTY;
  1322. guest_regs[r].flags |= GRF_DIRTY;
  1323. } else
  1324. emith_ctx_read(tr->hreg, r * 4);
  1325. }
  1326. guest_regs[r].vreg = guest_regs[r].sreg;
  1327. tr->gregs = 1 << r;
  1328. goto end;
  1329. } else if (i >= 0) {
  1330. if (mode == RC_GR_READ || !(cache_regs[i].gregs & ~(1 << r))) {
  1331. // either only reading, or no multiple mapping
  1332. tr = &cache_regs[i];
  1333. goto end;
  1334. }
1335. // split if aliases are needed soon, the vreg is locked, or it's a STATIC slot not belonging to r
  1336. if (((rcache_hint|rcache_locked) & cache_regs[i].gregs & ~(1 << r)) ||
  1337. (cache_regs[i].flags & HRF_LOCKED) ||
  1338. (cache_regs[i].type == HR_STATIC && !(guest_regs[r].flags & GRF_STATIC))) {
  1339. // need to split up. take reg out here to avoid unnecessary writebacks
  1340. cache_regs[i].gregs &= ~(1 << r);
  1341. split = i;
  1342. } else {
  1343. // aliases not needed anytime soon, remove them
  1344. // XXX split aliases away if writing and static and not locked and hinted?
  1345. rcache_evict_vreg_aliases(i, r);
  1346. tr = &cache_regs[i];
  1347. goto end;
  1348. }
  1349. }
  1350. // get a free reg, but use temps only if r is not needed soon
  1351. for (i = ARRAY_SIZE(cache_regs) - 1; i >= 0; i--) {
  1352. if ((cache_regs[i].type == HR_FREE ||
  1353. (cache_regs[i].type == HR_TEMP && !(cache_regs[i].flags & HRF_LOCKED))) &&
  1354. (!(rcache_hint & (1 << r)) || (cache_regs[i].flags & HRF_REG))) {
  1355. tr = &cache_regs[i];
  1356. break;
  1357. }
  1358. }
  1359. if (!tr)
  1360. tr = rcache_evict();
  1361. tr->type = HR_CACHED;
  1362. tr->gregs = 1 << r;
  1363. guest_regs[r].vreg = tr - cache_regs;
  1364. if (mode != RC_GR_WRITE) {
  1365. if (gconst_try_read(tr->hreg, r)) {
  1366. tr->flags |= HRF_DIRTY;
  1367. guest_regs[r].flags |= GRF_DIRTY;
  1368. } else if (split >= 0) {
  1369. if (hr) {
  1370. cache_regs[split].flags |= HRF_LOCKED;
  1371. *hr = cache_regs[split].hreg;
  1372. hr = NULL;
  1373. } else if (tr->hreg != cache_regs[split].hreg)
  1374. emith_move_r_r(tr->hreg, cache_regs[split].hreg);
  1375. } else
  1376. emith_ctx_read(tr->hreg, r * 4);
  1377. }
  1378. end:
  1379. if (hr)
  1380. *hr = tr->hreg;
  1381. if (do_locking)
  1382. tr->flags |= HRF_LOCKED;
  1383. tr->stamp = rcache_counter;
  1384. if (mode != RC_GR_READ) {
  1385. tr->flags |= HRF_DIRTY;
  1386. guest_regs[r].flags |= GRF_DIRTY;
  1387. gconst_kill(r);
  1388. }
  1389. return tr->hreg;
  1390. }
  1391. static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr)
  1392. {
  1393. return rcache_get_reg_(r, mode, 1, hr);
  1394. }
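// Caller pattern sketch (mirrors the NB on allocation order above): fetch
// read operands before the written one, so allocating the target can't
// evict a source still needed for this insn.
#if 0
tmp  = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
emith_add_r_r(tmp2, tmp);     // Rn += Rm; Rn's host reg is now dirty
#endif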
  1395. static int rcache_get_tmp(void)
  1396. {
  1397. cache_reg_t *tr = NULL;
  1398. int i;
  1399. // use any free reg, but prefer TEMP regs
  1400. for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
  1401. if (cache_regs[i].type == HR_FREE ||
  1402. (cache_regs[i].type == HR_TEMP && !(cache_regs[i].flags & HRF_LOCKED))) {
  1403. tr = &cache_regs[i];
  1404. break;
  1405. }
  1406. }
  1407. if (!tr)
  1408. tr = rcache_evict();
  1409. tr->type = HR_TEMP;
  1410. tr->flags |= HRF_LOCKED;
  1411. return tr->hreg;
  1412. }
  1413. static int rcache_get_hr_id(int hr)
  1414. {
  1415. int i;
  1416. i = reg_map_host[hr];
  1417. if (i < 0) // can't happen
  1418. exit(1);
  1419. #if REMAP_REGISTER
  1420. if (cache_regs[i].type == HR_CACHED)
  1421. rcache_remap_vreg(i);
  1422. #endif
  1423. if (cache_regs[i].type == HR_CACHED)
  1424. rcache_evict_vreg(i);
  1425. else if (cache_regs[i].type == HR_TEMP && (cache_regs[i].flags & HRF_LOCKED)) {
  1426. printf("host reg %d already used, aborting\n", hr);
  1427. exit(1);
  1428. }
  1429. return i;
  1430. }
  1431. static int rcache_get_arg_id(int arg)
  1432. {
  1433. int hr = 0;
  1434. host_arg2reg(hr, arg);
  1435. return rcache_get_hr_id(hr);
  1436. }
  1437. // get a reg to be used as function arg
  1438. static int rcache_get_tmp_arg(int arg)
  1439. {
  1440. int id = rcache_get_arg_id(arg);
  1441. cache_regs[id].type = HR_TEMP;
  1442. cache_regs[id].flags |= HRF_LOCKED;
  1443. return cache_regs[id].hreg;
  1444. }
  1445. // ... as return value after a call
  1446. static int rcache_get_tmp_ret(void)
  1447. {
  1448. int id = rcache_get_hr_id(RET_REG);
  1449. cache_regs[id].type = HR_TEMP;
  1450. cache_regs[id].flags |= HRF_LOCKED;
  1451. return cache_regs[id].hreg;
  1452. }
  1453. // same but caches a reg if access is readonly (announced by hr being NULL)
  1454. static int rcache_get_reg_arg(int arg, sh2_reg_e r, int *hr)
  1455. {
  1456. int i, srcr, dstr, dstid;
  1457. int dirty = 0, src_dirty = 0, is_const = 0, is_cached = 0;
  1458. u32 val;
  1459. host_arg2reg(dstr, arg);
  1460. i = guest_regs[r].vreg;
  1461. if (i >= 0 && cache_regs[i].type == HR_CACHED && cache_regs[i].hreg == dstr)
  1462. // r is already in arg
  1463. dstid = i;
  1464. else
  1465. dstid = rcache_get_arg_id(arg);
  1466. dstr = cache_regs[dstid].hreg;
  1467. if (rcache_hint & (1 << r)) {
  1468. // r is needed later on anyway
  1469. srcr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
  1470. is_cached = (cache_regs[reg_map_host[srcr]].type == HR_CACHED);
  1471. } else if ((guest_regs[r].flags & GRF_CDIRTY) && gconst_get(r, &val)) {
1472. // r has an uncommitted const - load it into arg, but keep the constant uncommitted
  1473. srcr = dstr;
  1474. is_const = 1;
  1475. } else if ((i = guest_regs[r].vreg) >= 0) {
  1476. // maybe already cached?
  1477. srcr = cache_regs[i].hreg;
  1478. is_cached = (cache_regs[reg_map_host[srcr]].type == HR_CACHED);
  1479. } else {
  1480. // must read either const or from ctx
  1481. srcr = dstr;
  1482. if (rcache_static & (1 << r))
  1483. srcr = rcache_get_reg_(r, RC_GR_READ, 0, NULL);
  1484. else if (gconst_try_read(srcr, r))
  1485. dirty = 1;
  1486. else
  1487. emith_ctx_read(srcr, r * 4);
  1488. }
  1489. if (is_cached) {
  1490. i = reg_map_host[srcr];
  1491. if (srcr == dstr) { // evict aliases here since it is reallocated below
  1492. if (guest_regs[r].flags & GRF_STATIC) // move STATIC back to its sreg
  1493. rcache_clean_vreg(guest_regs[r].vreg);
  1494. #if REMAP_REGISTER
  1495. rcache_remap_vreg(i);
  1496. #endif
  1497. if (cache_regs[i].type == HR_CACHED)
  1498. rcache_evict_vreg(i);
  1499. }
  1500. else if (hr != NULL) // must lock srcr if not copied here
  1501. cache_regs[i].flags |= HRF_LOCKED;
  1502. if (guest_regs[r].flags & GRF_DIRTY)
  1503. src_dirty = 1;
  1504. }
  1505. cache_regs[dstid].type = HR_TEMP;
  1506. if (is_const) {
1507. // uncommitted constant
  1508. emith_move_r_imm(srcr, val);
  1509. } else if (dstr != srcr) {
  1510. // arg is a copy of cached r
  1511. if (hr == NULL)
  1512. emith_move_r_r(dstr, srcr);
  1513. } else if (hr != NULL) {
  1514. // caller will modify arg, so it will soon be out of sync with r
  1515. if (dirty || src_dirty)
  1516. emith_ctx_write(dstr, r * 4); // must clean since arg will be modified
  1517. } else if (guest_regs[r].vreg < 0) {
  1518. // keep arg as vreg for r
  1519. cache_regs[dstid].type = HR_CACHED;
  1520. cache_regs[dstid].gregs = 1 << r;
  1521. guest_regs[r].vreg = dstid;
1522. if (dirty || src_dirty) { // mark as modified for cleaning later on
  1523. cache_regs[dstid].flags |= HRF_DIRTY;
  1524. guest_regs[r].flags |= GRF_DIRTY;
  1525. }
  1526. }
  1527. if (hr)
  1528. *hr = srcr;
  1529. cache_regs[dstid].stamp = ++rcache_counter;
  1530. cache_regs[dstid].flags |= HRF_LOCKED;
  1531. return dstr;
  1532. }
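// Call setup sketch (argument roles as used by the memhandlers below):
// arg0 takes the address, arg1 the data, then the handler is called with
// temps cleaned/invalidated around it.
#if 0
rcache_get_reg_arg(0, GET_Rn(), NULL);  // address in arg0
rcache_get_reg_arg(1, GET_Rm(), NULL);  // data in arg1
emit_memhandler_write(2);               // 32 bit store via sh2_drc_write32
#endif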
  1533. static void rcache_free_tmp(int hr)
  1534. {
  1535. int i = reg_map_host[hr];
  1536. if (i < 0 || cache_regs[i].type != HR_TEMP) {
  1537. printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, cache_regs[i].type);
  1538. return;
  1539. }
  1540. cache_regs[i].type = HR_FREE;
  1541. cache_regs[i].flags &= (HRF_REG|HRF_TEMP);
  1542. }
  1543. // saves temporary result either in REG or in drctmp
  1544. static int rcache_save_tmp(int hr)
  1545. {
  1546. int i, free = -1, cached = -1;
  1547. u16 min_stamp = (u16)-1;
  1548. // find REG, either free or unlocked temp or oldest non-hinted cached
  1549. for (i = 0; i < ARRAY_SIZE(cache_regs) && free < 0; i++) {
  1550. if ((cache_regs[i].flags & HRF_TEMP) || (cache_regs[i].flags & HRF_LOCKED))
  1551. continue;
  1552. if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP)
  1553. free = i;
  1554. if (cache_regs[i].type == HR_CACHED &&
  1555. !((rcache_hint | rcache_locked) & cache_regs[i].gregs)) {
  1556. if (cache_regs[i].stamp < min_stamp) {
  1557. min_stamp = cache_regs[i].stamp;
  1558. cached = i;
  1559. }
  1560. }
  1561. }
  1562. if (free >= 0)
  1563. i = free;
  1564. else if (cached >= 0) {
  1565. i = cached;
  1566. rcache_evict_vreg(i);
  1567. } else {
  1568. // if none is available, store in drctmp
  1569. emith_ctx_write(hr, offsetof(SH2, drc_tmp));
  1570. rcache_free_tmp(hr);
  1571. return -1;
  1572. }
  1573. cache_regs[i].type = HR_CACHED;
  1574. cache_regs[i].gregs = 0; // not storing any guest register
  1575. cache_regs[i].flags &= (HRF_TEMP|HRF_REG);
  1576. cache_regs[i].flags |= HRF_LOCKED;
  1577. cache_regs[i].stamp = ++rcache_counter;
  1578. emith_move_r_r(cache_regs[i].hreg, hr);
  1579. rcache_free_tmp(hr);
  1580. return i;
  1581. }
  1582. static int rcache_restore_tmp(int r)
  1583. {
  1584. int hr;
  1585. // find REG with tmp store: cached but with no gregs
  1586. if (r >= 0) {
  1587. if (cache_regs[r].type != HR_CACHED || cache_regs[r].gregs) {
  1588. printf("invalid tmp storage %d\n", r);
  1589. exit(1);
  1590. }
  1591. // found, transform to a TEMP
  1592. cache_regs[r].type = HR_TEMP;
  1593. cache_regs[r].flags |= HRF_LOCKED;
  1594. return cache_regs[r].hreg;
  1595. }
  1596. // if not available, create a TEMP store and fetch from drctmp
  1597. hr = rcache_get_tmp();
  1598. emith_ctx_read(hr, offsetof(SH2, drc_tmp));
  1599. return hr;
  1600. }
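// Pairing sketch (emit_indirect_read_double below is the real user):
// rcache_save_tmp parks a temp in a spare REG, or in SH2.drc_tmp if none
// is free; rcache_restore_tmp hands it back either way.
#if 0
tmp = rcache_get_tmp();
/* ... compute something into tmp ... */
tmp = rcache_save_tmp(tmp);     // REG index, or -1 if spilled to drc_tmp
/* ... code that may clobber temps, e.g. a memhandler call ... */
tmp = rcache_restore_tmp(tmp);  // a host reg again, reloaded if spilled
#endif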
  1601. static void rcache_unlock(int hr)
  1602. {
  1603. if (hr >= 0) {
  1604. cache_regs[hr].flags &= ~HRF_LOCKED;
  1605. rcache_locked &= ~cache_regs[hr].gregs;
  1606. }
  1607. }
  1608. static void rcache_unlock_all(void)
  1609. {
  1610. int i;
  1611. for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
  1612. cache_regs[i].flags &= ~HRF_LOCKED;
  1613. }
  1614. static inline void rcache_set_locked(u32 mask)
  1615. {
  1616. rcache_locked = mask & ~rcache_static;
  1617. }
  1618. static inline void rcache_set_hint_soon(u32 mask)
  1619. {
  1620. rcache_hint_soon = mask & ~rcache_static;
  1621. }
  1622. static inline void rcache_set_hint_late(u32 mask)
  1623. {
  1624. rcache_hint_late = mask & ~rcache_static;
  1625. }
  1626. static inline int rcache_is_hinted(sh2_reg_e r)
  1627. {
  1628. // consider static REGs as always hinted, since they are always there
  1629. return ((rcache_hint | rcache_static) & (1 << r));
  1630. }
  1631. static inline int rcache_is_cached(sh2_reg_e r)
  1632. {
1633. // static REGs are always cached, since they stay mapped to their sreg
  1634. return (guest_regs[r].vreg >= 0);
  1635. }
  1636. static inline u32 rcache_used_hreg_mask(void)
  1637. {
  1638. u32 mask = 0;
  1639. int i;
  1640. for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
  1641. if (cache_regs[i].type != HR_FREE)
  1642. mask |= 1 << cache_regs[i].hreg;
  1643. return mask & ~rcache_static;
  1644. }
  1645. static inline u32 rcache_dirty_mask(void)
  1646. {
  1647. u32 mask = 0;
  1648. int i;
  1649. for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
  1650. if (guest_regs[i].flags & GRF_DIRTY)
  1651. mask |= 1 << i;
  1652. mask |= gconst_dirty_mask();
  1653. return mask;
  1654. }
  1655. static inline u32 rcache_reg_mask(void)
  1656. {
  1657. u32 mask = 0;
  1658. int i;
  1659. for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
  1660. if (cache_regs[i].type == HR_CACHED)
  1661. mask |= cache_regs[i].gregs;
  1662. return mask;
  1663. }
  1664. static void rcache_clean_tmp(void)
  1665. {
  1666. int i;
  1667. for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
  1668. if (cache_regs[i].type == HR_CACHED && (cache_regs[i].flags & HRF_TEMP))
  1669. #if REMAP_REGISTER
  1670. rcache_remap_vreg(i);
  1671. #else
  1672. rcache_clean_vreg(i);
  1673. #endif
  1674. }
  1675. static void rcache_clean_mask(u32 mask)
  1676. {
  1677. int i;
  1678. // XXX consider gconst?
  1679. if (!(mask &= ~rcache_static & ~gconst_dirty_mask()))
  1680. return;
  1681. // clean only vregs where all aliases are covered by the mask
  1682. for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
  1683. if (cache_regs[i].type == HR_CACHED &&
  1684. (cache_regs[i].gregs & mask) && !(cache_regs[i].gregs & ~mask))
  1685. rcache_clean_vreg(i);
  1686. }
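// E.g. with mask = BITMASK2(SHR_R0, SHR_R1): a vreg holding only R0 (or
// R0 and R1 as aliases) is written back, while a vreg holding R0 and R4
// stays dirty - R4 may still change, and cleaning now could force a
// second writeback later.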
  1687. static void rcache_clean(void)
  1688. {
  1689. int i;
  1690. gconst_clean();
  1691. for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--)
  1692. if (cache_regs[i].type == HR_CACHED || cache_regs[i].type == HR_STATIC)
  1693. rcache_clean_vreg(i);
  1694. }
  1695. static void rcache_invalidate_tmp(void)
  1696. {
  1697. int i;
  1698. for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
  1699. if (cache_regs[i].flags & HRF_TEMP) {
  1700. if (cache_regs[i].type == HR_CACHED)
  1701. rcache_unmap_vreg(i);
  1702. cache_regs[i].type = HR_FREE;
  1703. cache_regs[i].flags &= (HRF_TEMP|HRF_REG);
  1704. cache_regs[i].gregs = 0;
  1705. }
  1706. }
  1707. }
  1708. static void rcache_invalidate(void)
  1709. {
  1710. int i;
  1711. for (i = 0; i < ARRAY_SIZE(cache_regs); i++) {
  1712. cache_regs[i].flags &= (HRF_TEMP|HRF_REG);
  1713. if (cache_regs[i].type != HR_STATIC)
  1714. cache_regs[i].type = HR_FREE;
  1715. cache_regs[i].gregs = 0;
  1716. }
  1717. for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
  1718. guest_regs[i].flags &= GRF_STATIC;
  1719. if (!(guest_regs[i].flags & GRF_STATIC))
  1720. guest_regs[i].vreg = -1;
  1721. else {
  1722. if (guest_regs[i].vreg < 0)
  1723. emith_ctx_read(cache_regs[guest_regs[i].sreg].hreg, i*4);
  1724. else if (guest_regs[i].vreg != guest_regs[i].sreg)
  1725. emith_move_r_r(cache_regs[guest_regs[i].sreg].hreg,
  1726. cache_regs[guest_regs[i].vreg].hreg);
  1727. cache_regs[guest_regs[i].sreg].gregs = 1 << i;
  1728. guest_regs[i].vreg = guest_regs[i].sreg;
  1729. }
  1730. }
  1731. rcache_counter = 0;
  1732. rcache_hint_soon = rcache_hint_late = 0;
  1733. gconst_invalidate();
  1734. }
  1735. static void rcache_flush(void)
  1736. {
  1737. rcache_unlock_all();
  1738. rcache_clean();
  1739. rcache_invalidate();
  1740. }
  1741. static void rcache_init(void)
  1742. {
  1743. static int once = 1;
  1744. int i;
1745. // init is executed on every ROM load, but this must only be executed once...
  1746. if (once) {
  1747. memset(reg_map_host, -1, sizeof(reg_map_host));
  1748. for (i = 0; i < ARRAY_SIZE(cache_regs); i++)
  1749. reg_map_host[cache_regs[i].hreg] = i;
  1750. for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
  1751. if (guest_regs[i].flags & GRF_STATIC) {
  1752. rcache_static |= (1 << i);
  1753. guest_regs[i].sreg = reg_map_host[guest_regs[i].sreg];
  1754. cache_regs[guest_regs[i].sreg].type = HR_STATIC;
  1755. } else
  1756. guest_regs[i].sreg = -1;
  1757. once = 0;
  1758. }
  1759. for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
  1760. if (guest_regs[i].flags & GRF_STATIC) {
  1761. guest_regs[i].vreg = guest_regs[i].sreg;
  1762. cache_regs[guest_regs[i].sreg].gregs = (1 << i);
  1763. }
  1764. rcache_invalidate();
  1765. }
  1766. // ---------------------------------------------------------------
  1767. static int emit_get_rbase_and_offs(SH2 *sh2, u32 a, u32 *offs)
  1768. {
  1769. u32 omask = 0xff; // offset mask, XXX: ARM oriented..
  1770. u32 mask = 0;
  1771. int poffs;
  1772. int hr;
  1773. unsigned long la;
  1774. poffs = dr_ctx_get_mem_ptr(a, &mask);
  1775. if (poffs == -1)
  1776. return -1;
  1777. hr = rcache_get_tmp();
  1778. if (mask < 0x1000) {
  1779. // can't access data array or BIOS directly from ROM or SDRAM,
1780. // since code may run on both SH2s (tcache_id of translation block needed)
  1781. emith_ctx_read_ptr(hr, poffs);
  1782. if (a & mask & ~omask)
  1783. emith_add_r_r_ptr_imm(hr, hr, a & mask & ~omask);
  1784. *offs = a & omask;
  1785. } else {
  1786. // known fixed host address
  1787. la = (unsigned long)*(void **)((char *)sh2 + poffs) + (a & mask);
  1788. *offs = la & omask;
  1789. emith_move_r_ptr_imm(hr, la & ~omask);
  1790. }
  1791. return hr;
  1792. }
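// Usage sketch (hypothetical SDRAM address): split a fixed guest address
// into a host base reg plus small offset, so the caller can emit a direct
// load instead of a memhandler call.
#if 0
u32 offs_;
tmp = emit_get_rbase_and_offs(sh2, 0x06000100, &offs_);
if (tmp != -1) {
  emith_read_r_r_offs(tmp, tmp, offs_);  // direct read, no handler call
  rcache_free_tmp(tmp);
}
#endif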
  1793. // read const data from const ROM address
  1794. static int emit_get_rom_data(sh2_reg_e r, u32 offs, int size, u32 *val)
  1795. {
  1796. u32 tmp;
  1797. *val = 0;
  1798. if (gconst_get(r, &tmp)) {
  1799. tmp += offs;
  1800. if (dr_is_rom(tmp)) {
  1801. switch (size & MF_SIZEMASK) {
  1802. case 0: *val = (s8)p32x_sh2_read8(tmp, sh2s); break; // 8
  1803. case 1: *val = (s16)p32x_sh2_read16(tmp, sh2s); break; // 16
  1804. case 2: *val = p32x_sh2_read32(tmp, sh2s); break; // 32
  1805. }
  1806. return 1;
  1807. }
  1808. }
  1809. return 0;
  1810. }
  1811. static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
  1812. {
  1813. #if PROPAGATE_CONSTANTS
  1814. gconst_new(dst, imm);
  1815. #else
  1816. int hr = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  1817. emith_move_r_imm(hr, imm);
  1818. #endif
  1819. }
  1820. static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
  1821. {
  1822. int hr_d, hr_s;
  1823. if (guest_regs[src].vreg >= 0 || gconst_check(src) || rcache_is_hinted(src)) {
  1824. hr_s = rcache_get_reg(src, RC_GR_READ, NULL);
  1825. #if ALIAS_REGISTERS
  1826. // check for aliasing
  1827. int i = guest_regs[src].vreg;
  1828. if (guest_regs[dst].vreg != i) {
  1829. // remove possible old mapping of dst
  1830. if (guest_regs[dst].vreg >= 0)
  1831. rcache_remove_vreg_alias(guest_regs[dst].vreg, dst);
  1832. // make dst an alias of src
  1833. cache_regs[i].gregs |= (1 << dst);
  1834. cache_regs[i].flags |= HRF_DIRTY;
  1835. guest_regs[dst].flags |= GRF_DIRTY;
  1836. guest_regs[dst].vreg = i;
  1837. gconst_kill(dst);
  1838. #if PROPAGATE_CONSTANTS
  1839. gconst_copy(dst, src);
  1840. #endif
  1841. return;
  1842. }
  1843. #endif
  1844. hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  1845. emith_move_r_r(hr_d, hr_s);
  1846. #if PROPAGATE_CONSTANTS
  1847. gconst_copy(dst, src);
  1848. #endif
  1849. } else {
  1850. hr_d = rcache_get_reg(dst, RC_GR_WRITE, NULL);
  1851. emith_ctx_read(hr_d, src * 4);
  1852. }
  1853. }
  1854. // T must be clear, and comparison done just before this
  1855. static void emit_or_t_if_eq(int srr)
  1856. {
  1857. EMITH_SJMP_START(DCOND_NE);
  1858. emith_or_r_imm_c(DCOND_EQ, srr, T);
  1859. EMITH_SJMP_END(DCOND_NE);
  1860. }
  1861. // rd = @(arg0)
  1862. static int emit_memhandler_read(int size)
  1863. {
  1864. rcache_clean_tmp();
  1865. #ifndef DRC_SR_REG
1866. // must write back cycles for poll detection stuff
  1867. if (guest_regs[SHR_SR].vreg != -1)
  1868. rcache_evict_vreg(guest_regs[SHR_SR].vreg);
  1869. #endif
  1870. switch (size & MF_SIZEMASK) {
  1871. case 0: emith_call(sh2_drc_read8); break; // 8
  1872. case 1: emith_call(sh2_drc_read16); break; // 16
  1873. case 2: emith_call(sh2_drc_read32); break; // 32
  1874. }
  1875. rcache_invalidate_tmp();
  1876. return rcache_get_tmp_ret();
  1877. }
  1878. // @(arg0) = arg1
  1879. static void emit_memhandler_write(int size)
  1880. {
  1881. rcache_clean_tmp();
  1882. #ifndef DRC_SR_REG
  1883. if (guest_regs[SHR_SR].vreg != -1)
  1884. rcache_evict_vreg(guest_regs[SHR_SR].vreg);
  1885. #endif
  1886. switch (size & MF_SIZEMASK) {
  1887. case 0: emith_call(sh2_drc_write8); break; // 8
  1888. case 1: emith_call(sh2_drc_write16); break; // 16
  1889. case 2: emith_call(sh2_drc_write32); break; // 32
  1890. }
  1891. rcache_invalidate_tmp();
  1892. }
  1893. // rd = @(Rs,#offs); rd < 0 -> return a temp
  1894. static int emit_memhandler_read_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
  1895. {
  1896. int hr, hr2;
  1897. u32 val, offs2;
  1898. #if PROPAGATE_CONSTANTS
  1899. if (emit_get_rom_data(rs, offs, size, &val)) {
  1900. if (rd == SHR_TMP) {
  1901. hr2 = rcache_get_tmp();
  1902. emith_move_r_imm(hr2, val);
  1903. } else {
  1904. emit_move_r_imm32(rd, val);
  1905. hr2 = rcache_get_reg(rd, RC_GR_RMW, NULL);
  1906. }
  1907. if ((size & MF_POSTINCR) && gconst_get(rs, &val))
  1908. gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  1909. return hr2;
  1910. }
  1911. if (gconst_get(rs, &val)) {
  1912. hr = emit_get_rbase_and_offs(sh2, val + offs, &offs2);
  1913. if (hr != -1) {
  1914. if (rd == SHR_TMP)
  1915. hr2 = rcache_get_tmp();
  1916. else
  1917. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  1918. switch (size & MF_SIZEMASK) {
  1919. case 0: // 8
  1920. emith_read8s_r_r_offs(hr2, hr, offs2 ^ 1);
  1921. break;
  1922. case 1: // 16
  1923. emith_read16s_r_r_offs(hr2, hr, offs2);
  1924. break;
  1925. case 2: // 32
  1926. emith_read_r_r_offs(hr2, hr, offs2);
  1927. emith_ror(hr2, hr2, 16);
  1928. break;
  1929. }
  1930. rcache_free_tmp(hr);
  1931. if (size & MF_POSTINCR)
  1932. gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  1933. return hr2;
  1934. }
  1935. }
  1936. #endif
  1937. if (gconst_get(rs, &val) && (!(size & MF_POSTINCR) /*|| !(rcache_hint_soon & (1 << rs))*/)) {
  1938. hr = rcache_get_tmp_arg(0);
  1939. emith_move_r_imm(hr, val + offs);
  1940. if (size & MF_POSTINCR)
  1941. gconst_new(rs, val + (1 << (size & MF_SIZEMASK)));
  1942. } else if (offs || (size & MF_POSTINCR)) {
  1943. hr = rcache_get_reg_arg(0, rs, &hr2);
  1944. if (offs || hr != hr2)
  1945. emith_add_r_r_imm(hr, hr2, offs);
  1946. if (size & MF_POSTINCR) {
  1947. hr = rcache_get_reg(rs, RC_GR_WRITE, NULL);
  1948. emith_add_r_r_imm(hr, hr2, 1 << (size & MF_SIZEMASK));
  1949. }
  1950. } else
  1951. rcache_get_reg_arg(0, rs, NULL);
  1952. hr = emit_memhandler_read(size);
  1953. size &= MF_SIZEMASK;
  1954. if (rd == SHR_TMP)
  1955. hr2 = hr;
  1956. else
  1957. #if REMAP_REGISTER
  1958. hr2 = rcache_map_reg(rd, hr, size != 2 ? RC_GR_RMW : RC_GR_WRITE);
  1959. #else
  1960. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  1961. #endif
  1962. if (rd != SHR_TMP && size != 2) { // 16, 8
  1963. emith_sext(hr2, hr, size ? 16 : 8);
  1964. } else if (hr != hr2) // 32
  1965. emith_move_r_r(hr2, hr);
  1966. if (hr != hr2)
  1967. rcache_free_tmp(hr);
  1968. return hr2;
  1969. }
  1970. // @(Rs,#offs) = rd; rd < 0 -> write arg1
  1971. static void emit_memhandler_write_rr(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
  1972. {
  1973. int hr, hr2;
  1974. u32 val;
  1975. if (rd == SHR_TMP) {
  1976. host_arg2reg(hr2, 1);
  1977. } else if ((size & MF_PREDECR) && rd == rs) { // must avoid caching rd in arg1
  1978. hr2 = rcache_get_reg_arg(1, rd, &hr);
  1979. if (hr != hr2) emith_move_r_r(hr2, hr);
  1980. } else
  1981. hr2 = rcache_get_reg_arg(1, rd, NULL);
  1982. if (gconst_get(rs, &val) && (!(size & MF_PREDECR) /*|| !(rcache_hint_soon & (1 << rs))*/)) {
  1983. if (size & MF_PREDECR) {
  1984. val -= 1 << (size & MF_SIZEMASK);
  1985. gconst_new(rs, val);
  1986. }
  1987. hr = rcache_get_tmp_arg(0);
  1988. emith_move_r_imm(hr, val + offs);
  1989. } else if (offs || (size & MF_PREDECR)) {
  1990. if (size & MF_PREDECR) {
  1991. hr = rcache_get_reg(rs, RC_GR_RMW, &hr2);
  1992. emith_sub_r_r_imm(hr, hr2, 1 << (size & MF_SIZEMASK));
  1993. }
  1994. hr = rcache_get_reg_arg(0, rs, &hr2);
  1995. if (offs || hr != hr2)
  1996. emith_add_r_r_imm(hr, hr2, offs);
  1997. } else
  1998. rcache_get_reg_arg(0, rs, NULL);
  1999. emit_memhandler_write(size);
  2000. }
  2001. // rd = @(Rx,Ry); rd < 0 -> return a temp
  2002. static int emit_indirect_indexed_read(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
  2003. {
  2004. int hr, hr2;
  2005. int tx, ty;
  2006. #if PROPAGATE_CONSTANTS
  2007. u32 offs;
  2008. if (gconst_get(ry, &offs))
  2009. return emit_memhandler_read_rr(sh2, rd, rx, offs, size);
  2010. if (gconst_get(rx, &offs))
  2011. return emit_memhandler_read_rr(sh2, rd, ry, offs, size);
  2012. #endif
  2013. hr = rcache_get_reg_arg(0, rx, &tx);
  2014. ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  2015. emith_add_r_r_r(hr, tx, ty);
  2016. hr = emit_memhandler_read(size);
  2017. size &= MF_SIZEMASK;
  2018. if (rd != SHR_TMP)
  2019. #if REMAP_REGISTER
  2020. hr2 = rcache_map_reg(rd, hr, size != 2 ? RC_GR_RMW : RC_GR_WRITE);
  2021. #else
  2022. hr2 = rcache_get_reg(rd, RC_GR_WRITE, NULL);
  2023. #endif
  2024. else
  2025. hr2 = hr;
  2026. if (rd != SHR_TMP && size != 2) { // 16, 8
  2027. emith_sext(hr2, hr, size ? 16 : 8);
  2028. } else if (hr != hr2) // 32
  2029. emith_move_r_r(hr2, hr);
  2030. if (hr != hr2)
  2031. rcache_free_tmp(hr);
  2032. return hr2;
  2033. }
  2034. // @(Rx,Ry) = rd; rd < 0 -> write arg1
  2035. static void emit_indirect_indexed_write(SH2 *sh2, sh2_reg_e rd, sh2_reg_e rx, sh2_reg_e ry, int size)
  2036. {
  2037. int hr, tx, ty;
  2038. #if PROPAGATE_CONSTANTS
  2039. u32 offs;
  2040. if (gconst_get(ry, &offs))
  2041. return emit_memhandler_write_rr(sh2, rd, rx, offs, size);
  2042. if (gconst_get(rx, &offs))
  2043. return emit_memhandler_write_rr(sh2, rd, ry, offs, size);
  2044. #endif
  2045. if (rd != SHR_TMP)
  2046. rcache_get_reg_arg(1, rd, NULL);
  2047. hr = rcache_get_reg_arg(0, rx, &tx);
  2048. ty = rcache_get_reg(ry, RC_GR_READ, NULL);
  2049. emith_add_r_r_r(hr, tx, ty);
  2050. emit_memhandler_write(size);
  2051. }
  2052. // @Rn+,@Rm+
  2053. static void emit_indirect_read_double(SH2 *sh2, int *rnr, int *rmr, sh2_reg_e rn, sh2_reg_e rm, int size)
  2054. {
  2055. int tmp;
  2056. // unlock rn, rm here to avoid REG shortage in MAC operation
  2057. tmp = emit_memhandler_read_rr(sh2, SHR_TMP, rn, 0, size | MF_POSTINCR);
  2058. rcache_unlock(guest_regs[rn].vreg);
  2059. tmp = rcache_save_tmp(tmp);
  2060. *rmr = emit_memhandler_read_rr(sh2, SHR_TMP, rm, 0, size | MF_POSTINCR);
  2061. rcache_unlock(guest_regs[rm].vreg);
  2062. *rnr = rcache_restore_tmp(tmp);
  2063. }
  2064. static void emit_do_static_regs(int is_write, int tmpr)
  2065. {
  2066. int i, r, count;
  2067. for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
  2068. if (guest_regs[i].flags & GRF_STATIC)
  2069. r = cache_regs[guest_regs[i].vreg].hreg;
  2070. else
  2071. continue;
  2072. for (count = 1; i < ARRAY_SIZE(guest_regs) - 1; i++, r++) {
  2073. if ((guest_regs[i + 1].flags & GRF_STATIC) &&
  2074. cache_regs[guest_regs[i + 1].vreg].hreg == r + 1)
  2075. count++;
  2076. else
  2077. break;
  2078. }
  2079. if (count > 1) {
  2080. // i, r point to last item
  2081. if (is_write)
  2082. emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
  2083. else
  2084. emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
  2085. } else {
  2086. if (is_write)
  2087. emith_ctx_write(r, i * 4);
  2088. else
  2089. emith_ctx_read(r, i * 4);
  2090. }
  2091. }
  2092. }
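// E.g. (hypothetical static mapping): with R0..R2 statically assigned to
// consecutive host regs, a single emith_ctx_write_multiple() (or
// read_multiple) covers all three; non-consecutive statics fall back to
// one access per reg.
#if 0
emit_do_static_regs(1, 0);  // save all statics to the ctx, as callers below do
#endif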
  2093. /* just after lookup function, jump to address returned */
  2094. static void emit_block_entry(void)
  2095. {
  2096. emith_tst_r_r_ptr(RET_REG, RET_REG);
  2097. EMITH_SJMP_START(DCOND_EQ);
  2098. emith_jump_reg_c(DCOND_NE, RET_REG);
  2099. EMITH_SJMP_END(DCOND_EQ);
  2100. }
  2101. #define DELAY_SAVE_T(sr) { \
  2102. emith_bic_r_imm(sr, T_save); \
  2103. emith_tst_r_imm(sr, T); \
  2104. EMITH_SJMP_START(DCOND_EQ); \
  2105. emith_or_r_imm_c(DCOND_NE, sr, T_save); \
  2106. EMITH_SJMP_END(DCOND_EQ); \
  2107. }
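// DELAY_SAVE_T mirrors the live T bit into T_save before a delay slot
// insn overwrites T, so a conditional branch evaluated after its slot
// still sees the pre-slot value (see the delay_dep_fw handling in
// sh2_translate below).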
  2108. #define FLUSH_CYCLES(sr) \
  2109. if (cycles > 0) { \
  2110. emith_sub_r_imm(sr, cycles << 12); \
  2111. cycles = 0; \
  2112. }
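// Cycle accounting sketch: the remaining cycle budget lives in the upper
// bits of the cached SR (count shifted left by 12), so straight-line code
// is charged with a single "sub sr, #cycles<<12" at flush points; the
// block entry check "cmp sr, #0; branch..le sh2_drc_exit" below leaves
// the DRC once the budget is exhausted.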
  2113. static void *dr_get_pc_base(u32 pc, int is_slave);
  2114. static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
  2115. {
  2116. u32 branch_target_pc[MAX_LOCAL_BRANCHES];
  2117. void *branch_target_ptr[MAX_LOCAL_BRANCHES];
  2118. int branch_target_count = 0;
  2119. void *branch_patch_ptr[MAX_LOCAL_BRANCHES];
  2120. u32 branch_patch_pc[MAX_LOCAL_BRANCHES];
  2121. int branch_patch_count = 0;
  2122. u8 op_flags[BLOCK_INSN_LIMIT];
  2123. struct {
  2124. u32 test_irq:1;
  2125. u32 pending_branch_direct:1;
  2126. u32 pending_branch_indirect:1;
  2127. } drcf = { 0, };
  2128. // PC of current, first, last SH2 insn
  2129. u32 pc, base_pc, end_pc;
  2130. u32 base_literals, end_literals;
  2131. void *block_entry_ptr;
  2132. struct block_desc *block;
  2133. struct block_entry *entry;
  2134. u16 *dr_pc_base;
  2135. struct op_data *opd;
  2136. int blkid_main = 0;
  2137. int skip_op = 0;
  2138. int tmp, tmp2;
  2139. int cycles;
  2140. int i, v;
  2141. u32 u;
  2142. int op;
  2143. base_pc = sh2->pc;
  2144. // get base/validate PC
  2145. dr_pc_base = dr_get_pc_base(base_pc, sh2->is_slave);
  2146. if (dr_pc_base == (void *)-1) {
  2147. printf("invalid PC, aborting: %08x\n", base_pc);
  2148. // FIXME: be less destructive
  2149. exit(1);
  2150. }
  2151. // initial passes to disassemble and analyze the block
  2152. scan_block(base_pc, sh2->is_slave, op_flags, &end_pc, &base_literals, &end_literals);
  2153. end_literals = dr_check_nolit(base_literals, end_literals, tcache_id);
  2154. if (base_literals == end_literals) // map empty lit section to end of code
  2155. base_literals = end_literals = end_pc;
  2156. // collect branch_targets that don't land on delay slots
  2157. for (pc = base_pc, i = 0; pc < end_pc; i++, pc += 2) {
  2158. if (!(op_flags[i] & OF_BTARGET))
  2159. continue;
  2160. if (op_flags[i] & OF_DELAY_OP) {
  2161. op_flags[i] &= ~OF_BTARGET;
  2162. continue;
  2163. }
  2164. ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc, break);
  2165. }
  2166. if (branch_target_count > 0) {
  2167. memset(branch_target_ptr, 0, sizeof(branch_target_ptr[0]) * branch_target_count);
  2168. }
  2169. tcache_ptr = dr_prepare_cache(tcache_id, (end_pc - base_pc) / 2);
  2170. #if (DRC_DEBUG & 4)
  2171. tcache_dsm_ptrs[tcache_id] = tcache_ptr;
  2172. #endif
  2173. block = dr_add_block(base_pc, end_pc - base_pc, base_literals,
  2174. end_literals - base_literals, sh2->is_slave, &blkid_main);
  2175. if (block == NULL)
  2176. return NULL;
  2177. block_entry_ptr = tcache_ptr;
  2178. dbg(2, "== %csh2 block #%d,%d %08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
  2179. tcache_id, blkid_main, base_pc, end_pc, block_entry_ptr);
  2180. // clear stale state after compile errors
  2181. rcache_invalidate();
  2182. // -------------------------------------------------
  2183. // 3rd pass: actual compilation
  2184. pc = base_pc;
  2185. cycles = 0;
  2186. for (i = 0; pc < end_pc; i++)
  2187. {
  2188. u32 delay_dep_fw = 0, delay_dep_bk = 0;
  2189. int tmp3, tmp4;
  2190. int sr;
  2191. opd = &ops[i];
  2192. op = FETCH_OP(pc);
  2193. #if (DRC_DEBUG & 2)
  2194. insns_compiled++;
  2195. #endif
  2196. #if (DRC_DEBUG & 4)
  2197. DasmSH2(sh2dasm_buff, pc, op);
  2198. printf("%c%08x %04x %s\n", (op_flags[i] & OF_BTARGET) ? '*' : ' ',
  2199. pc, op, sh2dasm_buff);
  2200. #endif
  2201. if (op_flags[i] & OF_BTARGET)
  2202. {
  2203. if (pc != base_pc)
  2204. {
  2205. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2206. FLUSH_CYCLES(sr);
  2207. rcache_flush();
  2208. // make block entry
  2209. v = block->entry_count;
  2211. if (v < ARRAY_SIZE(block->entryp))
  2212. {
  2213. entry = &block->entryp[v];
  2214. entry->pc = pc;
  2215. entry->tcache_ptr = tcache_ptr;
  2216. entry->links = entry->o_links = NULL;
  2217. #if (DRC_DEBUG & 2)
  2218. entry->block = block;
  2219. #endif
  2220. add_to_hashlist(entry, tcache_id);
  2221. block->entry_count++;
  2222. dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p",
  2223. sh2->is_slave ? 's' : 'm', tcache_id, blkid_main,
  2224. pc, tcache_ptr);
  2225. }
  2226. else {
  2227. dbg(1, "too many entryp for block #%d,%d pc=%08x",
  2228. tcache_id, blkid_main, pc);
  2229. break;
  2230. }
  2231. } else {
  2232. entry = block->entryp;
  2233. }
  2234. // since we made a block entry, link any other blocks that jump to it
  2235. dr_link_blocks(entry, tcache_id);
  2236. if (!tcache_id) // can safely link from cpu-local to global memory
  2237. dr_link_blocks(entry, sh2->is_slave?2:1);
  2238. v = find_in_array(branch_target_pc, branch_target_count, pc);
  2239. if (v >= 0)
  2240. branch_target_ptr[v] = tcache_ptr;
  2241. // must update PC
  2242. emit_move_r_imm32(SHR_PC, pc);
  2243. rcache_clean();
  2244. #if (DRC_DEBUG & 0x10)
  2245. rcache_get_reg_arg(0, SHR_PC, NULL);
  2246. tmp = emit_memhandler_read(1);
  2247. tmp2 = rcache_get_tmp();
  2248. tmp3 = rcache_get_tmp();
  2249. emith_move_r_imm(tmp2, (s16)FETCH_OP(pc));
  2250. emith_move_r_imm(tmp3, 0);
  2251. emith_cmp_r_r(tmp, tmp2);
  2252. EMITH_SJMP_START(DCOND_EQ);
  2253. emith_read_r_r_offs_c(DCOND_NE, tmp3, tmp3, 0); // crash
  2254. EMITH_SJMP_END(DCOND_EQ);
  2255. rcache_free_tmp(tmp);
  2256. rcache_free_tmp(tmp2);
  2257. rcache_free_tmp(tmp3);
  2258. #endif
  2259. // check cycles
  2260. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  2261. emith_cmp_r_imm(sr, 0);
  2262. emith_jump_cond(DCOND_LE, sh2_drc_exit);
  2263. #if (DRC_DEBUG & 32)
  2264. // block hit counter
  2265. tmp = rcache_get_tmp_arg(0);
  2266. tmp2 = rcache_get_tmp_arg(1);
  2267. emith_move_r_ptr_imm(tmp, (uptr)entry);
  2268. emith_read_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  2269. emith_add_r_imm(tmp2, 1);
  2270. emith_write_r_r_offs(tmp2, tmp, offsetof(struct block_entry, entry_count));
  2271. rcache_free_tmp(tmp);
  2272. rcache_free_tmp(tmp2);
  2273. #endif
  2274. #if (DRC_DEBUG & (8|256|512|1024))
  2275. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2276. rcache_clean();
  2277. tmp = rcache_used_hreg_mask();
  2278. emith_save_caller_regs(tmp);
  2279. emit_do_static_regs(1, 0);
  2280. rcache_get_reg_arg(2, SHR_SR, NULL);
  2281. tmp2 = rcache_get_tmp_arg(0);
  2282. tmp3 = rcache_get_tmp_arg(1);
  2283. emith_move_r_ptr_imm(tmp2, tcache_ptr);
  2284. emith_move_r_r_ptr(tmp3,CONTEXT_REG);
  2285. emith_call(sh2_drc_log_entry);
  2286. emith_restore_caller_regs(tmp);
  2287. rcache_invalidate_tmp();
  2288. #endif
  2289. do_host_disasm(tcache_id);
  2290. rcache_unlock_all();
  2291. }
  2292. #ifdef DRC_CMP
  2293. if (!(op_flags[i] & OF_DELAY_OP)) {
  2294. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2295. FLUSH_CYCLES(sr);
  2296. rcache_clean();
  2297. tmp = rcache_used_hreg_mask();
  2298. emith_save_caller_regs(tmp);
  2299. emit_do_static_regs(1, 0);
  2300. emith_pass_arg_r(0, CONTEXT_REG);
  2301. emith_call(do_sh2_cmp);
  2302. emith_restore_caller_regs(tmp);
  2303. }
  2304. #endif
  2305. pc += 2;
  2306. if (skip_op > 0) {
  2307. skip_op--;
  2308. continue;
  2309. }
  2310. if (op_flags[i] & OF_DELAY_OP)
  2311. {
  2312. // handle delay slot dependencies
  2313. delay_dep_fw = opd->dest & ops[i-1].source;
  2314. delay_dep_bk = opd->source & ops[i-1].dest;
  2315. if (delay_dep_fw & BITMASK1(SHR_T)) {
  2316. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2317. DELAY_SAVE_T(sr);
  2318. }
  2319. if (delay_dep_bk & BITMASK1(SHR_PC)) {
  2320. if (opd->op != OP_LOAD_POOL && opd->op != OP_MOVA) {
  2321. // can only be those 2 really..
  2322. elprintf_sh2(sh2, EL_ANOMALY,
  2323. "drc: illegal slot insn %04x @ %08x?", op, pc - 2);
  2324. }
  2325. // store PC for MOVA/MOV @PC address calculation
  2326. if (opd->imm != 0)
  2327. ; // case OP_BRANCH - addr already resolved in scan_block
  2328. else {
  2329. switch (ops[i-1].op) {
  2330. case OP_BRANCH:
  2331. emit_move_r_imm32(SHR_PC, ops[i-1].imm);
  2332. break;
  2333. case OP_BRANCH_CT:
  2334. case OP_BRANCH_CF:
  2335. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  2336. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
  2337. emith_move_r_imm(tmp, pc);
  2338. emith_tst_r_imm(sr, T);
  2339. tmp2 = ops[i-1].op == OP_BRANCH_CT ? DCOND_NE : DCOND_EQ;
  2340. tmp3 = ops[i-1].op == OP_BRANCH_CT ? DCOND_EQ : DCOND_NE;
  2341. EMITH_SJMP_START(tmp3);
  2342. emith_move_r_imm_c(tmp2, tmp, ops[i-1].imm);
  2343. EMITH_SJMP_END(tmp3);
  2344. break;
  2345. case OP_BRANCH_N: // BT/BF known not to be taken
  2346. // XXX could modify opd->imm instead?
  2347. emit_move_r_imm32(SHR_PC, pc);
  2348. break;
  2349. // case OP_BRANCH_R OP_BRANCH_RF - PC already loaded
  2350. }
  2351. }
  2352. }
  2353. //if (delay_dep_fw & ~BITMASK1(SHR_T))
  2354. // dbg(1, "unhandled delay_dep_fw: %x", delay_dep_fw & ~BITMASK1(SHR_T));
  2355. if (delay_dep_bk & ~BITMASK2(SHR_PC, SHR_PR))
  2356. dbg(1, "unhandled delay_dep_bk: %x", delay_dep_bk);
  2357. rcache_set_hint_soon(0);
  2358. rcache_set_hint_late(0);
  2359. }
  2360. else
  2361. {
  2362. // inform cache about future register usage
  2363. u32 late = 0; // regs read by future ops
  2364. u32 write = 0; // regs written to (to detect write before read)
  2365. u32 soon = 0; // regs read soon
  2366. tmp = OP_ISBRANCH(opd[0].op); // branch insn detected
  2367. for (v = 1; v <= 9; v++) {
  2368. // no sense in looking any further than the next rcache flush
  2369. if (pc + 2*v < end_pc && !(op_flags[i+v] & OF_BTARGET) &&
  2370. (!tmp || (op_flags[i+v] & OF_DELAY_OP))) {
  2371. late |= opd[v].source & ~write;
  2372. // ignore source regs after they have been written to
  2373. write |= opd[v].dest;
  2374. } else {
  2375. // upcoming rcache_flush, start writing back unused dirty stuff
  2376. tmp2 = write|opd[0].source|opd[0].dest; // insn may change reg aliases
  2377. rcache_clean_mask(rcache_dirty_mask() & ~tmp2);
  2378. break;
  2379. }
  2380. // XXX must also include test-irq locations!
  2381. tmp |= (OP_ISBRANCH(opd[v].op) || opd[v].op == OP_RTE ||
  2382. opd[v].op == OP_TRAPA || opd[v].op == OP_UNDEFINED);
  2383. // regs needed in the next few instructions
  2384. if (v <= 4)
  2385. soon = late;
  2386. }
2387. rcache_set_hint_soon(soon); // insns 1-3
  2388. rcache_set_hint_late(late & ~soon); // insns 4-9
  2389. }
  2390. rcache_set_locked(opd[0].source); // try not to evict src regs for this op
  2391. switch (opd->op)
  2392. {
  2393. case OP_BRANCH_N:
  2394. // never taken, just use up cycles
  2395. goto end_op;
  2396. case OP_BRANCH:
  2397. case OP_BRANCH_CT:
  2398. case OP_BRANCH_CF:
  2399. if (opd->dest & BITMASK1(SHR_PR))
  2400. emit_move_r_imm32(SHR_PR, pc + 2);
  2401. drcf.pending_branch_direct = 1;
  2402. goto end_op;
  2403. case OP_BRANCH_R:
  2404. if (opd->dest & BITMASK1(SHR_PR))
  2405. emit_move_r_imm32(SHR_PR, pc + 2);
  2406. if (gconst_get(opd->rm, &u)) {
  2407. opd->imm = u;
  2408. drcf.pending_branch_direct = 1;
  2409. } else {
  2410. emit_move_r_r(SHR_PC, opd->rm);
  2411. drcf.pending_branch_indirect = 1;
  2412. }
  2413. goto end_op;
  2414. case OP_BRANCH_RF:
  2415. if (gconst_get(GET_Rn(), &u)) {
  2416. if (opd->dest & BITMASK1(SHR_PR))
  2417. emit_move_r_imm32(SHR_PR, pc + 2);
  2418. opd->imm = pc + 2 + u;
  2419. drcf.pending_branch_direct = 1;
  2420. } else {
  2421. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2422. tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE, NULL);
  2423. emith_move_r_imm(tmp, pc + 2);
  2424. if (opd->dest & BITMASK1(SHR_PR)) {
  2425. tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE, NULL);
  2426. emith_move_r_r(tmp3, tmp);
  2427. }
  2428. emith_add_r_r(tmp, tmp2);
  2429. drcf.pending_branch_indirect = 1;
  2430. }
  2431. goto end_op;
  2432. case OP_SLEEP: // SLEEP 0000000000011011
  2433. printf("TODO sleep\n");
  2434. goto end_op;
  2435. case OP_RTE: // RTE 0000000000101011
  2436. // pop PC
  2437. emit_memhandler_read_rr(sh2, SHR_PC, SHR_SP, 0, 2 | MF_POSTINCR);
  2438. // pop SR
  2439. tmp = emit_memhandler_read_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_POSTINCR);
  2440. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2441. emith_write_sr(sr, tmp);
  2442. rcache_free_tmp(tmp);
  2443. drcf.test_irq = 1;
  2444. drcf.pending_branch_indirect = 1;
  2445. goto end_op;
  2446. case OP_UNDEFINED:
  2447. elprintf_sh2(sh2, EL_ANOMALY, "drc: unhandled op %04x @ %08x", op, pc-2);
  2448. opd->imm = (op_flags[i] & OF_B_IN_DS) ? 6 : 4;
  2449. // fallthrough
  2450. case OP_TRAPA: // TRAPA #imm 11000011iiiiiiii
  2451. // push SR
  2452. tmp = rcache_get_reg_arg(1, SHR_SR, &tmp2);
  2453. emith_clear_msb(tmp, tmp2, 22);
  2454. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  2455. // push PC
  2456. if (op == OP_TRAPA) {
  2457. tmp = rcache_get_tmp_arg(1);
  2458. emith_move_r_imm(tmp, pc);
  2459. } else if (drcf.pending_branch_indirect) {
  2460. tmp = rcache_get_reg_arg(1, SHR_PC, NULL);
  2461. } else {
  2462. tmp = rcache_get_tmp_arg(1);
  2463. emith_move_r_imm(tmp, pc - 2);
  2464. }
  2466. emit_memhandler_write_rr(sh2, SHR_TMP, SHR_SP, 0, 2 | MF_PREDECR);
  2467. // obtain new PC
  2468. emit_memhandler_read_rr(sh2, SHR_PC, SHR_VBR, opd->imm * 4, 2);
  2469. // indirect jump -> back to dispatcher
  2470. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2471. FLUSH_CYCLES(sr);
  2472. rcache_flush();
  2473. emith_jump(sh2_drc_dispatcher);
  2474. goto end_op;
  2475. case OP_LOAD_POOL:
  2476. #if PROPAGATE_CONSTANTS
  2477. if ((opd->imm && opd->imm >= base_pc && opd->imm < end_literals) ||
  2478. dr_is_rom(opd->imm))
  2479. {
  2480. if (opd->size == 2)
  2481. u = FETCH32(opd->imm);
  2482. else
  2483. u = (s16)FETCH_OP(opd->imm);
  2484. // tweak for Blackthorne: avoid stack overwriting
  2485. if (GET_Rn() == SHR_SP && u == 0x0603f800) u = 0x0603f880;
  2486. gconst_new(GET_Rn(), u);
  2487. }
  2488. else
  2489. #endif
  2490. {
  2491. if (opd->imm != 0) {
  2492. tmp = rcache_get_tmp_arg(0);
  2493. emith_move_r_imm(tmp, opd->imm);
  2494. } else {
  2495. // have to calculate read addr from PC for delay slot
  2496. tmp = rcache_get_reg_arg(0, SHR_PC, &tmp2);
  2497. if (opd->size == 2) {
  2498. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  2499. emith_bic_r_imm(tmp, 3);
  2500. }
  2501. else
  2502. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 2);
  2503. }
  2504. tmp2 = emit_memhandler_read(opd->size);
  2505. #if REMAP_REGISTER
  2506. tmp3 = rcache_map_reg(GET_Rn(), tmp2, opd->size != 2 ? RC_GR_RMW : RC_GR_WRITE);
  2507. #else
  2508. tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  2509. #endif
  2510. if (opd->size != 2) {
  2511. emith_sext(tmp3, tmp2, 16);
  2512. } else if (tmp3 != tmp2)
  2513. emith_move_r_r(tmp3, tmp2);
  2514. if (tmp3 != tmp2)
  2515. rcache_free_tmp(tmp2);
  2516. }
  2517. goto end_op;
  2518. case OP_MOVA: // MOVA @(disp,PC),R0 11000111dddddddd
  2519. if (opd->imm != 0)
  2520. emit_move_r_imm32(SHR_R0, opd->imm);
  2521. else {
  2522. // have to calculate addr from PC for delay slot
  2523. tmp2 = rcache_get_reg(SHR_PC, RC_GR_READ, NULL);
  2524. tmp = rcache_get_reg(SHR_R0, RC_GR_WRITE, NULL);
  2525. emith_add_r_r_imm(tmp, tmp2, 2 + (op & 0xff) * 4);
  2526. emith_bic_r_imm(tmp, 3);
  2527. }
  2528. goto end_op;
  2529. }
  2530. switch ((op >> 12) & 0x0f)
  2531. {
  2532. /////////////////////////////////////////////
  2533. case 0x00:
  2534. switch (op & 0x0f)
  2535. {
  2536. case 0x02:
  2537. switch (GET_Fx())
  2538. {
  2539. case 0: // STC SR,Rn 0000nnnn00000010
  2540. tmp2 = SHR_SR;
  2541. break;
  2542. case 1: // STC GBR,Rn 0000nnnn00010010
  2543. tmp2 = SHR_GBR;
  2544. break;
  2545. case 2: // STC VBR,Rn 0000nnnn00100010
  2546. tmp2 = SHR_VBR;
  2547. break;
  2548. default:
  2549. goto default_;
  2550. }
  2551. if (tmp2 == SHR_SR) {
  2552. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  2553. tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  2554. emith_clear_msb(tmp, sr, 22); // reserved bits defined by ISA as 0
  2555. } else
  2556. emit_move_r_r(GET_Rn(), tmp2);
  2557. goto end_op;
  2558. case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
  2559. case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
  2560. case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
  2561. emit_indirect_indexed_write(sh2, GET_Rm(), SHR_R0, GET_Rn(), op & 3);
  2562. goto end_op;
  2563. case 0x07: // MUL.L Rm,Rn 0000nnnnmmmm0111
  2564. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2565. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2566. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  2567. emith_mul(tmp3, tmp2, tmp);
  2568. goto end_op;
  2569. case 0x08:
  2570. switch (GET_Fx())
  2571. {
  2572. case 0: // CLRT 0000000000001000
  2573. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2574. emith_bic_r_imm(sr, T);
  2575. break;
  2576. case 1: // SETT 0000000000011000
  2577. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2578. emith_or_r_imm(sr, T);
  2579. break;
  2580. case 2: // CLRMAC 0000000000101000
  2581. emit_move_r_imm32(SHR_MACL, 0);
  2582. emit_move_r_imm32(SHR_MACH, 0);
  2583. break;
  2584. default:
  2585. goto default_;
  2586. }
  2587. goto end_op;
  2588. case 0x09:
  2589. switch (GET_Fx())
  2590. {
  2591. case 0: // NOP 0000000000001001
  2592. break;
  2593. case 1: // DIV0U 0000000000011001
  2594. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2595. emith_bic_r_imm(sr, M|Q|T);
  2596. break;
  2597. case 2: // MOVT Rn 0000nnnn00101001
  2598. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  2599. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  2600. emith_clear_msb(tmp2, sr, 31);
  2601. break;
  2602. default:
  2603. goto default_;
  2604. }
  2605. goto end_op;
  2606. case 0x0a:
  2607. switch (GET_Fx())
  2608. {
  2609. case 0: // STS MACH,Rn 0000nnnn00001010
  2610. tmp2 = SHR_MACH;
  2611. break;
  2612. case 1: // STS MACL,Rn 0000nnnn00011010
  2613. tmp2 = SHR_MACL;
  2614. break;
  2615. case 2: // STS PR,Rn 0000nnnn00101010
  2616. tmp2 = SHR_PR;
  2617. break;
  2618. default:
  2619. goto default_;
  2620. }
  2621. emit_move_r_r(GET_Rn(), tmp2);
  2622. goto end_op;
  2623. case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
  2624. case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
  2625. case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
  2626. emit_indirect_indexed_read(sh2, GET_Rn(), SHR_R0, GET_Rm(), op & 3);
  2627. goto end_op;
  2628. case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
  2629. emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
  2630. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  2631. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
  2632. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
  2633. emith_sh2_macl(tmp3, tmp4, tmp, tmp2, sr);
  2634. rcache_free_tmp(tmp2);
  2635. rcache_free_tmp(tmp);
  2636. goto end_op;
  2637. }
  2638. goto default_;
  2639. /////////////////////////////////////////////
  2640. case 0x01: // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
  2641. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), (op & 0x0f) * 4, 2);
  2642. goto end_op;
  2643. case 0x02:
  2644. switch (op & 0x0f)
  2645. {
  2646. case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
  2647. case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
  2648. case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
  2649. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, op & 3);
  2650. goto end_op;
  2651. case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
  2652. case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
  2653. case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
  2654. emit_memhandler_write_rr(sh2, GET_Rm(), GET_Rn(), 0, (op & 3) | MF_PREDECR);
  2655. goto end_op;
  2656. case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
  2657. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2658. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2659. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2660. emith_bic_r_imm(sr, M|Q|T);
  2661. emith_tst_r_imm(tmp2, (1<<31));
  2662. EMITH_SJMP_START(DCOND_EQ);
  2663. emith_or_r_imm_c(DCOND_NE, sr, Q);
  2664. EMITH_SJMP_END(DCOND_EQ);
  2665. emith_tst_r_imm(tmp3, (1<<31));
  2666. EMITH_SJMP_START(DCOND_EQ);
  2667. emith_or_r_imm_c(DCOND_NE, sr, M);
  2668. EMITH_SJMP_END(DCOND_EQ);
  2669. emith_teq_r_r(tmp2, tmp3);
  2670. EMITH_SJMP_START(DCOND_PL);
  2671. emith_or_r_imm_c(DCOND_MI, sr, T);
  2672. EMITH_SJMP_END(DCOND_PL);
  2673. goto end_op;
  2674. case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
  2675. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2676. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2677. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2678. emith_bic_r_imm(sr, T);
  2679. emith_tst_r_r(tmp2, tmp3);
  2680. emit_or_t_if_eq(sr);
  2681. goto end_op;
  2682. case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
  2683. if (GET_Rm() != GET_Rn()) {
  2684. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2685. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  2686. emith_and_r_r_r(tmp, tmp3, tmp2);
  2687. }
  2688. goto end_op;
  2689. case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
  2690. #if PROPAGATE_CONSTANTS
  2691. if (GET_Rn() == GET_Rm()) {
  2692. gconst_new(GET_Rn(), 0);
  2693. goto end_op;
  2694. }
  2695. #endif
  2696. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2697. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  2698. emith_eor_r_r_r(tmp, tmp3, tmp2);
  2699. goto end_op;
  2700. case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
  2701. if (GET_Rm() != GET_Rn()) {
  2702. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2703. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  2704. emith_or_r_r_r(tmp, tmp3, tmp2);
  2705. }
  2706. goto end_op;
  2707. case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
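// CMP/STR sets T if any byte lane of Rn equals the corresponding lane
// of Rm: Rn ^ Rm has a zero byte exactly where the lanes match, so each
// lane is tested until one reads zero. Illustrative values:
// Rn=0x11aa2233, Rm=0x44aa5566 -> tmp=0x55007755; the 0x00ff0000 test
// yields EQ and T is set.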
  2708. tmp = rcache_get_tmp();
  2709. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2710. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2711. emith_eor_r_r_r(tmp, tmp2, tmp3);
  2712. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2713. emith_bic_r_imm(sr, T);
  2714. emith_tst_r_imm(tmp, 0x000000ff);
  2715. EMITH_SJMP_START(DCOND_EQ);
  2716. emith_tst_r_imm_c(DCOND_NE, tmp, 0x0000ff00);
  2717. EMITH_SJMP_START(DCOND_EQ);
  2718. emith_tst_r_imm_c(DCOND_NE, tmp, 0x00ff0000);
  2719. EMITH_SJMP_START(DCOND_EQ);
  2720. emith_tst_r_imm_c(DCOND_NE, tmp, 0xff000000);
  2721. EMITH_SJMP_END(DCOND_EQ);
  2722. EMITH_SJMP_END(DCOND_EQ);
  2723. EMITH_SJMP_END(DCOND_EQ);
  2724. emit_or_t_if_eq(sr);
  2725. rcache_free_tmp(tmp);
  2726. goto end_op;
  2727. case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
  2728. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2729. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  2730. emith_lsr(tmp, tmp3, 16);
  2731. emith_or_r_r_lsl(tmp, tmp2, 16);
  2732. goto end_op;
  2733. case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
  2734. case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
  2735. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2736. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2737. tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  2738. if (op & 1) {
  2739. emith_sext(tmp, tmp2, 16);
  2740. } else
  2741. emith_clear_msb(tmp, tmp2, 16);
  2742. tmp2 = rcache_get_tmp();
  2743. if (op & 1) {
  2744. emith_sext(tmp2, tmp3, 16);
  2745. } else
  2746. emith_clear_msb(tmp2, tmp3, 16);
  2747. emith_mul(tmp, tmp, tmp2);
  2748. rcache_free_tmp(tmp2);
  2749. goto end_op;
  2750. }
  2751. goto default_;
  2752. /////////////////////////////////////////////
  2753. case 0x03:
  2754. switch (op & 0x0f)
  2755. {
  2756. case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
  2757. case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
  2758. case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
  2759. case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
  2760. case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
  2761. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2762. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2763. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2764. emith_bic_r_imm(sr, T);
  2765. emith_cmp_r_r(tmp2, tmp3);
  2766. switch (op & 0x07)
  2767. {
  2768. case 0x00: // CMP/EQ
  2769. emit_or_t_if_eq(sr);
  2770. break;
  2771. case 0x02: // CMP/HS
  2772. EMITH_SJMP_START(DCOND_LO);
  2773. emith_or_r_imm_c(DCOND_HS, sr, T);
  2774. EMITH_SJMP_END(DCOND_LO);
  2775. break;
  2776. case 0x03: // CMP/GE
  2777. EMITH_SJMP_START(DCOND_LT);
  2778. emith_or_r_imm_c(DCOND_GE, sr, T);
  2779. EMITH_SJMP_END(DCOND_LT);
  2780. break;
  2781. case 0x06: // CMP/HI
  2782. EMITH_SJMP_START(DCOND_LS);
  2783. emith_or_r_imm_c(DCOND_HI, sr, T);
  2784. EMITH_SJMP_END(DCOND_LS);
  2785. break;
  2786. case 0x07: // CMP/GT
  2787. EMITH_SJMP_START(DCOND_LE);
  2788. emith_or_r_imm_c(DCOND_GT, sr, T);
  2789. EMITH_SJMP_END(DCOND_LE);
  2790. break;
  2791. }
  2792. goto end_op;
  2793. case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
  2794. // Q1 = carry(Rn = (Rn << 1) | T)
  2795. // if Q ^ M
  2796. // Q2 = carry(Rn += Rm)
  2797. // else
  2798. // Q2 = carry(Rn -= Rm)
  2799. // Q = M ^ Q1 ^ Q2
  2800. // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
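// worked single-step example (illustrative): M=0, Q=0, T=1,
// Rn=0x40000000, Rm=0x00000003. Shift-in: Q1=0 (old MSB of Rn),
// Rn=0x80000001; Q^M=0 so subtract: Rn=0x7ffffffe, Q2=0 (no borrow);
// result: Q = M^Q1^Q2 = 0, T = !(Q1^Q2) = 1.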
  2801. tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2802. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp);
  2803. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2804. emith_tpop_carry(sr, 0);
  2805. emith_adcf_r_r_r(tmp2, tmp, tmp);
  2806. emith_tpush_carry(sr, 0); // keep Q1 in T for now
  2807. tmp4 = rcache_get_tmp();
  2808. emith_and_r_r_imm(tmp4, sr, M);
  2809. emith_eor_r_r_lsr(sr, tmp4, M_SHIFT - Q_SHIFT); // Q ^= M
  2810. rcache_free_tmp(tmp4);
  2811. // add or sub, invert T if carry to get Q1 ^ Q2
  2812. // in: (Q ^ M) passed in Q, Q1 in T
  2813. emith_sh2_div1_step(tmp2, tmp3, sr);
  2814. emith_bic_r_imm(sr, Q);
  2815. emith_tst_r_imm(sr, M);
  2816. EMITH_SJMP_START(DCOND_EQ);
  2817. emith_or_r_imm_c(DCOND_NE, sr, Q); // Q = M
  2818. EMITH_SJMP_END(DCOND_EQ);
  2819. emith_tst_r_imm(sr, T);
  2820. EMITH_SJMP_START(DCOND_EQ);
  2821. emith_eor_r_imm_c(DCOND_NE, sr, Q); // Q = M ^ Q1 ^ Q2
  2822. EMITH_SJMP_END(DCOND_EQ);
  2823. emith_eor_r_imm(sr, T); // T = !(Q1 ^ Q2)
  2824. goto end_op;
  2825. case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
  2826. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2827. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2828. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  2829. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
  2830. emith_mul_u64(tmp3, tmp4, tmp, tmp2);
  2831. goto end_op;
  2832. case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
  2833. #if PROPAGATE_CONSTANTS
  2834. if (GET_Rn() == GET_Rm()) {
  2835. gconst_new(GET_Rn(), 0);
  2836. goto end_op;
  2837. }
  2838. #endif
  2839. case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
  2840. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2841. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  2842. if (op & 4) {
  2843. emith_add_r_r_r(tmp, tmp3, tmp2);
  2844. } else
  2845. emith_sub_r_r_r(tmp, tmp3, tmp2);
  2846. goto end_op;
  2847. case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
  2848. case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
  2849. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2850. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  2851. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2852. if (op & 4) { // adc
  2853. emith_tpop_carry(sr, 0);
  2854. emith_adcf_r_r_r(tmp, tmp3, tmp2);
  2855. emith_tpush_carry(sr, 0);
  2856. } else {
  2857. emith_tpop_carry(sr, 1);
  2858. emith_sbcf_r_r_r(tmp, tmp3, tmp2);
  2859. emith_tpush_carry(sr, 1);
  2860. }
  2861. goto end_op;
  2862. case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
  2863. case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
  2864. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2865. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  2866. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2867. emith_bic_r_imm(sr, T);
  2868. if (op & 4) {
  2869. emith_addf_r_r_r(tmp, tmp3, tmp2);
  2870. } else
  2871. emith_subf_r_r_r(tmp, tmp3, tmp2);
  2872. EMITH_SJMP_START(DCOND_VC);
  2873. emith_or_r_imm_c(DCOND_VS, sr, T);
  2874. EMITH_SJMP_END(DCOND_VC);
  2875. goto end_op;
  2876. case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
  2877. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2878. tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  2879. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE, NULL);
  2880. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE, NULL);
  2881. emith_mul_s64(tmp3, tmp4, tmp, tmp2);
  2882. goto end_op;
  2883. }
  2884. goto default_;
  2885. /////////////////////////////////////////////
  2886. case 0x04:
  2887. switch (op & 0x0f)
  2888. {
  2889. case 0x00:
  2890. switch (GET_Fx())
  2891. {
  2892. case 0: // SHLL Rn 0100nnnn00000000
  2893. case 2: // SHAL Rn 0100nnnn00100000
  2894. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  2895. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
2896. emith_tpop_carry(sr, 0); // dummy: carry-in is ignored by the shift
  2897. emith_lslf(tmp, tmp2, 1);
  2898. emith_tpush_carry(sr, 0);
  2899. goto end_op;
  2900. case 1: // DT Rn 0100nnnn00010000
  2901. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2902. emith_bic_r_imm(sr, T);
  2903. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  2904. emith_subf_r_r_imm(tmp, tmp2, 1);
  2905. emit_or_t_if_eq(sr);
  2906. goto end_op;
  2907. }
  2908. goto default_;
  2909. case 0x01:
  2910. switch (GET_Fx())
  2911. {
  2912. case 0: // SHLR Rn 0100nnnn00000001
  2913. case 2: // SHAR Rn 0100nnnn00100001
  2914. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  2915. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
2916. emith_tpop_carry(sr, 0); // dummy: carry-in is ignored by the shift
  2917. if (op & 0x20) {
  2918. emith_asrf(tmp, tmp2, 1);
  2919. } else
  2920. emith_lsrf(tmp, tmp2, 1);
  2921. emith_tpush_carry(sr, 0);
  2922. goto end_op;
  2923. case 1: // CMP/PZ Rn 0100nnnn00010001
  2924. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2925. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2926. emith_bic_r_imm(sr, T);
  2927. emith_cmp_r_imm(tmp, 0);
  2928. EMITH_SJMP_START(DCOND_LT);
  2929. emith_or_r_imm_c(DCOND_GE, sr, T);
  2930. EMITH_SJMP_END(DCOND_LT);
  2931. goto end_op;
  2932. }
  2933. goto default_;
  2934. case 0x02:
  2935. case 0x03:
  2936. switch (op & 0x3f)
  2937. {
  2938. case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
  2939. tmp = SHR_MACH;
  2940. break;
  2941. case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
  2942. tmp = SHR_MACL;
  2943. break;
  2944. case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
  2945. tmp = SHR_PR;
  2946. break;
  2947. case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
  2948. tmp = SHR_SR;
  2949. break;
  2950. case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
  2951. tmp = SHR_GBR;
  2952. break;
  2953. case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
  2954. tmp = SHR_VBR;
  2955. break;
  2956. default:
  2957. goto default_;
  2958. }
  2959. tmp3 = rcache_get_reg_arg(1, tmp, &tmp4);
  2960. if (tmp == SHR_SR) {
2961. emith_clear_msb(tmp3, tmp4, 22); // reserved SR bits read as 0 per the ISA
  2962. } else if (tmp3 != tmp4)
  2963. emith_move_r_r(tmp3, tmp4);
  2964. emit_memhandler_write_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_PREDECR);
  2965. goto end_op;
  2966. case 0x04:
  2967. case 0x05:
  2968. switch (op & 0x3f)
  2969. {
  2970. case 0x04: // ROTL Rn 0100nnnn00000100
  2971. case 0x05: // ROTR Rn 0100nnnn00000101
  2972. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  2973. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
2974. emith_tpop_carry(sr, 0); // dummy: carry-in is ignored by the rotate
  2975. if (op & 1) {
  2976. emith_rorf(tmp, tmp2, 1);
  2977. } else
  2978. emith_rolf(tmp, tmp2, 1);
  2979. emith_tpush_carry(sr, 0);
  2980. goto end_op;
  2981. case 0x24: // ROTCL Rn 0100nnnn00100100
  2982. case 0x25: // ROTCR Rn 0100nnnn00100101
  2983. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, NULL);
  2984. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2985. emith_tpop_carry(sr, 0);
  2986. if (op & 1) {
  2987. emith_rorcf(tmp);
  2988. } else
  2989. emith_rolcf(tmp);
  2990. emith_tpush_carry(sr, 0);
  2991. goto end_op;
  2992. case 0x15: // CMP/PL Rn 0100nnnn00010101
  2993. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  2994. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  2995. emith_bic_r_imm(sr, T);
  2996. emith_cmp_r_imm(tmp, 0);
  2997. EMITH_SJMP_START(DCOND_LE);
  2998. emith_or_r_imm_c(DCOND_GT, sr, T);
  2999. EMITH_SJMP_END(DCOND_LE);
  3000. goto end_op;
  3001. }
  3002. goto default_;
  3003. case 0x06:
  3004. case 0x07:
  3005. switch (op & 0x3f)
  3006. {
  3007. case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
  3008. tmp = SHR_MACH;
  3009. break;
  3010. case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
  3011. tmp = SHR_MACL;
  3012. break;
  3013. case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
  3014. tmp = SHR_PR;
  3015. break;
  3016. case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
  3017. tmp = SHR_SR;
  3018. break;
  3019. case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
  3020. tmp = SHR_GBR;
  3021. break;
  3022. case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
  3023. tmp = SHR_VBR;
  3024. break;
  3025. default:
  3026. goto default_;
  3027. }
  3028. if (tmp == SHR_SR) {
  3029. tmp2 = emit_memhandler_read_rr(sh2, SHR_TMP, GET_Rn(), 0, 2 | MF_POSTINCR);
  3030. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3031. emith_write_sr(sr, tmp2);
  3032. rcache_free_tmp(tmp2);
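// writing SR may lower the I-mask and unmask a pending interrupt,
// so schedule an irq re-check at the end of this op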
  3033. drcf.test_irq = 1;
  3034. } else
  3035. emit_memhandler_read_rr(sh2, tmp, GET_Rn(), 0, 2 | MF_POSTINCR);
  3036. goto end_op;
  3037. case 0x08:
  3038. case 0x09:
  3039. switch (GET_Fx())
  3040. {
  3041. case 0: // SHLL2 Rn 0100nnnn00001000
  3042. // SHLR2 Rn 0100nnnn00001001
  3043. tmp = 2;
  3044. break;
  3045. case 1: // SHLL8 Rn 0100nnnn00011000
  3046. // SHLR8 Rn 0100nnnn00011001
  3047. tmp = 8;
  3048. break;
  3049. case 2: // SHLL16 Rn 0100nnnn00101000
  3050. // SHLR16 Rn 0100nnnn00101001
  3051. tmp = 16;
  3052. break;
  3053. default:
  3054. goto default_;
  3055. }
  3056. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp3);
  3057. if (op & 1) {
  3058. emith_lsr(tmp2, tmp3, tmp);
  3059. } else
  3060. emith_lsl(tmp2, tmp3, tmp);
  3061. goto end_op;
  3062. case 0x0a:
  3063. switch (GET_Fx())
  3064. {
  3065. case 0: // LDS Rm,MACH 0100mmmm00001010
  3066. tmp2 = SHR_MACH;
  3067. break;
  3068. case 1: // LDS Rm,MACL 0100mmmm00011010
  3069. tmp2 = SHR_MACL;
  3070. break;
  3071. case 2: // LDS Rm,PR 0100mmmm00101010
  3072. tmp2 = SHR_PR;
  3073. break;
  3074. default:
  3075. goto default_;
  3076. }
  3077. emit_move_r_r(tmp2, GET_Rn());
  3078. goto end_op;
  3079. case 0x0b:
  3080. switch (GET_Fx())
  3081. {
  3082. case 1: // TAS.B @Rn 0100nnnn00011011
  3083. // XXX: is TAS working on 32X?
  3084. rcache_get_reg_arg(0, GET_Rn(), NULL);
  3085. tmp = emit_memhandler_read(0);
  3086. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3087. emith_bic_r_imm(sr, T);
  3088. emith_cmp_r_imm(tmp, 0);
  3089. emit_or_t_if_eq(sr);
  3090. emith_or_r_imm(tmp, 0x80);
3091. tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
  3092. emith_move_r_r(tmp2, tmp);
  3093. rcache_free_tmp(tmp);
  3094. rcache_get_reg_arg(0, GET_Rn(), NULL);
  3095. emit_memhandler_write(0);
  3096. break;
  3097. default:
  3098. goto default_;
  3099. }
  3100. goto end_op;
  3101. case 0x0e:
  3102. switch (GET_Fx())
  3103. {
  3104. case 0: // LDC Rm,SR 0100mmmm00001110
  3105. tmp2 = SHR_SR;
  3106. break;
  3107. case 1: // LDC Rm,GBR 0100mmmm00011110
  3108. tmp2 = SHR_GBR;
  3109. break;
  3110. case 2: // LDC Rm,VBR 0100mmmm00101110
  3111. tmp2 = SHR_VBR;
  3112. break;
  3113. default:
  3114. goto default_;
  3115. }
  3116. if (tmp2 == SHR_SR) {
  3117. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3118. tmp = rcache_get_reg(GET_Rn(), RC_GR_READ, NULL);
  3119. emith_write_sr(sr, tmp);
  3120. drcf.test_irq = 1;
  3121. } else
  3122. emit_move_r_r(tmp2, GET_Rn());
  3123. goto end_op;
  3124. case 0x0f: // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
  3125. emit_indirect_read_double(sh2, &tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
  3126. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3127. tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW, NULL);
  3128. tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW, NULL);
  3129. emith_sh2_macw(tmp3, tmp4, tmp, tmp2, sr);
  3130. rcache_free_tmp(tmp2);
  3131. rcache_free_tmp(tmp);
  3132. goto end_op;
  3133. }
  3134. goto default_;
  3135. /////////////////////////////////////////////
  3136. case 0x05: // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
  3137. emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2);
  3138. goto end_op;
  3139. /////////////////////////////////////////////
  3140. case 0x06:
  3141. switch (op & 0x0f)
  3142. {
  3143. case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
  3144. case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
  3145. case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
  3146. case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
  3147. case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
  3148. case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
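// note: for @Rm+ with Rn == Rm the loaded value takes priority over the
// increment (SH-2 behavior), so the post-increment is dropped below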
  3149. tmp = ((op & 7) >= 4 && GET_Rn() != GET_Rm()) ? MF_POSTINCR : 0;
  3150. emit_memhandler_read_rr(sh2, GET_Rn(), GET_Rm(), 0, (op & 3) | tmp);
  3151. goto end_op;
  3152. case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
  3153. emit_move_r_r(GET_Rn(), GET_Rm());
  3154. goto end_op;
  3155. case 0x07 ... 0x0f:
  3156. tmp = rcache_get_reg(GET_Rm(), RC_GR_READ, NULL);
  3157. tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE, NULL);
  3158. switch (op & 0x0f)
  3159. {
  3160. case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
  3161. emith_mvn_r_r(tmp2, tmp);
  3162. break;
  3163. case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
  3164. tmp3 = tmp2;
  3165. if (tmp == tmp2)
  3166. tmp3 = rcache_get_tmp();
  3167. tmp4 = rcache_get_tmp();
  3168. emith_lsr(tmp3, tmp, 16);
  3169. emith_or_r_r_lsl(tmp3, tmp, 24);
  3170. emith_and_r_r_imm(tmp4, tmp, 0xff00);
  3171. emith_or_r_r_lsl(tmp3, tmp4, 8);
  3172. emith_rol(tmp2, tmp3, 16);
  3173. rcache_free_tmp(tmp4);
  3174. if (tmp == tmp2)
  3175. rcache_free_tmp(tmp3);
  3176. break;
  3177. case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
  3178. emith_rol(tmp2, tmp, 16);
  3179. break;
  3180. case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
  3181. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3182. emith_tpop_carry(sr, 1);
  3183. emith_negcf_r_r(tmp2, tmp);
  3184. emith_tpush_carry(sr, 1);
  3185. break;
  3186. case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
  3187. emith_neg_r_r(tmp2, tmp);
  3188. break;
  3189. case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
  3190. emith_clear_msb(tmp2, tmp, 24);
  3191. break;
  3192. case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
  3193. emith_clear_msb(tmp2, tmp, 16);
  3194. break;
  3195. case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
  3196. emith_sext(tmp2, tmp, 8);
  3197. break;
  3198. case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
  3199. emith_sext(tmp2, tmp, 16);
  3200. break;
  3201. }
  3202. goto end_op;
  3203. }
  3204. goto default_;
  3205. /////////////////////////////////////////////
  3206. case 0x07: // ADD #imm,Rn 0111nnnniiiiiiii
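// example: ADD #0xfc,Rn (imm = -4) has bit 7 set, so it is emitted as a
// subtract of (-op & 0xff) = 4 rather than an add of 0xfc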
  3207. tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW, &tmp2);
  3208. if (op & 0x80) { // adding negative
  3209. emith_sub_r_r_imm(tmp, tmp2, -op & 0xff);
  3210. } else
  3211. emith_add_r_r_imm(tmp, tmp2, op & 0xff);
  3212. goto end_op;
  3213. /////////////////////////////////////////////
  3214. case 0x08:
  3215. switch (op & 0x0f00)
  3216. {
  3217. case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
  3218. case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
  3219. tmp = (op & 0x100) >> 8;
  3220. emit_memhandler_write_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
  3221. goto end_op;
  3222. case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
  3223. case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
  3224. tmp = (op & 0x100) >> 8;
  3225. emit_memhandler_read_rr(sh2, SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
  3226. goto end_op;
  3227. case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
  3228. tmp2 = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
  3229. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3230. emith_bic_r_imm(sr, T);
  3231. emith_cmp_r_imm(tmp2, (s8)(op & 0xff));
  3232. emit_or_t_if_eq(sr);
  3233. goto end_op;
  3234. }
  3235. goto default_;
  3236. /////////////////////////////////////////////
  3237. case 0x0c:
  3238. switch (op & 0x0f00)
  3239. {
  3240. case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
  3241. case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
  3242. case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
  3243. tmp = (op & 0x300) >> 8;
  3244. emit_memhandler_write_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
  3245. goto end_op;
  3246. case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
  3247. case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
  3248. case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
  3249. tmp = (op & 0x300) >> 8;
  3250. emit_memhandler_read_rr(sh2, SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
  3251. goto end_op;
  3252. case 0x0800: // TST #imm,R0 11001000iiiiiiii
  3253. tmp = rcache_get_reg(SHR_R0, RC_GR_READ, NULL);
  3254. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3255. emith_bic_r_imm(sr, T);
  3256. emith_tst_r_imm(tmp, op & 0xff);
  3257. emit_or_t_if_eq(sr);
  3258. goto end_op;
  3259. case 0x0900: // AND #imm,R0 11001001iiiiiiii
  3260. tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
  3261. emith_and_r_r_imm(tmp, tmp2, (op & 0xff));
  3262. goto end_op;
  3263. case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
  3264. if (op & 0xff) {
  3265. tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
  3266. emith_eor_r_r_imm(tmp, tmp2, (op & 0xff));
  3267. }
  3268. goto end_op;
  3269. case 0x0b00: // OR #imm,R0 11001011iiiiiiii
  3270. if (op & 0xff) {
  3271. tmp = rcache_get_reg(SHR_R0, RC_GR_RMW, &tmp2);
  3272. emith_or_r_r_imm(tmp, tmp2, (op & 0xff));
  3273. }
  3274. goto end_op;
  3275. case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
  3276. tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
  3277. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3278. emith_bic_r_imm(sr, T);
  3279. emith_tst_r_imm(tmp, op & 0xff);
  3280. emit_or_t_if_eq(sr);
  3281. rcache_free_tmp(tmp);
  3282. goto end_op;
  3283. case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
  3284. tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
  3285. tmp2 = rcache_get_tmp_arg(1);
  3286. emith_and_r_r_imm(tmp2, tmp, (op & 0xff));
  3287. goto end_rmw_op;
  3288. case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
  3289. tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
  3290. tmp2 = rcache_get_tmp_arg(1);
  3291. emith_eor_r_r_imm(tmp2, tmp, (op & 0xff));
  3292. goto end_rmw_op;
  3293. case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
  3294. tmp = emit_indirect_indexed_read(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
  3295. tmp2 = rcache_get_tmp_arg(1);
  3296. emith_or_r_r_imm(tmp2, tmp, (op & 0xff));
  3297. end_rmw_op:
  3298. rcache_free_tmp(tmp);
  3299. emit_indirect_indexed_write(sh2, SHR_TMP, SHR_R0, SHR_GBR, 0);
  3300. goto end_op;
  3301. }
  3302. goto default_;
  3303. /////////////////////////////////////////////
  3304. case 0x0e: // MOV #imm,Rn 1110nnnniiiiiiii
  3305. emit_move_r_imm32(GET_Rn(), (s8)op);
  3306. goto end_op;
  3307. default:
  3308. default_:
  3309. if (!(op_flags[i] & OF_B_IN_DS)) {
  3310. elprintf_sh2(sh2, EL_ANOMALY,
  3311. "drc: illegal op %04x @ %08x", op, pc - 2);
  3312. exit(1);
  3313. }
  3314. }
  3315. end_op:
  3316. rcache_unlock_all();
  3317. cycles += opd->cycles;
  3318. if (op_flags[i+1] & OF_DELAY_OP) {
  3319. do_host_disasm(tcache_id);
  3320. continue;
  3321. }
  3322. // test irq?
  3323. if (drcf.test_irq && !drcf.pending_branch_direct) {
  3324. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3325. FLUSH_CYCLES(sr);
  3326. if (!drcf.pending_branch_indirect)
  3327. emit_move_r_imm32(SHR_PC, pc);
  3328. rcache_flush();
  3329. emith_call(sh2_drc_test_irq);
3330. if (pc < end_pc) // mark next insn as an entry point for RTE
  3331. op_flags[i+1] |= OF_BTARGET;
  3332. drcf.test_irq = 0;
  3333. }
  3334. // branch handling (with/without delay)
  3335. if (drcf.pending_branch_direct)
  3336. {
  3337. struct op_data *opd_b =
  3338. (op_flags[i] & OF_DELAY_OP) ? opd-1 : opd;
  3339. u32 target_pc = opd_b->imm;
  3340. int cond = -1;
  3341. void *target = NULL;
  3342. int ctaken = 0;
  3343. if (OP_ISBRACND(opd_b->op)) {
  3344. ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
  3345. }
  3346. cycles += ctaken; // assume branch taken
  3347. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3348. FLUSH_CYCLES(sr);
  3349. rcache_clean();
  3350. // emit condition test for conditional branch
  3351. if (OP_ISBRACND(opd_b->op)) {
  3352. cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
  3353. if (delay_dep_fw & BITMASK1(SHR_T))
  3354. emith_tst_r_imm(sr, T_save);
  3355. else
  3356. emith_tst_r_imm(sr, T);
  3357. }
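// note: if the delay slot itself writes T, the branch must test the
// pre-slot T; that copy is kept in the DRC-private T_save bit of SR
// (see the cycle/flag layout note in sh2_execute_drc)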
  3358. // no modification of host status/flags between here and branching!
  3359. #if LINK_BRANCHES
  3360. v = find_in_array(branch_target_pc, branch_target_count, target_pc);
  3361. if (v >= 0)
  3362. {
  3363. // local branch
  3364. if (branch_target_ptr[v]) {
3365. // backward jumps can be linked here since the host PC is already known
  3366. target = branch_target_ptr[v];
  3367. } else if (branch_patch_count < MAX_LOCAL_BRANCHES) {
  3368. target = tcache_ptr;
  3369. branch_patch_pc[branch_patch_count] = target_pc;
  3370. branch_patch_ptr[branch_patch_count] = target;
  3371. branch_patch_count++;
  3372. }
  3373. else
  3374. dbg(1, "warning: too many local branches");
  3375. }
  3376. #endif
  3377. if (target == NULL)
  3378. {
  3379. // can't resolve branch locally, make a block exit
  3380. emit_move_r_imm32(SHR_PC, target_pc);
  3381. rcache_clean();
  3382. target = dr_prepare_ext_branch(block->entryp, target_pc, sh2->is_slave, tcache_id);
  3383. if (target == NULL)
  3384. return NULL;
  3385. }
  3386. if (cond != -1) {
  3387. emith_jump_cond_patchable(cond, target);
  3388. }
  3389. else {
  3390. emith_jump_patchable(target);
  3391. rcache_invalidate();
  3392. }
  3393. // branch not taken, correct cycle count
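// (the cycle budget lives in the upper SR bits, hence the << 12)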
  3394. if (ctaken)
  3395. emith_add_r_imm(sr, ctaken << 12);
  3396. drcf.pending_branch_direct = 0;
  3397. }
  3398. else if (drcf.pending_branch_indirect) {
  3399. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3400. FLUSH_CYCLES(sr);
  3401. rcache_flush();
  3402. emith_jump(sh2_drc_dispatcher);
  3403. drcf.pending_branch_indirect = 0;
  3404. }
  3405. do_host_disasm(tcache_id);
  3406. }
  3407. // check the last op
  3408. if (op_flags[i-1] & OF_DELAY_OP)
  3409. opd = &ops[i-2];
  3410. else
  3411. opd = &ops[i-1];
  3412. if (! OP_ISBRAUC(opd->op))
  3413. {
  3414. void *target;
  3415. s32 tmp = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3416. FLUSH_CYCLES(tmp);
  3417. emit_move_r_imm32(SHR_PC, pc);
  3418. rcache_flush();
  3419. target = dr_prepare_ext_branch(block->entryp, pc, sh2->is_slave, tcache_id);
  3420. if (target == NULL)
  3421. return NULL;
  3422. emith_jump_patchable(target);
  3423. }
  3424. // link local branches
  3425. for (i = 0; i < branch_patch_count; i++) {
  3426. void *target;
  3427. int t;
  3428. t = find_in_array(branch_target_pc, branch_target_count, branch_patch_pc[i]);
  3429. target = branch_target_ptr[t];
  3430. if (target == NULL) {
  3431. // flush pc and go back to dispatcher (this should no longer happen)
  3432. dbg(1, "stray branch to %08x %p", branch_patch_pc[i], tcache_ptr);
  3433. target = tcache_ptr;
  3434. emit_move_r_imm32(SHR_PC, branch_patch_pc[i]);
  3435. rcache_flush();
  3436. emith_jump(sh2_drc_dispatcher);
  3437. }
  3438. emith_jump_patch(branch_patch_ptr[i], target);
  3439. }
  3440. dr_mark_memory(1, block, tcache_id, 0);
  3441. tcache_ptrs[tcache_id] = tcache_ptr;
  3442. host_instructions_updated(block_entry_ptr, tcache_ptr);
  3443. do_host_disasm(tcache_id);
  3444. dbg(2, " block #%d,%d -> %p tcache %d/%d, insns %d -> %d %.3f",
  3445. tcache_id, blkid_main, tcache_ptr,
  3446. tcache_ptr - tcache_bases[tcache_id], tcache_sizes[tcache_id],
  3447. insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);
  3448. if ((sh2->pc & 0xc6000000) == 0x02000000) { // ROM
  3449. dbg(2, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]);
  3450. Pico32x.emu_flags |= P32XF_DRC_ROM_C;
  3451. }
  3452. /*
  3453. printf("~~~\n");
  3454. tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
  3455. do_host_disasm(tcache_id);
  3456. printf("~~~\n");
  3457. */
  3458. #if (DRC_DEBUG)
  3459. fflush(stdout);
  3460. #endif
  3461. return block_entry_ptr;
  3462. }
  3463. static void sh2_generate_utils(void)
  3464. {
  3465. int arg0, arg1, arg2, arg3, sr, tmp;
  3466. host_arg2reg(arg0, 0);
  3467. host_arg2reg(arg1, 1);
  3468. host_arg2reg(arg2, 2);
  3469. host_arg2reg(arg3, 3);
  3470. emith_move_r_r(arg0, arg0); // nop
  3471. emith_move_r_r(arg1, arg1); // nop
  3472. emith_move_r_r(arg2, arg2); // nop
  3473. emith_move_r_r(arg3, arg3); // nop
  3474. // sh2_drc_write8(u32 a, u32 d)
  3475. sh2_drc_write8 = (void *)tcache_ptr;
  3476. emith_ctx_read_ptr(arg2, offsetof(SH2, write8_tab));
  3477. emith_sh2_wcall(arg0, arg1, arg2, arg3);
  3478. // sh2_drc_write16(u32 a, u32 d)
  3479. sh2_drc_write16 = (void *)tcache_ptr;
  3480. emith_ctx_read_ptr(arg2, offsetof(SH2, write16_tab));
  3481. emith_sh2_wcall(arg0, arg1, arg2, arg3);
  3482. // sh2_drc_write32(u32 a, u32 d)
  3483. sh2_drc_write32 = (void *)tcache_ptr;
  3484. emith_ctx_read_ptr(arg2, offsetof(SH2, write32_tab));
  3485. emith_sh2_wcall(arg0, arg1, arg2, arg3);
  3486. // d = sh2_drc_read8(u32 a)
  3487. sh2_drc_read8 = (void *)tcache_ptr;
  3488. emith_ctx_read_ptr(arg1, offsetof(SH2, read8_map));
  3489. emith_sh2_rcall(arg0, arg1, arg2, arg3);
  3490. EMITH_SJMP_START(DCOND_CS);
  3491. emith_and_r_r_c(DCOND_CC, arg0, arg3);
  3492. emith_eor_r_imm_c(DCOND_CC, arg0, 1);
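// (assumption: the address XOR 1 here, like the ROR 16 in the read32
// path below, compensates for memory being stored as 16-bit units with
// swapped byte lanes on the host)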
  3493. emith_read8_r_r_r_c(DCOND_CC, RET_REG, arg0, arg2);
  3494. emith_ret_c(DCOND_CC);
  3495. EMITH_SJMP_END(DCOND_CS);
  3496. emith_move_r_r_ptr(arg1, CONTEXT_REG);
  3497. emith_jump_reg(arg2);
  3498. // d = sh2_drc_read16(u32 a)
  3499. sh2_drc_read16 = (void *)tcache_ptr;
  3500. emith_ctx_read_ptr(arg1, offsetof(SH2, read16_map));
  3501. emith_sh2_rcall(arg0, arg1, arg2, arg3);
  3502. EMITH_SJMP_START(DCOND_CS);
  3503. emith_and_r_r_c(DCOND_CC, arg0, arg3);
  3504. emith_read16_r_r_r_c(DCOND_CC, RET_REG, arg0, arg2);
  3505. emith_ret_c(DCOND_CC);
  3506. EMITH_SJMP_END(DCOND_CS);
  3507. emith_move_r_r_ptr(arg1, CONTEXT_REG);
  3508. emith_jump_reg(arg2);
  3509. // d = sh2_drc_read32(u32 a)
  3510. sh2_drc_read32 = (void *)tcache_ptr;
  3511. emith_ctx_read_ptr(arg1, offsetof(SH2, read32_map));
  3512. emith_sh2_rcall(arg0, arg1, arg2, arg3);
  3513. EMITH_SJMP_START(DCOND_CS);
  3514. emith_and_r_r_c(DCOND_CC, arg0, arg3);
  3515. emith_read_r_r_r_c(DCOND_CC, RET_REG, arg0, arg2);
  3516. emith_ror_c(DCOND_CC, RET_REG, RET_REG, 16);
  3517. emith_ret_c(DCOND_CC);
  3518. EMITH_SJMP_END(DCOND_CS);
  3519. emith_move_r_r_ptr(arg1, CONTEXT_REG);
  3520. emith_jump_reg(arg2);
  3521. // sh2_drc_exit(void)
  3522. sh2_drc_exit = (void *)tcache_ptr;
  3523. emit_do_static_regs(1, arg2);
  3524. emith_sh2_drc_exit();
  3525. // sh2_drc_dispatcher(void)
  3526. sh2_drc_dispatcher = (void *)tcache_ptr;
  3527. emith_ctx_read(arg0, SHR_PC * 4);
  3528. #if BRANCH_CACHE
  3529. // check if PC is in branch target cache
  3530. emith_and_r_r_imm(arg1, arg0, (ARRAY_SIZE(sh2s->branch_cache)-1)*4);
  3531. emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 2 : 1);
  3532. emith_read_r_r_offs(arg2, arg1, offsetof(SH2, branch_cache));
  3533. emith_cmp_r_r(arg2, arg0);
  3534. EMITH_SJMP_START(DCOND_NE);
  3535. emith_read_r_r_offs_ptr_c(DCOND_EQ, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  3536. emith_jump_reg_c(DCOND_EQ, RET_REG);
  3537. EMITH_SJMP_END(DCOND_NE);
  3538. #endif
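// sketch of the lookup above in C (ptr field name assumed):
//   i = (pc >> 2) & (ARRAY_SIZE(sh2->branch_cache) - 1);
//   if (sh2->branch_cache[i].pc == pc)
//     goto *sh2->branch_cache[i].ptr; // jump to cached block entry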
  3539. emith_ctx_read(arg1, offsetof(SH2, is_slave));
  3540. emith_add_r_r_ptr_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  3541. emith_call(dr_lookup_block);
  3542. #if BRANCH_CACHE
  3543. // store PC and block entry ptr (in arg0) in branch target cache
  3544. emith_tst_r_r_ptr(RET_REG, RET_REG);
  3545. EMITH_SJMP_START(DCOND_EQ);
  3546. emith_ctx_read_c(DCOND_NE, arg2, SHR_PC * 4);
  3547. emith_and_r_r_imm(arg1, arg2, (ARRAY_SIZE(sh2s->branch_cache)-1)*4);
  3548. emith_add_r_r_r_lsl_ptr(arg1, CONTEXT_REG, arg1, sizeof(void *) == 8 ? 2 : 1);
  3549. emith_write_r_r_offs_c(DCOND_NE, arg2, arg1, offsetof(SH2, branch_cache));
  3550. emith_write_r_r_offs_ptr_c(DCOND_NE, RET_REG, arg1, offsetof(SH2, branch_cache) + sizeof(void *));
  3551. EMITH_SJMP_END(DCOND_EQ);
  3552. #endif
  3553. emit_block_entry();
  3554. // lookup failed, call sh2_translate()
  3555. emith_move_r_r_ptr(arg0, CONTEXT_REG);
  3556. emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  3557. emith_call(sh2_translate);
  3558. emit_block_entry();
  3559. // XXX: can't translate, fail
  3560. emith_call(dr_failure);
  3561. // sh2_drc_test_irq(void)
  3562. // assumes it's called from main function (may jump to dispatcher)
  3563. sh2_drc_test_irq = (void *)tcache_ptr;
  3564. emith_ctx_read(arg1, offsetof(SH2, pending_level));
  3565. sr = rcache_get_reg(SHR_SR, RC_GR_READ, NULL);
  3566. emith_lsr(arg0, sr, I_SHIFT);
  3567. emith_and_r_imm(arg0, 0x0f);
  3568. emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  3569. EMITH_SJMP_START(DCOND_GT);
  3570. emith_ret_c(DCOND_LE); // nope, return
  3571. EMITH_SJMP_END(DCOND_GT);
  3572. // adjust SP
  3573. tmp = rcache_get_reg(SHR_SP, RC_GR_RMW, NULL);
  3574. emith_sub_r_imm(tmp, 4*2);
  3575. rcache_clean();
  3576. // push SR
  3577. tmp = rcache_get_reg_arg(0, SHR_SP, NULL);
  3578. emith_add_r_imm(tmp, 4);
  3579. tmp = rcache_get_reg_arg(1, SHR_SR, NULL);
  3580. emith_clear_msb(tmp, tmp, 22);
  3581. emith_move_r_r_ptr(arg2, CONTEXT_REG);
  3582. emith_call(p32x_sh2_write32); // XXX: use sh2_drc_write32?
  3583. rcache_invalidate();
  3584. // push PC
  3585. rcache_get_reg_arg(0, SHR_SP, NULL);
  3586. emith_ctx_read(arg1, SHR_PC * 4);
  3587. emith_move_r_r_ptr(arg2, CONTEXT_REG);
  3588. emith_call(p32x_sh2_write32);
  3589. rcache_invalidate();
  3590. // update I, cycles, do callback
  3591. emith_ctx_read(arg1, offsetof(SH2, pending_level));
  3592. sr = rcache_get_reg(SHR_SR, RC_GR_RMW, NULL);
  3593. emith_bic_r_imm(sr, I);
  3594. emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  3595. emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  3596. rcache_flush();
  3597. emith_move_r_r_ptr(arg0, CONTEXT_REG);
  3598. emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
  3599. // obtain new PC
  3600. emith_lsl(arg0, RET_REG, 2);
  3601. emith_ctx_read(arg1, SHR_VBR * 4);
  3602. emith_add_r_r(arg0, arg1);
  3603. tmp = emit_memhandler_read(2);
  3604. emith_ctx_write(tmp, SHR_PC * 4);
  3605. #if defined(__i386__) || defined(__x86_64__)
  3606. emith_add_r_r_ptr_imm(xSP, xSP, sizeof(void *)); // fix stack
  3607. #endif
  3608. emith_jump(sh2_drc_dispatcher);
  3609. rcache_invalidate();
  3610. // sh2_drc_entry(SH2 *sh2)
  3611. sh2_drc_entry = (void *)tcache_ptr;
  3612. emith_sh2_drc_entry();
  3613. emith_move_r_r_ptr(CONTEXT_REG, arg0); // move ctx, arg0
  3614. emit_do_static_regs(0, arg2);
  3615. emith_call(sh2_drc_test_irq);
  3616. emith_jump(sh2_drc_dispatcher);
  3617. #ifdef PDB_NET
  3618. // debug
  3619. #define MAKE_READ_WRAPPER(func) { \
  3620. void *tmp = (void *)tcache_ptr; \
  3621. emith_push_ret(); \
  3622. emith_call(func); \
  3623. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  3624. emith_addf_r_r(arg2, arg0); \
  3625. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  3626. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  3627. emith_adc_r_imm(arg2, 0x01000000); \
  3628. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  3629. emith_pop_and_ret(); \
  3630. func = tmp; \
  3631. }
  3632. #define MAKE_WRITE_WRAPPER(func) { \
  3633. void *tmp = (void *)tcache_ptr; \
  3634. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
  3635. emith_addf_r_r(arg2, arg1); \
  3636. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
  3637. emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
  3638. emith_adc_r_imm(arg2, 0x01000000); \
  3639. emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
  3640. emith_move_r_r_ptr(arg2, CONTEXT_REG); \
  3641. emith_jump(func); \
  3642. func = tmp; \
  3643. }
  3644. MAKE_READ_WRAPPER(sh2_drc_read8);
  3645. MAKE_READ_WRAPPER(sh2_drc_read16);
  3646. MAKE_READ_WRAPPER(sh2_drc_read32);
  3647. MAKE_WRITE_WRAPPER(sh2_drc_write8);
  3648. MAKE_WRITE_WRAPPER(sh2_drc_write16);
  3649. MAKE_WRITE_WRAPPER(sh2_drc_write32);
  3650. #endif
  3651. rcache_invalidate();
  3652. #if (DRC_DEBUG & 4)
  3653. host_dasm_new_symbol(sh2_drc_entry);
  3654. host_dasm_new_symbol(sh2_drc_dispatcher);
  3655. host_dasm_new_symbol(sh2_drc_exit);
  3656. host_dasm_new_symbol(sh2_drc_test_irq);
  3657. host_dasm_new_symbol(sh2_drc_write8);
  3658. host_dasm_new_symbol(sh2_drc_write16);
  3659. host_dasm_new_symbol(sh2_drc_write32);
  3660. host_dasm_new_symbol(sh2_drc_read8);
  3661. host_dasm_new_symbol(sh2_drc_read16);
  3662. host_dasm_new_symbol(sh2_drc_read32);
  3663. #endif
  3664. }
  3665. static void sh2_smc_rm_block_entry(struct block_desc *bd, int tcache_id, u32 nolit)
  3666. {
  3667. struct block_link *bl;
  3668. u32 i;
  3669. dbg(2, " killing entry %08x-%08x,%08x-%08x, blkid %d,%d",
  3670. bd->addr, bd->addr + bd->size, bd->addr_lit, bd->addr_lit + bd->size_lit,
  3671. tcache_id, bd - block_tables[tcache_id]);
  3672. if (bd->addr == 0 || bd->entry_count == 0) {
  3673. dbg(1, " killing dead block!? %08x", bd->addr);
  3674. return;
  3675. }
  3676. // remove from hash table, make incoming links unresolved, revoke outgoing links
  3677. for (i = 0; i < bd->entry_count; i++) {
  3678. if (bd->active)
  3679. rm_from_hashlist(&bd->entryp[i], tcache_id);
  3680. for (bl = bd->entryp[i].o_links; bl != NULL; ) {
  3681. struct block_link *bl_next = bl->o_next;
  3682. if (bl->target) {
  3683. if (bl->prev)
  3684. bl->prev->next = bl->next;
  3685. else
  3686. bl->target->links = bl->next;
  3687. if (bl->next)
  3688. bl->next->prev = bl->prev;
  3689. bl->target = NULL;
  3690. } else if (bd->active)
  3691. rm_from_hashlist_unresolved(bl, tcache_id);
  3692. // free bl
  3693. bl->jump = NULL;
  3694. bl->next = blink_free[bl->tcache_id];
  3695. blink_free[bl->tcache_id] = bl;
  3696. bl = bl_next;
  3697. }
  3698. bd->entryp[i].o_links = NULL;
  3699. for (bl = bd->entryp[i].links; bl != NULL; ) {
  3700. struct block_link *bl_next = bl->next;
  3701. dbg(2, "- unlink from %p to pc %08x", bl->jump, bl->target_pc);
  3702. emith_jump_patch(bl->jump, sh2_drc_dispatcher);
  3703. // update cpu caches since the previous jump target doesn't exist anymore
  3704. host_instructions_updated(bl->jump, bl->jump+4);
  3705. add_to_hashlist_unresolved(bl, tcache_id);
  3706. bl = bl_next;
  3707. }
  3708. bd->entryp[i].links = NULL;
  3709. }
  3710. if (bd->active)
  3711. dr_mark_memory(-1, bd, tcache_id, nolit);
  3712. bd->addr = bd->size = bd->addr_lit = bd->size_lit = 0;
  3713. bd->entry_count = 0;
  3714. bd->active = 0;
  3715. rm_from_block_lists(bd);
  3716. }
  3717. static void sh2_smc_rm_blocks(u32 a, int tcache_id, u32 shift)
  3718. {
  3719. struct block_list **blist, *entry, *next;
  3720. u32 mask = ram_sizes[tcache_id] - 1;
  3721. u32 wtmask = ~0x20000000; // writethrough area mask
  3722. u32 start_addr, end_addr;
  3723. u32 start_lit, end_lit;
  3724. struct block_desc *block;
  3725. #if (DRC_DEBUG & 2)
  3726. int removed = 0;
  3727. #endif
3728. // need to check both the cached and the writethrough area
  3729. a &= wtmask;
  3730. blist = &inval_lookup[tcache_id][(a & mask) / INVAL_PAGE_SIZE];
  3731. entry = *blist;
  3732. while (entry != NULL) {
  3733. next = entry->next;
  3734. block = entry->block;
  3735. start_addr = block->addr & wtmask;
  3736. end_addr = start_addr + block->size;
  3737. start_lit = block->addr_lit & wtmask;
  3738. end_lit = start_lit + block->size_lit;
  3739. if ((start_addr <= a && a < end_addr) ||
  3740. (start_lit <= a && a < end_lit))
  3741. {
  3742. dbg(2, "smc remove @%08x", a);
  3743. end_addr = (start_lit <= a && block->size_lit ? a : 0);
  3744. sh2_smc_rm_block_entry(block, tcache_id, end_addr);
  3745. #if (DRC_DEBUG & 2)
  3746. removed = 1;
  3747. #endif
  3748. }
  3749. entry = next;
  3750. }
  3751. #if (DRC_DEBUG & 2)
  3752. if (!removed)
  3753. dbg(2, "rm_blocks called @%08x, no work?", a);
  3754. #endif
  3755. #if BRANCH_CACHE
  3756. if (tcache_id)
  3757. memset32(sh2s[tcache_id-1].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  3758. else {
  3759. memset32(sh2s[0].branch_cache, -1, sizeof(sh2s[0].branch_cache)/4);
  3760. memset32(sh2s[1].branch_cache, -1, sizeof(sh2s[1].branch_cache)/4);
  3761. }
  3762. #endif
  3763. }
  3764. void sh2_drc_wcheck_ram(unsigned int a, int val, SH2 *sh2)
  3765. {
  3766. dbg(2, "%csh2 smc check @%08x v=%d", sh2->is_slave ? 's' : 'm', a, val);
  3767. sh2_smc_rm_blocks(a, 0, SH2_DRCBLK_RAM_SHIFT);
  3768. }
  3769. void sh2_drc_wcheck_da(unsigned int a, int val, SH2 *sh2)
  3770. {
  3771. int cpuid = sh2->is_slave;
  3772. dbg(2, "%csh2 smc check @%08x v=%d", cpuid ? 's' : 'm', a, val);
  3773. sh2_smc_rm_blocks(a, 1 + cpuid, SH2_DRCBLK_DA_SHIFT);
  3774. }
  3775. int sh2_execute_drc(SH2 *sh2c, int cycles)
  3776. {
  3777. int ret_cycles;
  3778. // cycles are kept in SHR_SR unused bits (upper 20)
  3779. // bit11 contains T saved for delay slot
  3780. // others are usual SH2 flags
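// SR layout as used by the DRC (standard SH-2 bits plus private extras):
//   [31:12] cycle budget (signed)  [11] T_save
//   [9] M  [8] Q  [7:4] I3-I0  [1] S  [0] T
// 0x3f3 masks exactly M, Q, I3-I0, S and T.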
  3781. sh2c->sr &= 0x3f3;
  3782. sh2c->sr |= cycles << 12;
  3783. sh2_drc_entry(sh2c);
  3784. // TODO: irq cycles
  3785. ret_cycles = (signed int)sh2c->sr >> 12;
  3786. if (ret_cycles > 0)
  3787. dbg(1, "warning: drc returned with cycles: %d", ret_cycles);
  3788. sh2c->sr &= 0x3f3;
  3789. return ret_cycles;
  3790. }
  3791. static void block_stats(void)
  3792. {
  3793. #if (DRC_DEBUG & 2)
  3794. int c, b, i;
  3795. long total = 0;
  3796. printf("block stats:\n");
  3797. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  3798. for (i = 0; i < block_counts[b]; i++)
  3799. if (block_tables[b][i].addr != 0)
  3800. total += block_tables[b][i].refcount;
  3801. for (i = block_limit[b]; i < block_max_counts[b]; i++)
  3802. if (block_tables[b][i].addr != 0)
  3803. total += block_tables[b][i].refcount;
  3804. }
  3805. printf("total: %ld\n",total);
  3806. for (c = 0; c < 20; c++) {
  3807. struct block_desc *blk, *maxb = NULL;
  3808. int max = 0;
  3809. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  3810. for (i = 0; i < block_counts[b]; i++) {
  3811. blk = &block_tables[b][i];
  3812. if (blk->addr != 0 && blk->refcount > max) {
  3813. max = blk->refcount;
  3814. maxb = blk;
  3815. }
  3816. }
  3817. for (i = block_limit[b]; i < block_max_counts[b]; i++) {
  3818. blk = &block_tables[b][i];
  3819. if (blk->addr != 0 && blk->refcount > max) {
  3820. max = blk->refcount;
  3821. maxb = blk;
  3822. }
  3823. }
  3824. }
  3825. if (maxb == NULL)
  3826. break;
  3827. printf("%08x %p %9d %2.3f%%\n", maxb->addr, maxb->tcache_ptr, maxb->refcount,
  3828. (double)maxb->refcount / total * 100.0);
  3829. maxb->refcount = 0;
  3830. }
  3831. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  3832. for (i = 0; i < block_counts[b]; i++)
  3833. block_tables[b][i].refcount = 0;
  3834. for (i = block_limit[b]; i < block_max_counts[b]; i++)
  3835. block_tables[b][i].refcount = 0;
  3836. }
  3837. #endif
  3838. }
  3839. void entry_stats(void)
  3840. {
  3841. #if (DRC_DEBUG & 32)
  3842. int c, b, i, j;
  3843. long total = 0;
  3844. printf("block entry stats:\n");
  3845. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  3846. for (i = 0; i < block_counts[b]; i++)
  3847. for (j = 0; j < block_tables[b][i].entry_count; j++)
  3848. total += block_tables[b][i].entryp[j].entry_count;
  3849. for (i = block_limit[b]; i < block_max_counts[b]; i++)
  3850. for (j = 0; j < block_tables[b][i].entry_count; j++)
  3851. total += block_tables[b][i].entryp[j].entry_count;
  3852. }
  3853. printf("total: %ld\n",total);
  3854. for (c = 0; c < 20; c++) {
  3855. struct block_desc *blk;
  3856. struct block_entry *maxb = NULL;
  3857. int max = 0;
  3858. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  3859. for (i = 0; i < block_counts[b]; i++) {
  3860. blk = &block_tables[b][i];
  3861. for (j = 0; j < blk->entry_count; j++)
  3862. if (blk->entryp[j].entry_count > max) {
  3863. max = blk->entryp[j].entry_count;
  3864. maxb = &blk->entryp[j];
  3865. }
  3866. }
  3867. for (i = block_limit[b]; i < block_max_counts[b]; i++) {
  3868. blk = &block_tables[b][i];
  3869. for (j = 0; j < blk->entry_count; j++)
  3870. if (blk->entryp[j].entry_count > max) {
  3871. max = blk->entryp[j].entry_count;
  3872. maxb = &blk->entryp[j];
  3873. }
  3874. }
  3875. }
  3876. if (maxb == NULL)
  3877. break;
  3878. printf("%08x %p %9d %2.3f%%\n", maxb->pc, maxb->tcache_ptr, maxb->entry_count,
  3879. (double)100 * maxb->entry_count / total);
  3880. maxb->entry_count = 0;
  3881. }
  3882. for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
  3883. for (i = 0; i < block_counts[b]; i++)
  3884. for (j = 0; j < block_tables[b][i].entry_count; j++)
  3885. block_tables[b][i].entryp[j].entry_count = 0;
  3886. for (i = block_limit[b]; i < block_max_counts[b]; i++)
  3887. for (j = 0; j < block_tables[b][i].entry_count; j++)
  3888. block_tables[b][i].entryp[j].entry_count = 0;
  3889. }
  3890. #endif
  3891. }
  3892. static void backtrace(void)
  3893. {
  3894. #if (DRC_DEBUG & 1024)
  3895. int i;
  3896. printf("backtrace master:\n");
  3897. for (i = 0; i < ARRAY_SIZE(csh2[0]); i++)
  3898. SH2_DUMP(&csh2[0][i], "bt msh2");
  3899. printf("backtrace slave:\n");
  3900. for (i = 0; i < ARRAY_SIZE(csh2[1]); i++)
  3901. SH2_DUMP(&csh2[1][i], "bt ssh2");
  3902. #endif
  3903. }
  3904. static void state_dump(void)
  3905. {
  3906. #if (DRC_DEBUG & 2048)
  3907. int i;
  3908. SH2_DUMP(&sh2s[0], "master");
  3909. printf("VBR msh2: %x\n", sh2s[0].vbr);
  3910. for (i = 0; i < 0x60; i++) {
  3911. printf("%08x ",p32x_sh2_read32(sh2s[0].vbr + i*4, &sh2s[0]));
  3912. if ((i+1) % 8 == 0) printf("\n");
  3913. }
  3914. printf("stack msh2: %x\n", sh2s[0].r[15]);
  3915. for (i = -0x30; i < 0x30; i++) {
  3916. printf("%08x ",p32x_sh2_read32(sh2s[0].r[15] + i*4, &sh2s[0]));
  3917. if ((i+1) % 8 == 0) printf("\n");
  3918. }
  3919. printf("branch cache master:\n");
  3920. for (i = 0; i < ARRAY_SIZE(sh2s[0].branch_cache); i++) {
  3921. printf("%08x ",sh2s[0].branch_cache[i].pc);
  3922. if ((i+1) % 8 == 0) printf("\n");
  3923. }
  3924. SH2_DUMP(&sh2s[1], "slave");
  3925. printf("VBR ssh2: %x\n", sh2s[1].vbr);
  3926. for (i = 0; i < 0x60; i++) {
  3927. printf("%08x ",p32x_sh2_read32(sh2s[1].vbr + i*4, &sh2s[1]));
  3928. if ((i+1) % 8 == 0) printf("\n");
  3929. }
  3930. printf("stack ssh2: %x\n", sh2s[1].r[15]);
  3931. for (i = -0x30; i < 0x30; i++) {
  3932. printf("%08x ",p32x_sh2_read32(sh2s[1].r[15] + i*4, &sh2s[1]));
  3933. if ((i+1) % 8 == 0) printf("\n");
  3934. }
  3935. printf("branch cache slave:\n");
  3936. for (i = 0; i < ARRAY_SIZE(sh2s[1].branch_cache); i++) {
  3937. printf("%08x ",sh2s[1].branch_cache[i].pc);
  3938. if ((i+1) % 8 == 0) printf("\n");
  3939. }
  3940. #endif
  3941. }
  3942. void sh2_drc_flush_all(void)
  3943. {
  3944. backtrace();
  3945. state_dump();
  3946. block_stats();
  3947. entry_stats();
  3948. flush_tcache(0);
  3949. flush_tcache(1);
  3950. flush_tcache(2);
  3951. Pico32x.emu_flags &= ~P32XF_DRC_ROM_C;
  3952. }
  3953. void sh2_drc_mem_setup(SH2 *sh2)
  3954. {
  3955. // fill the convenience pointers
  3956. sh2->p_bios = sh2->is_slave ? Pico32xMem->sh2_rom_s.w : Pico32xMem->sh2_rom_m.w;
  3957. sh2->p_da = sh2->data_array;
  3958. sh2->p_sdram = Pico32xMem->sdram;
  3959. sh2->p_rom = Pico.rom;
  3960. // sh2->p_dram filled in dram bank switching
  3961. sh2->p_drcblk_da = Pico32xMem->drcblk_da[!!sh2->is_slave];
  3962. sh2->p_drcblk_ram = Pico32xMem->drcblk_ram;
  3963. }
  3964. void sh2_drc_frame(void)
  3965. {
  3966. }
  3967. int sh2_drc_init(SH2 *sh2)
  3968. {
  3969. int i;
  3970. if (block_tables[0] == NULL)
  3971. {
  3972. for (i = 0; i < TCACHE_BUFFERS; i++) {
  3973. block_tables[i] = calloc(block_max_counts[i], sizeof(*block_tables[0]));
  3974. if (block_tables[i] == NULL)
  3975. goto fail;
  3976. // max 2 block links (exits) per block
  3977. block_link_pool[i] = calloc(block_link_pool_max_counts[i],
  3978. sizeof(*block_link_pool[0]));
  3979. if (block_link_pool[i] == NULL)
  3980. goto fail;
  3981. inval_lookup[i] = calloc(ram_sizes[i] / INVAL_PAGE_SIZE,
  3982. sizeof(inval_lookup[0]));
  3983. if (inval_lookup[i] == NULL)
  3984. goto fail;
  3985. hash_tables[i] = calloc(hash_table_sizes[i], sizeof(*hash_tables[0]));
  3986. if (hash_tables[i] == NULL)
  3987. goto fail;
  3988. unresolved_links[i] = calloc(hash_table_sizes[i], sizeof(*unresolved_links[0]));
  3989. if (unresolved_links[i] == NULL)
  3990. goto fail;
  3991. }
  3992. memset(block_counts, 0, sizeof(block_counts));
  3993. for (i = 0; i < ARRAY_SIZE(block_counts); i++) {
  3994. block_limit[i] = block_max_counts[i] - 1;
  3995. }
  3996. memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
  3997. for (i = 0; i < ARRAY_SIZE(blink_free); i++) {
  3998. blink_free[i] = NULL;
  3999. }
  4000. drc_cmn_init();
  4001. rcache_init();
  4002. tcache_ptr = tcache;
  4003. sh2_generate_utils();
  4004. host_instructions_updated(tcache, tcache_ptr);
  4005. tcache_bases[0] = tcache_ptrs[0] = tcache_ptr;
  4006. tcache_limit[0] = tcache_bases[0] + tcache_sizes[0] - (tcache_ptr-tcache);
  4007. for (i = 1; i < ARRAY_SIZE(tcache_bases); i++) {
  4008. tcache_bases[i] = tcache_ptrs[i] = tcache_bases[i - 1] + tcache_sizes[i - 1];
  4009. tcache_limit[i] = tcache_bases[i] + tcache_sizes[i];
  4010. }
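// banks are carved consecutively out of the single tcache buffer:
// bank 0 starts right after the generated utils, and each further bank
// starts where the previous one's size ends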
  4011. #if (DRC_DEBUG & 4)
  4012. for (i = 0; i < ARRAY_SIZE(block_tables); i++)
  4013. tcache_dsm_ptrs[i] = tcache_bases[i];
  4014. // disasm the utils
  4015. tcache_dsm_ptrs[0] = tcache;
  4016. do_host_disasm(0);
  4017. fflush(stdout);
  4018. #endif
  4019. #if (DRC_DEBUG & 1)
  4020. hash_collisions = 0;
  4021. #endif
  4022. }
  4023. memset(sh2->branch_cache, -1, sizeof(sh2->branch_cache));
  4024. return 0;
  4025. fail:
  4026. sh2_drc_finish(sh2);
  4027. return -1;
  4028. }
  4029. void sh2_drc_finish(SH2 *sh2)
  4030. {
  4031. struct block_list *bl, *bn;
  4032. int i;
  4033. if (block_tables[0] == NULL)
  4034. return;
  4035. sh2_drc_flush_all();
  4036. for (i = 0; i < TCACHE_BUFFERS; i++) {
  4037. #if (DRC_DEBUG & 4)
  4038. printf("~~~ tcache %d\n", i);
  4039. #if 0
  4040. tcache_dsm_ptrs[i] = tcache_bases[i];
  4041. tcache_ptr = tcache_ptrs[i];
  4042. do_host_disasm(i);
  4043. if (tcache_limit[i] < tcache_bases[i] + tcache_sizes[i]) {
  4044. tcache_dsm_ptrs[i] = tcache_limit[i];
  4045. tcache_ptr = tcache_bases[i] + tcache_sizes[i];
  4046. do_host_disasm(i);
  4047. }
  4048. #endif
  4049. printf("max links: %d\n", block_link_pool_counts[i]);
  4050. #endif
  4051. if (block_tables[i] != NULL)
  4052. free(block_tables[i]);
  4053. block_tables[i] = NULL;
  4054. if (block_link_pool[i] != NULL)
  4055. free(block_link_pool[i]);
  4056. block_link_pool[i] = NULL;
  4057. blink_free[i] = NULL;
  4058. if (inval_lookup[i] != NULL)
  4059. free(inval_lookup[i]);
  4060. inval_lookup[i] = NULL;
  4061. if (hash_tables[i] != NULL) {
  4062. free(hash_tables[i]);
  4063. hash_tables[i] = NULL;
  4064. }
  4065. }
  4066. for (bl = blist_free; bl; bl = bn) {
  4067. bn = bl->next;
  4068. free(bl);
  4069. }
  4070. blist_free = NULL;
  4071. drc_cmn_cleanup();
  4072. }
  4073. #endif /* DRC_SH2 */
  4074. static void *dr_get_pc_base(u32 pc, int is_slave)
  4075. {
  4076. void *ret = NULL;
  4077. u32 mask = 0;
  4078. if ((pc & ~0x7ff) == 0) {
  4079. // BIOS
  4080. ret = is_slave ? Pico32xMem->sh2_rom_s.w : Pico32xMem->sh2_rom_m.w;
  4081. mask = 0x7ff;
  4082. }
  4083. else if ((pc & 0xfffff000) == 0xc0000000) {
  4084. // data array
  4085. ret = sh2s[is_slave].data_array;
  4086. mask = 0xfff;
  4087. }
  4088. else if ((pc & 0xc6000000) == 0x06000000) {
  4089. // SDRAM
  4090. ret = Pico32xMem->sdram;
  4091. mask = 0x03ffff;
  4092. }
  4093. else if ((pc & 0xc6000000) == 0x02000000) {
  4094. // ROM
  4095. if ((pc & 0x3fffff) < Pico.romsize)
  4096. ret = Pico.rom;
  4097. mask = 0x3fffff;
  4098. }
  4099. if (ret == NULL)
  4100. return (void *)-1; // NULL is valid value
  4101. return (char *)ret - (pc & ~mask);
  4102. }
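// the returned base is biased by the region start so that the guest PC
// indexes the host buffer directly, e.g. (illustrative):
//   u16 *base = dr_get_pc_base(pc, is_slave);
//   u16 op = base[pc / 2]; // opcode at pc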
void scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc_out,
  u32 *base_literals_out, u32 *end_literals_out)
{
  u16 *dr_pc_base;
  u32 pc, op, tmp;
  u32 end_pc, end_literals = 0;
  u32 lowest_literal = 0;
  u32 lowest_mova = 0;
  struct op_data *opd;
  int next_is_delay = 0;
  int end_block = 0;
  int i, i_end;

  memset(op_flags, 0, sizeof(*op_flags) * BLOCK_INSN_LIMIT);
  op_flags[0] |= OF_BTARGET; // block start is always a target

  dr_pc_base = dr_get_pc_base(base_pc, is_slave);

  // 1st pass: disassemble
  for (i = 0, pc = base_pc; ; i++, pc += 2) {
    // we need an ops[] entry after the last one initialized,
    // so do it before end_block checks
    opd = &ops[i];
    opd->op = OP_UNHANDLED;
    opd->rm = -1;
    opd->source = opd->dest = 0;
    opd->cycles = 1;
    opd->imm = 0;

    if (next_is_delay) {
      op_flags[i] |= OF_DELAY_OP;
      next_is_delay = 0;
    }
    else if (end_block || i >= BLOCK_INSN_LIMIT - 2)
      break;
    else if ((lowest_mova && lowest_mova <= pc) ||
             (lowest_literal && lowest_literal <= pc))
      break; // text area collides with data area

    op = FETCH_OP(pc);
    switch ((op & 0xf000) >> 12)
    {
    /////////////////////////////////////////////
    case 0x00:
      switch (op & 0x0f)
      {
      case 0x02:
        switch (GET_Fx())
        {
        case 0: // STC SR,Rn 0000nnnn00000010
          tmp = SHR_SR;
          break;
        case 1: // STC GBR,Rn 0000nnnn00010010
          tmp = SHR_GBR;
          break;
        case 2: // STC VBR,Rn 0000nnnn00100010
          tmp = SHR_VBR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(tmp);
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x03:
        CHECK_UNHANDLED_BITS(0xd0, undefined);
        // BRAF Rm 0000mmmm00100011
        // BSRF Rm 0000mmmm00000011
        opd->op = OP_BRANCH_RF;
        opd->rm = GET_Rn();
        opd->source = BITMASK2(SHR_PC, opd->rm);
        opd->dest = BITMASK1(SHR_PC);
        if (!(op & 0x20))
          opd->dest |= BITMASK1(SHR_PR);
        opd->cycles = 2;
        next_is_delay = 1;
        if (!(opd->dest & BITMASK1(SHR_PR)))
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        else
          op_flags[i+1+next_is_delay] |= OF_BTARGET;
        break;
      case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
      case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
      case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x07:
        // MUL.L Rm,Rn 0000nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        opd->cycles = 2;
        break;
      case 0x08:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // CLRT 0000000000001000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 0;
          break;
        case 1: // SETT 0000000000011000
          opd->op = OP_SETCLRT;
          opd->dest = BITMASK1(SHR_T);
          opd->imm = 1;
          break;
        case 2: // CLRMAC 0000000000101000
          opd->dest = BITMASK3(SHR_T, SHR_MACL, SHR_MACH);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x09:
        switch (GET_Fx())
        {
        case 0: // NOP 0000000000001001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          break;
        case 1: // DIV0U 0000000000011001
          CHECK_UNHANDLED_BITS(0xf00, undefined);
          opd->dest = BITMASK2(SHR_SR, SHR_T);
          break;
        case 2: // MOVT Rn 0000nnnn00101001
          opd->source = BITMASK1(SHR_T);
          opd->dest = BITMASK1(GET_Rn());
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // STS MACH,Rn 0000nnnn00001010
          tmp = SHR_MACH;
          break;
        case 1: // STS MACL,Rn 0000nnnn00011010
          tmp = SHR_MACL;
          break;
        case 2: // STS PR,Rn 0000nnnn00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(tmp);
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0b:
        CHECK_UNHANDLED_BITS(0xf00, undefined);
        switch (GET_Fx())
        {
        case 0: // RTS 0000000000001011
          opd->op = OP_BRANCH_R;
          opd->rm = SHR_PR;
          opd->source = BITMASK1(opd->rm);
          opd->dest = BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        case 1: // SLEEP 0000000000011011
          opd->op = OP_SLEEP;
          end_block = 1;
          break;
        case 2: // RTE 0000000000101011
          opd->op = OP_RTE;
          opd->source = BITMASK1(SHR_SP);
          opd->dest = BITMASK3(SHR_SP, SHR_SR, SHR_PC);
          opd->cycles = 4;
          next_is_delay = 1;
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
      case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
      case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x01:
      // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), GET_Rn());
      opd->dest = BITMASK1(SHR_MEM);
      opd->imm = (op & 0x0f) * 4;
      break;

    /////////////////////////////////////////////
    case 0x02:
      switch (op & 0x0f)
      {
      case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
      case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
      case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MEM);
        break;
      case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
      case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
      case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_SR);
        break;
      case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
      case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
      case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
      case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_MACL);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x03:
      switch (op & 0x0f)
      {
      case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
      case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
      case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
      case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
      case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(SHR_T);
        break;
      case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
        opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_SR);
        opd->dest = BITMASK2(GET_Rn(), SHR_SR);
        break;
      case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
      case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
        opd->cycles = 2;
        break;
      case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
      case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
      case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
        opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
      case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
        opd->source = BITMASK2(GET_Rm(), GET_Rn());
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x04:
      switch (op & 0x0f)
      {
      case 0x00:
        switch (GET_Fx())
        {
        case 0: // SHLL Rn 0100nnnn00000000
        case 2: // SHAL Rn 0100nnnn00100000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // DT Rn 0100nnnn00010000
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x01:
        switch (GET_Fx())
        {
        case 0: // SHLR Rn 0100nnnn00000001
        case 2: // SHAR Rn 0100nnnn00100001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 1: // CMP/PZ Rn 0100nnnn00010001
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x02:
      case 0x03:
        switch (op & 0x3f)
        {
        case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
          tmp = SHR_MACH;
          break;
        case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
          tmp = SHR_MACL;
          break;
        case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
          tmp = SHR_PR;
          break;
        case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
          tmp = SHR_SR;
          opd->cycles = 2;
          break;
        case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
          tmp = SHR_GBR;
          opd->cycles = 2;
          break;
        case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
          tmp = SHR_VBR;
          opd->cycles = 2;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), tmp);
        opd->dest = BITMASK2(GET_Rn(), SHR_MEM);
        break;
      case 0x04:
      case 0x05:
        switch (op & 0x3f)
        {
        case 0x04: // ROTL Rn 0100nnnn00000100
        case 0x05: // ROTR Rn 0100nnnn00000101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x24: // ROTCL Rn 0100nnnn00100100
        case 0x25: // ROTCR Rn 0100nnnn00100101
          opd->source = BITMASK2(GET_Rn(), SHR_T);
          opd->dest = BITMASK2(GET_Rn(), SHR_T);
          break;
        case 0x15: // CMP/PL Rn 0100nnnn00010101
          opd->source = BITMASK1(GET_Rn());
          opd->dest = BITMASK1(SHR_T);
          break;
        default:
          goto undefined;
        }
        break;
      case 0x06:
      case 0x07:
        switch (op & 0x3f)
        {
        case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
          tmp = SHR_MACH;
          break;
        case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
          tmp = SHR_MACL;
          break;
        case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
          tmp = SHR_PR;
          break;
        case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
          tmp = SHR_SR;
          opd->cycles = 3;
          break;
        case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
          tmp = SHR_GBR;
          opd->cycles = 3;
          break;
        case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
          tmp = SHR_VBR;
          opd->cycles = 3;
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK2(GET_Rn(), SHR_MEM);
        opd->dest = BITMASK2(GET_Rn(), tmp);
        break;
      case 0x08:
      case 0x09:
        switch (GET_Fx())
        {
        case 0:
          // SHLL2 Rn 0100nnnn00001000
          // SHLR2 Rn 0100nnnn00001001
          break;
        case 1:
          // SHLL8 Rn 0100nnnn00011000
          // SHLR8 Rn 0100nnnn00011001
          break;
        case 2:
          // SHLL16 Rn 0100nnnn00101000
          // SHLR16 Rn 0100nnnn00101001
          break;
        default:
          goto undefined;
        }
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(GET_Rn());
        break;
      case 0x0a:
        switch (GET_Fx())
        {
        case 0: // LDS Rm,MACH 0100mmmm00001010
          tmp = SHR_MACH;
          break;
        case 1: // LDS Rm,MACL 0100mmmm00011010
          tmp = SHR_MACL;
          break;
        case 2: // LDS Rm,PR 0100mmmm00101010
          tmp = SHR_PR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0b:
        switch (GET_Fx())
        {
        case 0: // JSR @Rm 0100mmmm00001011
          opd->dest = BITMASK1(SHR_PR);
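          // fallthrough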
        case 2: // JMP @Rm 0100mmmm00101011
          opd->op = OP_BRANCH_R;
          opd->rm = GET_Rn();
          opd->source = BITMASK1(opd->rm);
          opd->dest |= BITMASK1(SHR_PC);
          opd->cycles = 2;
          next_is_delay = 1;
          if (!(opd->dest & BITMASK1(SHR_PR)))
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
          else
            op_flags[i+1+next_is_delay] |= OF_BTARGET;
          break;
        case 1: // TAS.B @Rn 0100nnnn00011011
          opd->source = BITMASK2(GET_Rn(), SHR_MEM);
          opd->dest = BITMASK2(SHR_T, SHR_MEM);
          opd->cycles = 4;
          break;
        default:
          goto undefined;
        }
        break;
      case 0x0e:
        switch (GET_Fx())
        {
        case 0: // LDC Rm,SR 0100mmmm00001110
          tmp = SHR_SR;
          break;
        case 1: // LDC Rm,GBR 0100mmmm00011110
          tmp = SHR_GBR;
          break;
        case 2: // LDC Rm,VBR 0100mmmm00101110
          tmp = SHR_VBR;
          break;
        default:
          goto undefined;
        }
        opd->op = OP_MOVE;
        opd->source = BITMASK1(GET_Rn());
        opd->dest = BITMASK1(tmp);
        break;
      case 0x0f:
        // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
        opd->source = BITMASK6(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH, SHR_MEM);
        opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x05:
      // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
      opd->source = BITMASK2(GET_Rm(), SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (op & 0x0f) * 4;
      break;

    /////////////////////////////////////////////
    case 0x06:
      switch (op & 0x0f)
      {
      case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
      case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
      case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
        opd->dest = BITMASK2(GET_Rm(), GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        break;
      case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
      case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
      case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
        opd->dest = BITMASK1(GET_Rn());
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        break;
      case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
        opd->source = BITMASK2(GET_Rm(), SHR_T);
        opd->dest = BITMASK2(GET_Rn(), SHR_T);
        break;
      case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
        opd->op = OP_MOVE;
        goto arith_rmrn;
      case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
      case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
      case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
      case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
      case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
      case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
      case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
      case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
      arith_rmrn:
        opd->source = BITMASK1(GET_Rm());
        opd->dest = BITMASK1(GET_Rn());
        break;
      }
      break;

    /////////////////////////////////////////////
    case 0x07:
      // ADD #imm,Rn 0111nnnniiiiiiii
      opd->source = opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    /////////////////////////////////////////////
    case 0x08:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f);
        break;
      case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
        opd->source = BITMASK2(GET_Rm(), SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = (op & 0x0f) * 2;
        break;
      case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f);
        break;
      case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
        opd->source = BITMASK2(GET_Rm(), SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->imm = (op & 0x0f) * 2;
        break;
      case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = (s8)op;
        break;
      case 0x0d00: // BT/S label 10001101dddddddd
      case 0x0f00: // BF/S label 10001111dddddddd
        next_is_delay = 1;
        // fallthrough
      case 0x0900: // BT label 10001001dddddddd
      case 0x0b00: // BF label 10001011dddddddd
        opd->op = (op & 0x0200) ? OP_BRANCH_CF : OP_BRANCH_CT;
        opd->source = BITMASK2(SHR_PC, SHR_T);
        opd->dest = BITMASK1(SHR_PC);
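        // 8-bit displacement, sign-extended and scaled by 2;
        // branch target is pc + 4 + disp*2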
        opd->imm = ((signed int)(op << 24) >> 23);
        opd->imm += pc + 4;
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x09:
      // MOV.W @(disp,PC),Rn 1001nnnndddddddd
      opd->op = OP_LOAD_POOL;
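      // PC-relative base: in a delay slot it is taken from the branch
      // target, which is only known for direct branches (OP_BRANCH); a
      // never-taken branch (OP_BRANCH_N) falls through, so the normal base
      // applies; anything else leaves the address unknown (tmp = 0), which
      // disables literal tracking for this load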
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = tmp + 2 + (op & 0xff) * 2;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 1;
      break;

    /////////////////////////////////////////////
    case 0x0b:
      // BSR label 1011dddddddddddd
      opd->dest = BITMASK1(SHR_PR);
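      // fallthrough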
    case 0x0a:
      // BRA label 1010dddddddddddd
      opd->op = OP_BRANCH;
      opd->source = BITMASK1(SHR_PC);
      opd->dest |= BITMASK1(SHR_PC);
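      // 12-bit displacement, sign-extended and scaled by 2;
      // branch target is pc + 4 + disp*2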
      opd->imm = ((signed int)(op << 20) >> 19);
      opd->imm += pc + 4;
      opd->cycles = 2;
      next_is_delay = 1;
      if (!(opd->dest & BITMASK1(SHR_PR))) {
        if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2) {
          op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
          if (opd->imm <= pc)
            end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
        } else
          end_block = !(op_flags[i+1+next_is_delay] & OF_BTARGET);
      } else
        op_flags[i+1+next_is_delay] |= OF_BTARGET;
      break;

    /////////////////////////////////////////////
    case 0x0c:
      switch (op & 0x0f00)
      {
      case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
      case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
      case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_R0);
        opd->dest = BITMASK1(SHR_MEM);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        break;
      case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
      case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
      case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
        opd->source = BITMASK2(SHR_GBR, SHR_MEM);
        opd->dest = BITMASK1(SHR_R0);
        opd->size = (op & 0x300) >> 8;
        opd->imm = (op & 0xff) << opd->size;
        break;
      case 0x0300: // TRAPA #imm 11000011iiiiiiii
        opd->op = OP_TRAPA;
        opd->source = BITMASK3(SHR_SP, SHR_PC, SHR_SR);
        opd->dest = BITMASK2(SHR_SP, SHR_PC);
        opd->imm = (op & 0xff);
        opd->cycles = 8;
        op_flags[i+1] |= OF_BTARGET;
        break;
      case 0x0700: // MOVA @(disp,PC),R0 11000111dddddddd
        opd->op = OP_MOVA;
        tmp = pc + 2;
        if (op_flags[i] & OF_DELAY_OP) {
          if (ops[i-1].op == OP_BRANCH)
            tmp = ops[i-1].imm;
          else if (ops[i-1].op != OP_BRANCH_N)
            tmp = 0;
        }
        opd->dest = BITMASK1(SHR_R0);
        if (tmp) {
          opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
          if (opd->imm >= base_pc) {
            if (lowest_mova == 0 || opd->imm < lowest_mova)
              lowest_mova = opd->imm;
          }
        }
        break;
      case 0x0800: // TST #imm,R0 11001000iiiiiiii
        opd->source = BITMASK1(SHR_R0);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        break;
      case 0x0900: // AND #imm,R0 11001001iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0b00: // OR #imm,R0 11001011iiiiiiii
        opd->source = opd->dest = BITMASK1(SHR_R0);
        opd->imm = op & 0xff;
        break;
      case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_T);
        opd->imm = op & 0xff;
        opd->cycles = 3;
        break;
      case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
      case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
      case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
        opd->source = BITMASK3(SHR_GBR, SHR_R0, SHR_MEM);
        opd->dest = BITMASK1(SHR_MEM);
        opd->imm = op & 0xff;
        opd->cycles = 3;
        break;
      default:
        goto undefined;
      }
      break;

    /////////////////////////////////////////////
    case 0x0d:
      // MOV.L @(disp,PC),Rn 1101nnnndddddddd
      opd->op = OP_LOAD_POOL;
      tmp = pc + 2;
      if (op_flags[i] & OF_DELAY_OP) {
        if (ops[i-1].op == OP_BRANCH)
          tmp = ops[i-1].imm;
        else if (ops[i-1].op != OP_BRANCH_N)
          tmp = 0;
      }
      opd->source = BITMASK2(SHR_PC, SHR_MEM);
      opd->dest = BITMASK1(GET_Rn());
      if (tmp) {
        opd->imm = (tmp + 2 + (op & 0xff) * 4) & ~3;
        if (lowest_literal == 0 || opd->imm < lowest_literal)
          lowest_literal = opd->imm;
      }
      opd->size = 2;
      break;

    /////////////////////////////////////////////
    case 0x0e:
      // MOV #imm,Rn 1110nnnniiiiiiii
      opd->dest = BITMASK1(GET_Rn());
      opd->imm = (s8)op;
      break;

    default:
    undefined:
      opd->op = OP_UNDEFINED;
      // an unhandled instruction is probably not code if it's not the 1st insn
      if (!(op_flags[i] & OF_DELAY_OP) && pc != base_pc)
        goto end;
      break;
    }
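
    // branches are not allowed in delay slots on SH-2 (slot illegal
    // instruction); flag the anomaly and treat the insn as undefined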
    if (op_flags[i] & OF_DELAY_OP) {
      switch (opd->op) {
      case OP_BRANCH:
      case OP_BRANCH_N:
      case OP_BRANCH_CT:
      case OP_BRANCH_CF:
      case OP_BRANCH_R:
      case OP_BRANCH_RF:
        elprintf(EL_ANOMALY, "%csh2 drc: branch in DS @ %08x",
          is_slave ? 's' : 'm', pc);
        opd->op = OP_UNDEFINED;
        op_flags[i] |= OF_B_IN_DS;
        next_is_delay = 0;
        break;
      }
    }
  }
end:
  i_end = i;
  end_pc = pc;

  // 2nd pass: some analysis
  lowest_literal = end_literals = lowest_mova = 0;
  for (i = 0; i < i_end; i++) {
    opd = &ops[i];

    // propagate T (TODO: DIV0U)
    if ((opd->op == OP_SETCLRT && !opd->imm) || opd->op == OP_BRANCH_CT)
      op_flags[i + 1] |= OF_T_CLEAR;
    else if ((opd->op == OP_SETCLRT && opd->imm) || opd->op == OP_BRANCH_CF)
      op_flags[i + 1] |= OF_T_SET;

    if ((op_flags[i] & OF_BTARGET) || (opd->dest & BITMASK1(SHR_T)))
      op_flags[i] &= ~(OF_T_SET | OF_T_CLEAR);
    else
      op_flags[i + 1] |= op_flags[i] & (OF_T_SET | OF_T_CLEAR);
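
    // if T is statically known here, fold the conditional branch:
    // never taken becomes OP_BRANCH_N, always taken becomes OP_BRANCH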
    if ((opd->op == OP_BRANCH_CT && (op_flags[i] & OF_T_CLEAR)) ||
        (opd->op == OP_BRANCH_CF && (op_flags[i] & OF_T_SET)))
      opd->op = OP_BRANCH_N;
    else if ((opd->op == OP_BRANCH_CT && (op_flags[i] & OF_T_SET)) ||
             (opd->op == OP_BRANCH_CF && (op_flags[i] & OF_T_CLEAR))) {
      opd->op = OP_BRANCH;
      if (op_flags[i + 1] & OF_DELAY_OP)
        opd->cycles = 2;
      else
        opd->cycles = 3;
    }

    // "overscan" detection: unreachable code after unconditional branch
    // this can happen if the insn after a forward branch isn't a local target
    if (OP_ISBRAUC(opd->op)) {
      if (op_flags[i + 1] & OF_DELAY_OP) {
        if (i_end > i + 2 && !(op_flags[i + 2] & OF_BTARGET))
          i_end = i + 2;
      } else {
        if (i_end > i + 1 && !(op_flags[i + 1] & OF_BTARGET))
          i_end = i + 1;
      }
    }

    // literal pool size detection
    if (opd->op == OP_MOVA && opd->imm >= base_pc)
      if (lowest_mova == 0 || opd->imm < lowest_mova)
        lowest_mova = opd->imm;
    if (opd->op == OP_LOAD_POOL) {
      if (opd->imm >= base_pc && opd->imm < end_pc + MAX_LITERAL_OFFSET) {
        if (end_literals < opd->imm + opd->size * 2)
          end_literals = opd->imm + opd->size * 2;
        if (lowest_literal == 0 || lowest_literal > opd->imm)
          lowest_literal = opd->imm;
        if (opd->size == 2) {
          // tweak for NFL: treat a 32bit literal as an address and check if it
          // points to the literal space. In that case handle it like MOVA.
          tmp = FETCH32(opd->imm) & ~0x20000000; // MUST ignore wt bit here
          if (tmp >= end_pc && tmp < end_pc + MAX_LITERAL_OFFSET)
            if (lowest_mova == 0 || tmp < lowest_mova)
              lowest_mova = tmp;
        }
      }
    }
  }
  end_pc = base_pc + i_end * 2;

  // end_literals is used to decide to inline a literal or not
  // XXX: need better detection if this actually is used in write
  if (lowest_literal >= base_pc) {
    if (lowest_literal < end_pc) {
      dbg(1, "warning: lowest_literal=%08x < end_pc=%08x", lowest_literal, end_pc);
      // TODO: does this always mean end_pc covers data?
    }
  }
  if (lowest_mova >= base_pc) {
    if (lowest_mova < end_literals) {
      dbg(1, "warning: mova=%08x < end_literals=%08x", lowest_mova, end_literals);
      end_literals = lowest_mova;
    }
    if (lowest_mova < end_pc) {
      dbg(1, "warning: mova=%08x < end_pc=%08x", lowest_mova, end_pc);
      end_literals = end_pc;
    }
  }
  if (lowest_literal >= end_literals)
    lowest_literal = end_literals;
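
  // if no literals were seen, report an empty literal range at end_pc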
  *end_pc_out = end_pc;
  if (base_literals_out != NULL)
    *base_literals_out = (lowest_literal ?: end_pc);
  if (end_literals_out != NULL)
    *end_literals_out = (end_literals ?: end_pc);
}

// vim:shiftwidth=2:ts=2:expandtab