octeon_qlm.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <dm.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>

#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-mio-defs.h>
#include <mach/cvmx-pciercx-defs.h>
#include <mach/cvmx-pemx-defs.h>
#include <mach/cvmx-pexp-defs.h>
#include <mach/cvmx-rst-defs.h>
#include <mach/cvmx-sata-defs.h>
#include <mach/cvmx-sli-defs.h>
#include <mach/cvmx-sriomaintx-defs.h>
#include <mach/cvmx-sriox-defs.h>

DECLARE_GLOBAL_DATA_PTR;

/** 2.5GHz with 100MHz reference clock */
#define R_2_5G_REFCLK100		0x0
/** 5.0GHz with 100MHz reference clock */
#define R_5G_REFCLK100			0x1
/** 8.0GHz with 100MHz reference clock */
#define R_8G_REFCLK100			0x2
/** 1.25GHz with 156.25MHz reference clock */
#define R_125G_REFCLK15625_KX		0x3
/** 3.125GHz with 156.25MHz reference clock (XAUI) */
#define R_3125G_REFCLK15625_XAUI	0x4
/** 10.3125GHz with 156.25MHz reference clock (XFI/XLAUI) */
#define R_103125G_REFCLK15625_KR	0x5
/** 1.25GHz with 156.25MHz reference clock (SGMII) */
#define R_125G_REFCLK15625_SGMII	0x6
/** 5GHz with 156.25MHz reference clock (QSGMII) */
#define R_5G_REFCLK15625_QSGMII		0x7
/** 6.25GHz with 156.25MHz reference clock (RXAUI/25G) */
#define R_625G_REFCLK15625_RXAUI	0x8
/** 2.5GHz with 125MHz reference clock */
#define R_2_5G_REFCLK125		0x9
/** 5GHz with 125MHz reference clock */
#define R_5G_REFCLK125			0xa
/** 8GHz with 125MHz reference clock */
#define R_8G_REFCLK125			0xb
/** Must be last, number of modes */
#define R_NUM_LANE_MODES		0xc

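/**
 * Check whether the measured reference clock of a QLM is within +/-10%
 * of an expected frequency.
 *
 * @param qlm           QLM to check
 * @param reference_mhz Expected reference clock in MHz
 *
 * Return: non-zero if the measured clock matches, else 0
 */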
int cvmx_qlm_is_ref_clock(int qlm, int reference_mhz)
{
	int ref_clock = cvmx_qlm_measure_clock(qlm);
	int mhz = ref_clock / 1000000;
	int range = reference_mhz / 10;

	return ((mhz >= reference_mhz - range) && (mhz <= reference_mhz + range));
}

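/**
 * Map a requested lane speed to the MIO_QLMX_CFG[QLM_SPD] encoding for
 * the reference clock detected on the QLM.
 *
 * @param qlm   QLM whose reference clock is measured
 * @param speed Lane speed in Mbaud (e.g. 1250, 2500, 3125, 5000, 6250)
 *
 * Return: the QLM_SPD encoding, or 0xf (disabled) if the speed is not
 *	   supported with the detected reference clock
 */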
static int __get_qlm_spd(int qlm, int speed)
{
	int qlm_spd = 0xf;

	if (cvmx_qlm_is_ref_clock(qlm, 100)) {
		if (speed == 1250)
			qlm_spd = 0x3;
		else if (speed == 2500)
			qlm_spd = 0x2;
		else if (speed == 5000)
			qlm_spd = 0x0;
		else
			qlm_spd = 0xf;
	} else if (cvmx_qlm_is_ref_clock(qlm, 125)) {
		if (speed == 1250)
			qlm_spd = 0xa;
		else if (speed == 2500)
			qlm_spd = 0x9;
		else if (speed == 3125)
			qlm_spd = 0x8;
		else if (speed == 5000)
			qlm_spd = 0x6;
		else if (speed == 6250)
			qlm_spd = 0x5;
		else
			qlm_spd = 0xf;
	} else if (cvmx_qlm_is_ref_clock(qlm, 156)) {
		if (speed == 1250)
			qlm_spd = 0x4;
		else if (speed == 2500)
			qlm_spd = 0x7;
		else if (speed == 3125)
			qlm_spd = 0xe;
		else if (speed == 3750)
			qlm_spd = 0xd;
		else if (speed == 5000)
			qlm_spd = 0xb;
		else if (speed == 6250)
			qlm_spd = 0xc;
		else
			qlm_spd = 0xf;
	} else if (cvmx_qlm_is_ref_clock(qlm, 161)) {
		if (speed == 6316)
			qlm_spd = 0xc;
	}
	return qlm_spd;
}

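/**
 * Program the PEM reset controls of a CN61XX PCIe port for root-complex
 * or endpoint operation; in endpoint mode the port is also released from
 * soft reset.
 *
 * @param pcie_port    PCIe port (PEM0 or PEM1) to configure
 * @param root_complex 1 for root-complex mode, 0 for endpoint mode
 */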
static void __set_qlm_pcie_mode_61xx(int pcie_port, int root_complex)
{
	int rc = root_complex ? 1 : 0;
	int ep = root_complex ? 0 : 1;
	cvmx_ciu_soft_prst1_t soft_prst1;
	cvmx_ciu_soft_prst_t soft_prst;
	cvmx_mio_rst_ctlx_t rst_ctl;

	if (pcie_port) {
		soft_prst1.u64 = csr_rd(CVMX_CIU_SOFT_PRST1);
		soft_prst1.s.soft_prst = 1;
		csr_wr(CVMX_CIU_SOFT_PRST1, soft_prst1.u64);
	} else {
		soft_prst.u64 = csr_rd(CVMX_CIU_SOFT_PRST);
		soft_prst.s.soft_prst = 1;
		csr_wr(CVMX_CIU_SOFT_PRST, soft_prst.u64);
	}

	rst_ctl.u64 = csr_rd(CVMX_MIO_RST_CTLX(pcie_port));
	rst_ctl.s.prst_link = rc;
	rst_ctl.s.rst_link = ep;
	rst_ctl.s.prtmode = rc;
	rst_ctl.s.rst_drv = rc;
	rst_ctl.s.rst_rcv = 0;
	rst_ctl.s.rst_chip = ep;
	csr_wr(CVMX_MIO_RST_CTLX(pcie_port), rst_ctl.u64);

	if (root_complex == 0) {
		if (pcie_port) {
			soft_prst1.u64 = csr_rd(CVMX_CIU_SOFT_PRST1);
			soft_prst1.s.soft_prst = 0;
			csr_wr(CVMX_CIU_SOFT_PRST1, soft_prst1.u64);
		} else {
			soft_prst.u64 = csr_rd(CVMX_CIU_SOFT_PRST);
			soft_prst.s.soft_prst = 0;
			csr_wr(CVMX_CIU_SOFT_PRST, soft_prst.u64);
		}
	}
}

/**
 * Configure QLM speed and mode for CN61XX by programming
 * MIO_QLMX_CFG[QLM_SPD, QLM_CFG].
 *
 * @param qlm     The QLM to configure
 * @param speed   The speed the QLM needs to be configured in MHz.
 * @param mode    The QLM to be configured as SGMII/XAUI/PCIe.
 *                QLM 0: 0 = PCIe0 1X4, 1 = Reserved, 2 = SGMII1, 3 = XAUI1
 *                QLM 1: 0 = PCIe1 1x2, 1 = PCIe(0/1) 2x1, 2 - 3 = Reserved
 *                QLM 2: 0 - 1 = Reserved, 2 = SGMII0, 3 = XAUI0
 * @param rc      Only used for PCIe, rc = 1 for root complex mode, 0 for
 *                EP mode.
 * @param pcie2x1 Only used when QLM1 is in PCIe 2x1 mode. QLM_SPD takes a
 *                different value depending on how PEM0/PEM1 need to be
 *                configured:
 *                0x0 - both PEM0 & PEM1 are in gen1 mode.
 *                0x1 - PEM0 in gen2 and PEM1 in gen1 mode.
 *                0x2 - PEM0 in gen1 and PEM1 in gen2 mode.
 *                0x3 - both PEM0 & PEM1 are in gen2 mode.
 *                The speed argument is ignored in this mode; QLM_SPD is
 *                set based on the pcie2x1 value.
 *
 * Return: 0 on success, -1 on error.
 */
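/*
 * Example (illustrative values): to bring up QLM0 as a PCIe gen1 x4 root
 * complex from a 100MHz reference clock, a board could call
 * octeon_configure_qlm_cn61xx(0, 2500, 0, 1, 0).
 */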
static int octeon_configure_qlm_cn61xx(int qlm, int speed, int mode, int rc, int pcie2x1)
{
	cvmx_mio_qlmx_cfg_t qlm_cfg;

	/* The QLM speed varies for SGMII/XAUI and PCIe mode. And depends on
	 * reference clock.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN61XX))
		return -1;

	if (qlm < 3) {
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
	} else {
		debug("WARNING: Invalid QLM(%d) passed\n", qlm);
		return -1;
	}

	switch (qlm) {
	/* SGMII/XAUI mode */
	case 2: {
		if (mode < 2) {
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}
		qlm_cfg.s.qlm_spd = __get_qlm_spd(qlm, speed);
		qlm_cfg.s.qlm_cfg = mode;
		break;
	}
	case 1: {
		if (mode == 1) { /* 2x1 mode */
			cvmx_mio_qlmx_cfg_t qlm0;

			/* When QLM0 is configured as PCIe (QLM_CFG=0x0)
			 * and enabled (QLM_SPD != 0xf), QLM1 cannot be
			 * configured as PCIe 2x1 mode (QLM_CFG=0x1)
			 * and enabled (QLM_SPD != 0xf).
			 */
			qlm0.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
			if (qlm0.s.qlm_spd != 0xf && qlm0.s.qlm_cfg == 0) {
  201. debug("Invalid mode(%d) for QLM(%d) as QLM1 is PCIe mode\n",
  202. mode, qlm);
				qlm_cfg.s.qlm_spd = 0xf;
				break;
			}

			/* Set QLM_SPD based on reference clock and mode */
			if (cvmx_qlm_is_ref_clock(qlm, 100)) {
				if (pcie2x1 == 0x3)
					qlm_cfg.s.qlm_spd = 0x0;
				else if (pcie2x1 == 0x1)
					qlm_cfg.s.qlm_spd = 0x2;
				else if (pcie2x1 == 0x2)
					qlm_cfg.s.qlm_spd = 0x1;
				else if (pcie2x1 == 0x0)
					qlm_cfg.s.qlm_spd = 0x3;
				else
					qlm_cfg.s.qlm_spd = 0xf;
			} else if (cvmx_qlm_is_ref_clock(qlm, 125)) {
				if (pcie2x1 == 0x3)
					qlm_cfg.s.qlm_spd = 0x4;
				else if (pcie2x1 == 0x1)
					qlm_cfg.s.qlm_spd = 0x6;
				else if (pcie2x1 == 0x2)
					qlm_cfg.s.qlm_spd = 0x9;
				else if (pcie2x1 == 0x0)
					qlm_cfg.s.qlm_spd = 0x7;
				else
					qlm_cfg.s.qlm_spd = 0xf;
			}
			qlm_cfg.s.qlm_cfg = mode;
			csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);

			/* Set PCIe mode bits */
			__set_qlm_pcie_mode_61xx(0, rc);
			__set_qlm_pcie_mode_61xx(1, rc);
			return 0;
		} else if (mode > 1) {
			debug("Invalid mode(%d) for QLM(%d).\n", mode, qlm);
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}

		/* Set speed and mode for PCIe 1x2 mode. */
		if (cvmx_qlm_is_ref_clock(qlm, 100)) {
			if (speed == 5000)
				qlm_cfg.s.qlm_spd = 0x1;
			else if (speed == 2500)
				qlm_cfg.s.qlm_spd = 0x2;
			else
				qlm_cfg.s.qlm_spd = 0xf;
		} else if (cvmx_qlm_is_ref_clock(qlm, 125)) {
			if (speed == 5000)
				qlm_cfg.s.qlm_spd = 0x4;
			else if (speed == 2500)
				qlm_cfg.s.qlm_spd = 0x6;
			else
				qlm_cfg.s.qlm_spd = 0xf;
		} else {
			qlm_cfg.s.qlm_spd = 0xf;
		}

		qlm_cfg.s.qlm_cfg = mode;
		csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);

		/* Set PCIe mode bits */
		__set_qlm_pcie_mode_61xx(1, rc);
		return 0;
	}
	case 0: {
		/* QLM_CFG = 0x1 - Reserved */
		if (mode == 1) {
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}
		/* QLM_CFG = 0x0 - PCIe 1x4(PEM0) */
		if (mode == 0 && speed != 5000 && speed != 2500) {
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}

		/* Set speed and mode */
		qlm_cfg.s.qlm_spd = __get_qlm_spd(qlm, speed);
		qlm_cfg.s.qlm_cfg = mode;
		csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);

		/* Set PCIe mode bits */
		if (mode == 0)
			__set_qlm_pcie_mode_61xx(0, rc);

		return 0;
	}
	default:
		debug("WARNING: Invalid QLM(%d) passed\n", qlm);
		qlm_cfg.s.qlm_spd = 0xf;
	}
	csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);
	return 0;
}

/* qlm		: DLM to configure
 * baud_mhz	: speed of the DLM
 * ref_clk_sel	: reference clock speed selection where:
 *			0: 100MHz
 *			1: 125MHz
 *			2: 156.25MHz
 *
 * ref_clk_input: reference clock input where:
 *			0: DLMC_REF_CLK0_[P,N]
 *			1: DLMC_REF_CLK1_[P,N]
 *			2: DLM0_REF_CLK_[P,N] (only valid for QLM 0)
 * is_sff7000_rxaui : boolean to indicate whether qlm is RXAUI on SFF7000
 */
static int __dlm_setup_pll_cn70xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input,
				  int is_sff7000_rxaui)
{
	cvmx_gserx_dlmx_test_powerdown_t dlmx_test_powerdown;
	cvmx_gserx_dlmx_ref_ssp_en_t dlmx_ref_ssp_en;
	cvmx_gserx_dlmx_mpll_en_t dlmx_mpll_en;
	cvmx_gserx_dlmx_phy_reset_t dlmx_phy_reset;
	cvmx_gserx_dlmx_tx_amplitude_t tx_amplitude;
	cvmx_gserx_dlmx_tx_preemph_t tx_preemph;
	cvmx_gserx_dlmx_rx_eq_t rx_eq;
	cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;
	cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
	int gmx_ref_clk = 100;

	debug("%s(%d, %d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input,
	      is_sff7000_rxaui);

	if (ref_clk_sel == 1)
		gmx_ref_clk = 125;
	else if (ref_clk_sel == 2)
		gmx_ref_clk = 156;

	if (qlm != 0 && ref_clk_input == 2) {
		printf("%s: Error: can only use reference clock inputs 0 or 1 for DLM %d\n",
		       __func__, qlm);
		return -1;
	}

	/* Hardware defaults are invalid */
	tx_amplitude.u64 = csr_rd(CVMX_GSERX_DLMX_TX_AMPLITUDE(qlm, 0));
	if (is_sff7000_rxaui) {
		tx_amplitude.s.tx0_amplitude = 100;
		tx_amplitude.s.tx1_amplitude = 100;
	} else {
		tx_amplitude.s.tx0_amplitude = 65;
		tx_amplitude.s.tx1_amplitude = 65;
	}
	csr_wr(CVMX_GSERX_DLMX_TX_AMPLITUDE(qlm, 0), tx_amplitude.u64);

	tx_preemph.u64 = csr_rd(CVMX_GSERX_DLMX_TX_PREEMPH(qlm, 0));
	if (is_sff7000_rxaui) {
		tx_preemph.s.tx0_preemph = 0;
		tx_preemph.s.tx1_preemph = 0;
	} else {
		tx_preemph.s.tx0_preemph = 22;
		tx_preemph.s.tx1_preemph = 22;
	}
	csr_wr(CVMX_GSERX_DLMX_TX_PREEMPH(qlm, 0), tx_preemph.u64);

	rx_eq.u64 = csr_rd(CVMX_GSERX_DLMX_RX_EQ(qlm, 0));
	rx_eq.s.rx0_eq = 0;
	rx_eq.s.rx1_eq = 0;
	csr_wr(CVMX_GSERX_DLMX_RX_EQ(qlm, 0), rx_eq.u64);

	/* 1. Write GSER0_DLM0_REF_USE_PAD[REF_USE_PAD] = 1 (to select
	 *    reference-clock input)
	 *    The documentation for this register in the HRM is useless since
	 *    it says it selects between two different clocks that are not
	 *    documented anywhere. What it really does is select between
	 *    DLM0_REF_CLK_[P,N] if 1 and DLMC_REF_CLK[0,1]_[P,N] if 0.
	 *
	 *    This register must be 0 for DLMs 1 and 2 and can only be 1 for
	 *    DLM 0.
	 */
	csr_wr(CVMX_GSERX_DLMX_REF_USE_PAD(0, 0), ((ref_clk_input == 2) && (qlm == 0)) ? 1 : 0);

	/* Reference clock was already chosen before we got here */
	/* 2. Write GSER0_DLM0_REFCLK_SEL[REFCLK_SEL] if required for
	 *    reference-clock selection.
	 *
	 *    If GSERX_DLMX_REF_USE_PAD is 1 then this register is ignored.
	 */
	csr_wr(CVMX_GSERX_DLMX_REFCLK_SEL(0, 0), ref_clk_input & 1);

	/* Reference clock was already chosen before we got here */
	/* 3. If required, write GSER0_DLM0_REF_CLKDIV2[REF_CLKDIV2] (must be
	 *    set if reference clock > 100 MHz)
	 */
	/* Apply workaround for Errata (G-20669) MPLL may not come up. */
	ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
	if (gmx_ref_clk == 100)
		ref_clkdiv2.s.ref_clkdiv2 = 0;
	else
		ref_clkdiv2.s.ref_clkdiv2 = 1;
	csr_wr(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0), ref_clkdiv2.u64);

	/* 1. Ensure GSER(0)_DLM(0..2)_PHY_RESET[PHY_RESET] is set. */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* 2. If SGMII or QSGMII or RXAUI (i.e. if DLM0) set
	 *    GSER(0)_DLM(0)_MPLL_EN[MPLL_EN] to one.
	 */
	/* 7. Set GSER0_DLM0_MPLL_EN[MPLL_EN] = 1 */
	dlmx_mpll_en.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_EN(0, 0));
	dlmx_mpll_en.s.mpll_en = 1;
	csr_wr(CVMX_GSERX_DLMX_MPLL_EN(0, 0), dlmx_mpll_en.u64);

	/* 3. Set GSER(0)_DLM(0..2)_MPLL_MULTIPLIER[MPLL_MULTIPLIER]
	 *    to the value in the preceding table, which is different
	 *    than the desired setting prescribed by the HRM.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	if (gmx_ref_clk == 100)
		mpll_multiplier.s.mpll_multiplier = 35;
	else if (gmx_ref_clk == 125)
		mpll_multiplier.s.mpll_multiplier = 56;
	else
		mpll_multiplier.s.mpll_multiplier = 45;
	debug("%s: Setting mpll multiplier to %u for DLM%d, baud %d, clock rate %uMHz\n",
	      __func__, mpll_multiplier.s.mpll_multiplier, qlm, baud_mhz, gmx_ref_clk);
	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	/* 5. Clear GSER0_DLM0_TEST_POWERDOWN[TEST_POWERDOWN] */
	dlmx_test_powerdown.u64 = csr_rd(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0));
	dlmx_test_powerdown.s.test_powerdown = 0;
	csr_wr(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0), dlmx_test_powerdown.u64);

	/* 6. Set GSER0_DLM0_REF_SSP_EN[REF_SSP_EN] = 1 */
	dlmx_ref_ssp_en.u64 = csr_rd(CVMX_GSERX_DLMX_REF_SSP_EN(qlm, 0));
	dlmx_ref_ssp_en.s.ref_ssp_en = 1;
	csr_wr(CVMX_GSERX_DLMX_REF_SSP_EN(qlm, 0), dlmx_ref_ssp_en.u64);

	/* 8. Clear GSER0_DLM0_PHY_RESET[PHY_RESET] = 0 */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* 5. If PCIe or SATA (i.e. if DLM1 or DLM2), set both MPLL_EN
	 *    and MPLL_EN_OVRD to one in GSER(0)_PHY(1..2)_OVRD_IN_LO.
	 */

	/* 6. Decrease MPLL_MULTIPLIER by one continually until it
	 *    reaches the desired long-term setting, ensuring that each
	 *    MPLL_MULTIPLIER value is constant for at least 1 msec before
	 *    changing to the next value. The desired long-term setting is
	 *    as indicated in HRM tables 21-1, 21-2, and 21-3. This is not
	 *    required with the HRM sequence.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	__cvmx_qlm_set_mult(qlm, baud_mhz, mpll_multiplier.s.mpll_multiplier);

	/* 9. Poll until the MPLL locks. Wait for
	 *    GSER0_DLM0_MPLL_STATUS[MPLL_STATUS] = 1
	 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_MPLL_STATUS(qlm, 0),
				  cvmx_gserx_dlmx_mpll_status_t, mpll_status, ==, 1, 10000)) {
		printf("PLL for DLM%d failed to lock\n", qlm);
		return -1;
	}

	return 0;
}

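/**
 * Bring up the DLM0 transmit lanes on CN70XX for whichever GMX interfaces
 * are enabled in GMX(0..1)_INF_MODE, setting the per-lane TX rate from
 * the requested speed and reference clock.
 *
 * @param speed       Lane speed in Mbaud (1250, 2500, 3125, 5000 or 6250)
 * @param ref_clk_sel Reference clock select (OCTEON_QLM_REF_CLK_*)
 *
 * Return: 0 on success, -1 on error
 */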
static int __dlm0_setup_tx_cn70xx(int speed, int ref_clk_sel)
{
	int need0, need1;
	cvmx_gmxx_inf_mode_t mode0, mode1;
	cvmx_gserx_dlmx_tx_rate_t rate;
	cvmx_gserx_dlmx_tx_en_t en;
	cvmx_gserx_dlmx_tx_cm_en_t cm_en;
	cvmx_gserx_dlmx_tx_data_en_t data_en;
	cvmx_gserx_dlmx_tx_reset_t tx_reset;

	debug("%s(%d, %d)\n", __func__, speed, ref_clk_sel);
	mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
	mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));

	/* Which lanes do we need? */
	need0 = (mode0.s.mode != CVMX_GMX_INF_MODE_DISABLED);
	need1 = (mode1.s.mode != CVMX_GMX_INF_MODE_DISABLED) ||
		(mode0.s.mode == CVMX_GMX_INF_MODE_RXAUI);

	/* 1. Write GSER0_DLM0_TX_RATE[TXn_RATE] (Set according to required
	 *    data rate (see Table 21-1).
	 */
	rate.u64 = csr_rd(CVMX_GSERX_DLMX_TX_RATE(0, 0));
	debug("%s: speed: %d\n", __func__, speed);
	switch (speed) {
	case 1250:
	case 2500:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ:	/* 100MHz */
		case OCTEON_QLM_REF_CLK_125MHZ:	/* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ:	/* 156.25MHz */
			rate.s.tx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			rate.s.tx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 3125:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ:	/* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ:	/* 156.25MHz */
			rate.s.tx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			rate.s.tx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 5000:	/* QSGMII only */
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ:	/* 100MHz */
			rate.s.tx0_rate = 0;
			rate.s.tx1_rate = 0;
			break;
		case OCTEON_QLM_REF_CLK_125MHZ:	/* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ:	/* 156.25MHz */
			rate.s.tx0_rate = 0;
			rate.s.tx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 6250:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ:	/* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ:	/* 156.25MHz */
			rate.s.tx0_rate = 0;
			rate.s.tx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	default:
		printf("%s: Invalid rate %d\n", __func__, speed);
		return -1;
	}
	debug("%s: tx 0 rate: %d, tx 1 rate: %d\n", __func__, rate.s.tx0_rate, rate.s.tx1_rate);
	csr_wr(CVMX_GSERX_DLMX_TX_RATE(0, 0), rate.u64);

	/* 2. Set GSER0_DLM0_TX_EN[TXn_EN] = 1 */
	en.u64 = csr_rd(CVMX_GSERX_DLMX_TX_EN(0, 0));
	en.s.tx0_en = need0;
	en.s.tx1_en = need1;
	csr_wr(CVMX_GSERX_DLMX_TX_EN(0, 0), en.u64);

	/* 3. Set GSER0_DLM0_TX_CM_EN[TXn_CM_EN] = 1 */
	cm_en.u64 = csr_rd(CVMX_GSERX_DLMX_TX_CM_EN(0, 0));
	cm_en.s.tx0_cm_en = need0;
	cm_en.s.tx1_cm_en = need1;
	csr_wr(CVMX_GSERX_DLMX_TX_CM_EN(0, 0), cm_en.u64);

	/* 4. Set GSER0_DLM0_TX_DATA_EN[TXn_DATA_EN] = 1 */
	data_en.u64 = csr_rd(CVMX_GSERX_DLMX_TX_DATA_EN(0, 0));
	data_en.s.tx0_data_en = need0;
	data_en.s.tx1_data_en = need1;
	csr_wr(CVMX_GSERX_DLMX_TX_DATA_EN(0, 0), data_en.u64);

	/* 5. Clear GSER0_DLM0_TX_RESET[TXn_RESET] = 0 */
	tx_reset.u64 = csr_rd(CVMX_GSERX_DLMX_TX_RESET(0, 0));
	tx_reset.s.tx0_reset = !need0;
	tx_reset.s.tx1_reset = !need1;
	csr_wr(CVMX_GSERX_DLMX_TX_RESET(0, 0), tx_reset.u64);

	/* 6. Poll GSER0_DLM0_TX_STATUS[TXn_STATUS, TXn_CM_STATUS] until both
	 *    are set to 1. This prevents GMX from transmitting until the DLM
	 *    is ready.
	 */
	if (need0) {
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx0_status, ==, 1,
					  10000)) {
			printf("DLM0 TX0 status fail\n");
			return -1;
		}
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx0_cm_status, ==, 1,
					  10000)) {
			printf("DLM0 TX0 CM status fail\n");
			return -1;
		}
	}
	if (need1) {
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx1_status, ==, 1,
					  10000)) {
			printf("DLM0 TX1 status fail\n");
			return -1;
		}
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx1_cm_status, ==, 1,
					  10000)) {
			printf("DLM0 TX1 CM status fail\n");
			return -1;
		}
	}
	return 0;
}

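/**
 * Bring up the DLM0 receive lanes on CN70XX; the RX rates programmed here
 * must match the TX rates set by __dlm0_setup_tx_cn70xx().
 *
 * @param speed       Lane speed in Mbaud (1250, 2500, 3125, 5000 or 6250)
 * @param ref_clk_sel Reference clock select (OCTEON_QLM_REF_CLK_*)
 *
 * Return: 0 on success, -1 on error
 */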
static int __dlm0_setup_rx_cn70xx(int speed, int ref_clk_sel)
{
	int need0, need1;
	cvmx_gmxx_inf_mode_t mode0, mode1;
	cvmx_gserx_dlmx_rx_rate_t rate;
	cvmx_gserx_dlmx_rx_pll_en_t pll_en;
	cvmx_gserx_dlmx_rx_data_en_t data_en;
	cvmx_gserx_dlmx_rx_reset_t rx_reset;

	debug("%s(%d, %d)\n", __func__, speed, ref_clk_sel);
	mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
	mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));

	/* Which lanes do we need? */
	need0 = (mode0.s.mode != CVMX_GMX_INF_MODE_DISABLED);
	need1 = (mode1.s.mode != CVMX_GMX_INF_MODE_DISABLED) ||
		(mode0.s.mode == CVMX_GMX_INF_MODE_RXAUI);

	/* 1. Write GSER0_DLM0_RX_RATE[RXn_RATE] (must match the
	 *    GSER0_DLM0_TX_RATE[TXn_RATE] setting).
	 */
	rate.u64 = csr_rd(CVMX_GSERX_DLMX_RX_RATE(0, 0));
	switch (speed) {
	case 1250:
	case 2500:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ:	/* 100MHz */
		case OCTEON_QLM_REF_CLK_125MHZ:	/* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ:	/* 156.25MHz */
			rate.s.rx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			rate.s.rx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 3125:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ:	/* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ:	/* 156.25MHz */
			rate.s.rx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			rate.s.rx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 5000:	/* QSGMII only */
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ:	/* 100MHz */
		case OCTEON_QLM_REF_CLK_125MHZ:	/* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ:	/* 156.25MHz */
			rate.s.rx0_rate = 0;
			rate.s.rx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 6250:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ:	/* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ:	/* 156.25MHz */
			rate.s.rx0_rate = 0;
			rate.s.rx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	default:
		printf("%s: Invalid rate %d\n", __func__, speed);
		return -1;
	}
	debug("%s: rx 0 rate: %d, rx 1 rate: %d\n", __func__, rate.s.rx0_rate, rate.s.rx1_rate);
	csr_wr(CVMX_GSERX_DLMX_RX_RATE(0, 0), rate.u64);

	/* 2. Set GSER0_DLM0_RX_PLL_EN[RXn_PLL_EN] = 1 */
	pll_en.u64 = csr_rd(CVMX_GSERX_DLMX_RX_PLL_EN(0, 0));
	pll_en.s.rx0_pll_en = need0;
	pll_en.s.rx1_pll_en = need1;
	csr_wr(CVMX_GSERX_DLMX_RX_PLL_EN(0, 0), pll_en.u64);

	/* 3. Set GSER0_DLM0_RX_DATA_EN[RXn_DATA_EN] = 1 */
	data_en.u64 = csr_rd(CVMX_GSERX_DLMX_RX_DATA_EN(0, 0));
	data_en.s.rx0_data_en = need0;
	data_en.s.rx1_data_en = need1;
	csr_wr(CVMX_GSERX_DLMX_RX_DATA_EN(0, 0), data_en.u64);

	/* 4. Clear GSER0_DLM0_RX_RESET[RXn_RESET] = 0. Now the GMX can be
	 *    enabled: set GMX(0..1)_INF_MODE[EN] = 1
	 */
	rx_reset.u64 = csr_rd(CVMX_GSERX_DLMX_RX_RESET(0, 0));
	rx_reset.s.rx0_reset = !need0;
	rx_reset.s.rx1_reset = !need1;
	csr_wr(CVMX_GSERX_DLMX_RX_RESET(0, 0), rx_reset.u64);
	return 0;
}

static int a_clk;

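/**
 * Initialize the CN70XX SATA UCTL bridge: assert the UAHC/UCTL resets and
 * program the ACLK divider from the IOI clock (gd->bus_clk), capping the
 * ACLK at 333MHz. The resulting rate in Hz is stored in a_clk.
 *
 * Worked example (illustrative input clock): with gd->bus_clk = 800MHz,
 * divisor = roundup(800 / 333) = 3, so a_clkdiv = 2 and the resulting
 * ACLK is 800MHz / 3 = ~266MHz.
 *
 * Return: 0 on success, -1 on error
 */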
static int __dlm2_sata_uctl_init_cn70xx(void)
{
	cvmx_sata_uctl_ctl_t uctl_ctl;
	const int MAX_A_CLK = 333000000;	/* Max of 333MHz */
	int divisor, a_clkdiv;

	/* 1. Wait for all voltages to reach a stable state. Ensure the
	 *    reference clock is up and stable.
	 */
	/* 2. Wait for IOI reset to deassert. */
	/* 3. Optionally program the GPIO CSRs for SATA features.
	 *    a. For cold-presence detect:
	 *       i. Select a GPIO for the input and program GPIO_SATA_CTL[sel]
	 *          for port0 and port1.
	 *       ii. Select a GPIO for the output and program
	 *           GPIO_BIT_CFG*[OUTPUT_SEL] for port0 and port1.
	 *    b. For mechanical-presence detect, select a GPIO for the input
	 *       and program GPIO_SATA_CTL[SEL] for port0/port1.
	 *    c. For LED activity, select a GPIO for the output and program
	 *       GPIO_BIT_CFG*[OUTPUT_SEL] for port0/port1.
	 */
	/* 4. Assert all resets:
	 *    a. UAHC reset: SATA_UCTL_CTL[UAHC_RST] = 1
	 *    b. UCTL reset: SATA_UCTL_CTL[UCTL_RST] = 1
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.sata_uahc_rst = 1;
	uctl_ctl.s.sata_uctl_rst = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* 5. Configure the ACLK:
	 *    a. Reset the clock dividers: SATA_UCTL_CTL[A_CLKDIV_RST] = 1.
	 *    b. Select the ACLK frequency (400 MHz maximum)
	 *       i. SATA_UCTL_CTL[A_CLKDIV] = desired value,
	 *       ii. SATA_UCTL_CTL[A_CLKDIV_EN] = 1 to enable the ACLK,
	 *    c. Deassert the ACLK clock divider reset:
	 *       SATA_UCTL_CTL[A_CLKDIV_RST] = 0
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.a_clkdiv_rst = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);

	divisor = (gd->bus_clk + MAX_A_CLK - 1) / MAX_A_CLK;
	if (divisor <= 4) {
		a_clkdiv = divisor - 1;
	} else if (divisor <= 6) {
		a_clkdiv = 4;
		divisor = 6;
	} else if (divisor <= 8) {
		a_clkdiv = 5;
		divisor = 8;
	} else if (divisor <= 16) {
		a_clkdiv = 6;
		divisor = 16;
	} else if (divisor <= 24) {
		a_clkdiv = 7;
		divisor = 24;
	} else {
		printf("Unable to determine SATA clock divisor\n");
		return -1;
	}

	/* Calculate the final clock rate */
	a_clk = gd->bus_clk / divisor;

	uctl_ctl.s.a_clkdiv_sel = a_clkdiv;
	uctl_ctl.s.a_clk_en = 1;
	uctl_ctl.s.a_clk_byp_sel = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.a_clkdiv_rst = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	udelay(1);

	return 0;
}

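/**
 * Initialize a CN70XX DLM for SATA (DLM2 carries the SATA lanes).
 *
 * @param qlm           DLM to configure
 * @param baud_mhz      Baud rate for SATA
 * @param ref_clk_sel   Reference clock speed: 0 = 100MHz, 1 = 125MHz,
 *                      2 = 156.25MHz
 * @param ref_clk_input Reference clock input: 0 = DLMC_REF_CLK0_[P,N],
 *                      1 = DLMC_REF_CLK1_[P,N], 2 = DLM0_REF_CLK_[P,N]
 *
 * Return: 0 on success, -1 on error
 */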
  742. static int __sata_dlm_init_cn70xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
  743. {
  744. cvmx_gserx_sata_cfg_t sata_cfg;
  745. cvmx_gserx_sata_lane_rst_t sata_lane_rst;
  746. cvmx_gserx_dlmx_phy_reset_t dlmx_phy_reset;
  747. cvmx_gserx_dlmx_test_powerdown_t dlmx_test_powerdown;
  748. cvmx_gserx_sata_ref_ssp_en_t ref_ssp_en;
  749. cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
  750. cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;
  751. cvmx_sata_uctl_shim_cfg_t shim_cfg;
  752. cvmx_gserx_phyx_ovrd_in_lo_t ovrd_in;
  753. cvmx_sata_uctl_ctl_t uctl_ctl;
  754. int sata_ref_clk;
  755. debug("%s(%d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input);
  756. switch (ref_clk_sel) {
  757. case 0:
  758. sata_ref_clk = 100;
  759. break;
  760. case 1:
  761. sata_ref_clk = 125;
  762. break;
  763. case 2:
  764. sata_ref_clk = 156;
  765. break;
  766. default:
  767. printf("%s: Invalid reference clock select %d for qlm %d\n", __func__,
  768. ref_clk_sel, qlm);
  769. return -1;
  770. }
  771. /* 5. Set GSERX0_SATA_CFG[SATA_EN] = 1 to configure DLM2 multiplexing.
  772. */
  773. sata_cfg.u64 = csr_rd(CVMX_GSERX_SATA_CFG(0));
  774. sata_cfg.s.sata_en = 1;
  775. csr_wr(CVMX_GSERX_SATA_CFG(0), sata_cfg.u64);
  776. /* 1. Write GSER(0)_DLM2_REFCLK_SEL[REFCLK_SEL] if required for
  777. * reference-clock selection.
  778. */
  779. if (ref_clk_input < 2) {
  780. csr_wr(CVMX_GSERX_DLMX_REFCLK_SEL(qlm, 0), ref_clk_input);
  781. csr_wr(CVMX_GSERX_DLMX_REF_USE_PAD(qlm, 0), 0);
  782. } else {
  783. csr_wr(CVMX_GSERX_DLMX_REF_USE_PAD(qlm, 0), 1);
  784. }
  785. ref_ssp_en.u64 = csr_rd(CVMX_GSERX_SATA_REF_SSP_EN(0));
  786. ref_ssp_en.s.ref_ssp_en = 1;
  787. csr_wr(CVMX_GSERX_SATA_REF_SSP_EN(0), ref_ssp_en.u64);
  788. /* Apply workaround for Errata (G-20669) MPLL may not come up. */
  789. /* Set REF_CLKDIV2 based on the Ref Clock */
  790. ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
  791. if (sata_ref_clk == 100)
  792. ref_clkdiv2.s.ref_clkdiv2 = 0;
  793. else
  794. ref_clkdiv2.s.ref_clkdiv2 = 1;
  795. csr_wr(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0), ref_clkdiv2.u64);
  796. /* 1. Ensure GSER(0)_DLM(0..2)_PHY_RESET[PHY_RESET] is set. */
  797. dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
  798. dlmx_phy_reset.s.phy_reset = 1;
  799. csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);
  800. /* 2. If SGMII or QSGMII or RXAUI (i.e. if DLM0) set
  801. * GSER(0)_DLM(0)_MPLL_EN[MPLL_EN] to one.
  802. */
	/* 3. Set GSER(0)_DLM(0..2)_MPLL_MULTIPLIER[MPLL_MULTIPLIER]
	 * to the value in the preceding table, which is different
	 * from the desired setting prescribed by the HRM.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	if (sata_ref_clk == 100)
		mpll_multiplier.s.mpll_multiplier = 35;
	else
		mpll_multiplier.s.mpll_multiplier = 56;
	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	/* 3. Clear GSER0_DLM2_TEST_POWERDOWN[TEST_POWERDOWN] = 0 */
	dlmx_test_powerdown.u64 = csr_rd(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0));
	dlmx_test_powerdown.s.test_powerdown = 0;
	csr_wr(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0), dlmx_test_powerdown.u64);

	/* 4. Clear either/both lane0 and lane1 resets:
	 * GSER0_SATA_LANE_RST[L0_RST, L1_RST] = 0.
	 */
	sata_lane_rst.u64 = csr_rd(CVMX_GSERX_SATA_LANE_RST(0));
	sata_lane_rst.s.l0_rst = 0;
	sata_lane_rst.s.l1_rst = 0;
	csr_wr(CVMX_GSERX_SATA_LANE_RST(0), sata_lane_rst.u64);

	udelay(1);

	/* 5. Clear GSER0_DLM2_PHY_RESET */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* 6. If PCIe or SATA (i.e. if DLM1 or DLM2), set both MPLL_EN
	 * and MPLL_EN_OVRD to one in GSER(0)_PHY(1..2)_OVRD_IN_LO.
	 */
	ovrd_in.u64 = csr_rd(CVMX_GSERX_PHYX_OVRD_IN_LO(qlm, 0));
	ovrd_in.s.mpll_en = 1;
	ovrd_in.s.mpll_en_ovrd = 1;
	csr_wr(CVMX_GSERX_PHYX_OVRD_IN_LO(qlm, 0), ovrd_in.u64);

	/* 7. Decrease MPLL_MULTIPLIER by one continually until it reaches
	 * the desired long-term setting, ensuring that each MPLL_MULTIPLIER
	 * value is constant for at least 1 msec before changing to the next
	 * value. The desired long-term setting is as indicated in HRM tables
	 * 21-1, 21-2, and 21-3. This is not required with the HRM
	 * sequence.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	if (sata_ref_clk == 100)
		mpll_multiplier.s.mpll_multiplier = 0x1e;
	else
		mpll_multiplier.s.mpll_multiplier = 0x30;
	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_MPLL_STATUS(qlm, 0),
				  cvmx_gserx_dlmx_mpll_status_t, mpll_status, ==, 1, 10000)) {
		printf("ERROR: SATA MPLL failed to set\n");
		return -1;
	}

	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_RX_STATUS(qlm, 0), cvmx_gserx_dlmx_rx_status_t,
				  rx0_status, ==, 1, 10000)) {
		printf("ERROR: SATA RX0_STATUS failed to set\n");
		return -1;
	}

	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_RX_STATUS(qlm, 0), cvmx_gserx_dlmx_rx_status_t,
				  rx1_status, ==, 1, 10000)) {
		printf("ERROR: SATA RX1_STATUS failed to set\n");
		return -1;
	}

	/* 8. Deassert UCTL and UAHC resets:
	 * a. SATA_UCTL_CTL[UCTL_RST] = 0
	 * b. SATA_UCTL_CTL[UAHC_RST] = 0
	 * c. Wait 10 ACLK cycles before accessing any ACLK-only registers.
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.sata_uctl_rst = 0;
	uctl_ctl.s.sata_uahc_rst = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	udelay(1);

	/* 9. Enable conditional SCLK of UCTL by writing
	 * SATA_UCTL_CTL[CSCLK_EN] = 1
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.csclk_en = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* 10. Initialize UAHC as described in the AHCI specification
	 * (UAHC_* registers).
	 */

	/* set-up endian mode */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.dma_endian_mode = 1;
	shim_cfg.s.csr_endian_mode = 3;
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}
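
/*
 * Illustrative sketch (not part of the driver): the CVMX_WAIT_FOR_FIELD64()
 * calls above expand to a poll-with-timeout loop roughly like the helper
 * below. Everything here is a hypothetical simplification; the real macro
 * extracts the field through the register's typedef'ed bitfield rather than
 * an explicit mask/shift.
 */
static inline int __example_wait_for_field64(u64 addr, u64 mask, int shift,
					     u64 expected, int timeout_us)
{
	while (timeout_us-- > 0) {
		/* Poll the CSR and compare the masked field */
		if (((csr_rd(addr) & mask) >> shift) == expected)
			return 0;	/* field reached the expected value */
		udelay(1);
	}
	return -1;			/* timed out */
}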

/**
 * Initializes DLM 4 for SATA
 *
 * @param qlm		Must be 4.
 * @param baud_mhz	Baud rate for SATA
 * @param ref_clk_sel	Selects the speed of the reference clock where:
 *			0 = 100MHz, 1 = 125MHz and 2 = 156.25MHz
 * @param ref_clk_input	Reference clock input where 0 = external QLM clock,
 *			1 = qlmc_ref_clk0 and 2 = qlmc_ref_clk1
 */
static int __sata_dlm_init_cn73xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	cvmx_sata_uctl_shim_cfg_t shim_cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_rx_pwr_ctrl_p2_t pwr_ctrl_p2;
	cvmx_gserx_lanex_misc_cfg_0_t misc_cfg_0;
	cvmx_gserx_sata_lane_rst_t lane_rst;
	cvmx_gserx_pll_px_mode_0_t pmode_0;
	cvmx_gserx_pll_px_mode_1_t pmode_1;
	cvmx_gserx_lane_px_mode_0_t lane_pmode_0;
	cvmx_gserx_lane_px_mode_1_t lane_pmode_1;
	cvmx_gserx_cfg_t gserx_cfg;
	cvmx_sata_uctl_ctl_t uctl_ctl;
	int l;
	int i;

	/*
	 * 1. Configure the SATA
	 */

	/*
	 * 2. Configure the QLM Reference clock
	 * Set GSERX_REFCLK_SEL.COM_CLK_SEL to source reference clock
	 * from the external clock mux.
	 * Set GSERX_REFCLK_SEL.USE_COM1 to select qlmc_refclkn/p_1 or
	 * leave clear to select qlmc_refclkn/p_0
	 */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) {		/* External ref clock */
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) {	/* Common reference clock 0 */
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else {				/* Common reference clock 1 */
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}

	if (ref_clk_sel != 0) {
		printf("Wrong reference clock selected for QLM4\n");
		return -1;
	}

	csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Reset the QLM after changing the reference clock */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1);

	/*
	 * 3. Configure the QLM for SATA mode: set GSERX_CFG.SATA
	 */
	gserx_cfg.u64 = 0;
	gserx_cfg.s.sata = 1;
	csr_wr(CVMX_GSERX_CFG(qlm), gserx_cfg.u64);

	/*
	 * 12. Clear the appropriate lane resets:
	 * clear GSERX_SATA_LANE_RST.LX_RST where X is the lane number 0-1.
	 */
	lane_rst.u64 = csr_rd(CVMX_GSERX_SATA_LANE_RST(qlm));
	lane_rst.s.l0_rst = 0;
	lane_rst.s.l1_rst = 0;
	csr_wr(CVMX_GSERX_SATA_LANE_RST(qlm), lane_rst.u64);
	csr_rd(CVMX_GSERX_SATA_LANE_RST(qlm));

	udelay(1);

	/*
	 * 4. Take the PHY out of reset
	 * Write GSERX_PHY_CTL.PHY_RESET to a zero
	 */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Wait for reset to complete and the PLL to lock */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe
	 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
				  rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Workaround for errata GSER-30310: SATA HDD Not Ready due to
	 * PHY SDLL/LDLL lockup at 3GHz
	 */
	for (i = 0; i < 2; i++) {
		cvmx_gserx_slicex_pcie1_mode_t pcie1;
		cvmx_gserx_slicex_pcie2_mode_t pcie2;
		cvmx_gserx_slicex_pcie3_mode_t pcie3;

		pcie1.u64 = csr_rd(CVMX_GSERX_SLICEX_PCIE1_MODE(i, qlm));
		pcie1.s.rx_pi_bwsel = 1;
		pcie1.s.rx_ldll_bwsel = 1;
		pcie1.s.rx_sdll_bwsel = 1;
		csr_wr(CVMX_GSERX_SLICEX_PCIE1_MODE(i, qlm), pcie1.u64);

		pcie2.u64 = csr_rd(CVMX_GSERX_SLICEX_PCIE2_MODE(i, qlm));
		pcie2.s.rx_pi_bwsel = 1;
		pcie2.s.rx_ldll_bwsel = 1;
		pcie2.s.rx_sdll_bwsel = 1;
		csr_wr(CVMX_GSERX_SLICEX_PCIE2_MODE(i, qlm), pcie2.u64);

		pcie3.u64 = csr_rd(CVMX_GSERX_SLICEX_PCIE3_MODE(i, qlm));
		pcie3.s.rx_pi_bwsel = 1;
		pcie3.s.rx_ldll_bwsel = 1;
		pcie3.s.rx_sdll_bwsel = 1;
		csr_wr(CVMX_GSERX_SLICEX_PCIE3_MODE(i, qlm), pcie3.u64);
	}

	/*
	 * 7. Change P2 termination
	 * Clear GSERX_RX_PWR_CTRL_P2.P2_RX_SUBBLK_PD[0] (Termination)
	 */
	pwr_ctrl_p2.u64 = csr_rd(CVMX_GSERX_RX_PWR_CTRL_P2(qlm));
	pwr_ctrl_p2.s.p2_rx_subblk_pd &= 0x1e;
	csr_wr(CVMX_GSERX_RX_PWR_CTRL_P2(qlm), pwr_ctrl_p2.u64);

	/*
	 * 8. Modify the Electrical IDLE Detect on delay
	 * Change GSERX_LANE(0..3)_MISC_CFG_0.EIE_DET_STL_ON_TIME to a 0x4
	 */
	for (i = 0; i < 2; i++) {
		misc_cfg_0.u64 = csr_rd(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm));
		misc_cfg_0.s.eie_det_stl_on_time = 4;
		csr_wr(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm), misc_cfg_0.u64);
	}

	/*
	 * 9. Modify the PLL and Lane Protocol Mode registers to configure
	 * the PHY for SATA.
	 * (Configure all 3 PLLs, regardless of which speed is actually used.)
	 */

	/* Errata (GSER-26724) SATA never indicates GSER QLM_STAT[RST_RDY]
	 * We program PLL_PX_MODE_0 last due to this errata
	 */
	for (l = 0; l < 3; l++) {
		pmode_1.u64 = csr_rd(CVMX_GSERX_PLL_PX_MODE_1(l, qlm));
		lane_pmode_0.u64 = csr_rd(CVMX_GSERX_LANE_PX_MODE_0(l, qlm));
		lane_pmode_1.u64 = csr_rd(CVMX_GSERX_LANE_PX_MODE_1(l, qlm));

		pmode_1.s.pll_cpadj = 0x2;
		pmode_1.s.pll_opr = 0x0;
		pmode_1.s.pll_div = 0x1e;
		pmode_1.s.pll_pcie3en = 0x0;
		pmode_1.s.pll_16p5en = 0x0;

		lane_pmode_0.s.ctle = 0x0;
		lane_pmode_0.s.pcie = 0x0;
		lane_pmode_0.s.tx_ldiv = 0x0;
		lane_pmode_0.s.srate = 0;
		lane_pmode_0.s.tx_mode = 0x3;
		lane_pmode_0.s.rx_mode = 0x3;

		lane_pmode_1.s.vma_mm = 1;
		lane_pmode_1.s.vma_fine_cfg_sel = 0;
		lane_pmode_1.s.cdr_fgain = 0xa;
		lane_pmode_1.s.ph_acc_adj = 0x15;

		if (l == R_2_5G_REFCLK100)
			lane_pmode_0.s.rx_ldiv = 0x2;
		else if (l == R_5G_REFCLK100)
			lane_pmode_0.s.rx_ldiv = 0x1;
		else
			lane_pmode_0.s.rx_ldiv = 0x0;

		csr_wr(CVMX_GSERX_PLL_PX_MODE_1(l, qlm), pmode_1.u64);
		csr_wr(CVMX_GSERX_LANE_PX_MODE_0(l, qlm), lane_pmode_0.u64);
		csr_wr(CVMX_GSERX_LANE_PX_MODE_1(l, qlm), lane_pmode_1.u64);
	}

	for (l = 0; l < 3; l++) {
		pmode_0.u64 = csr_rd(CVMX_GSERX_PLL_PX_MODE_0(l, qlm));
		pmode_0.s.pll_icp = 0x1;
		pmode_0.s.pll_rloop = 0x3;
		pmode_0.s.pll_pcs_div = 0x5;
		csr_wr(CVMX_GSERX_PLL_PX_MODE_0(l, qlm), pmode_0.u64);
	}

	for (i = 0; i < 2; i++) {
		cvmx_gserx_slicex_rx_sdll_ctrl_t rx_sdll;

		rx_sdll.u64 = csr_rd(CVMX_GSERX_SLICEX_RX_SDLL_CTRL(i, qlm));
		rx_sdll.s.pcs_sds_oob_clk_ctrl = 2;
		rx_sdll.s.pcs_sds_rx_sdll_tune = 0;
		rx_sdll.s.pcs_sds_rx_sdll_swsel = 0;
		csr_wr(CVMX_GSERX_SLICEX_RX_SDLL_CTRL(i, qlm), rx_sdll.u64);
	}

	for (i = 0; i < 2; i++) {
		cvmx_gserx_lanex_misc_cfg_0_t misc_cfg;

		misc_cfg.u64 = csr_rd(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm));
		misc_cfg.s.use_pma_polarity = 0;
		misc_cfg.s.cfg_pcs_loopback = 0;
		misc_cfg.s.pcs_tx_mode_ovrrd_en = 0;
		misc_cfg.s.pcs_rx_mode_ovrrd_en = 0;
		misc_cfg.s.cfg_eie_det_cnt = 0;
		misc_cfg.s.eie_det_stl_on_time = 4;
		misc_cfg.s.eie_det_stl_off_time = 0;
		misc_cfg.s.tx_bit_order = 1;
		misc_cfg.s.rx_bit_order = 1;
		csr_wr(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm), misc_cfg.u64);
	}

	/* Wait for reset to complete and the PLL to lock */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe
	 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
				  rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Poll GSERX_SATA_STATUS for P0_RDY = 1 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_SATA_STATUS(qlm), cvmx_gserx_sata_status_t,
				  p0_rdy, ==, 1, 10000)) {
		printf("QLM4: Timeout waiting for GSERX_SATA_STATUS[p0_rdy]\n");
		return -1;
	}

	/* Poll GSERX_SATA_STATUS for P1_RDY = 1 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_SATA_STATUS(qlm), cvmx_gserx_sata_status_t,
				  p1_rdy, ==, 1, 10000)) {
		printf("QLM4: Timeout waiting for GSERX_SATA_STATUS[p1_rdy]\n");
		return -1;
	}

	udelay(2000);

	/* 6. Deassert UCTL and UAHC resets:
	 * a. SATA_UCTL_CTL[UCTL_RST] = 0
	 * b. SATA_UCTL_CTL[UAHC_RST] = 0
	 * c. Wait 10 ACLK cycles before accessing any ACLK-only registers.
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.sata_uctl_rst = 0;
	uctl_ctl.s.sata_uahc_rst = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	udelay(1);

	/* 7. Enable conditional SCLK of UCTL by writing
	 * SATA_UCTL_CTL[CSCLK_EN] = 1
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.csclk_en = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* set-up endian mode */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.dma_endian_mode = 1;
	shim_cfg.s.csr_endian_mode = 3;
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}

static int __dlm2_sata_uahc_init_cn70xx(int baud_mhz)
{
	cvmx_sata_uahc_gbl_cap_t gbl_cap;
	cvmx_sata_uahc_px_sctl_t sctl;
	cvmx_sata_uahc_gbl_pi_t pi;
	cvmx_sata_uahc_px_cmd_t cmd;
	cvmx_sata_uahc_px_sctl_t sctl0, sctl1;
	cvmx_sata_uahc_px_ssts_t ssts;
	cvmx_sata_uahc_px_tfd_t tfd;
	cvmx_sata_uahc_gbl_timer1ms_t gbl_timer1ms;
	u64 done;
	int result = -1;
	int retry_count = 0;
	int spd;

	/* From the Synopsys databook, SATA_UAHC_GBL_TIMER1MS is the
	 * AMBA clock in MHz * 1000, which is a_clk(Hz) / 1000
	 */
	gbl_timer1ms.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_TIMER1MS);
	gbl_timer1ms.s.timv = a_clk / 1000;
	csr_wr32(CVMX_SATA_UAHC_GBL_TIMER1MS, gbl_timer1ms.u32);
	gbl_timer1ms.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_TIMER1MS);
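	/*
	 * Worked example (illustrative clock rate, not a board value): with a
	 * 200 MHz AMBA clock, a_clk = 200,000,000 Hz and TIMV = 200,000,
	 * i.e. the number of ACLK cycles in one millisecond.
	 */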

	/* Set-up global capabilities reg (GBL_CAP) */
	gbl_cap.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_CAP);
	debug("%s: SATA_UAHC_GBL_CAP before: 0x%x\n", __func__, gbl_cap.u32);
	gbl_cap.s.sss = 1;
	gbl_cap.s.smps = 1;
	csr_wr32(CVMX_SATA_UAHC_GBL_CAP, gbl_cap.u32);
	gbl_cap.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_CAP);
	debug("%s: SATA_UAHC_GBL_CAP after: 0x%x\n", __func__, gbl_cap.u32);

	/* Set-up global hba control reg (interrupt enables) */
	/* Set-up port SATA control registers (speed limitation) */
	if (baud_mhz == 1500)
		spd = 1;
	else if (baud_mhz == 3000)
		spd = 2;
	else
		spd = 3;

	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
	debug("%s: SATA_UAHC_P0_SCTL before: 0x%x\n", __func__, sctl.u32);
	sctl.s.spd = spd;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl.u32);
	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
	debug("%s: SATA_UAHC_P0_SCTL after: 0x%x\n", __func__, sctl.u32);

	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
	debug("%s: SATA_UAHC_P1_SCTL before: 0x%x\n", __func__, sctl.u32);
	sctl.s.spd = spd;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl.u32);
	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
	debug("%s: SATA_UAHC_P1_SCTL after: 0x%x\n", __func__, sctl.u32);

	/* Set-up ports implemented reg. */
	pi.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_PI);
	debug("%s: SATA_UAHC_GBL_PI before: 0x%x\n", __func__, pi.u32);
	pi.s.pi = 3;
	csr_wr32(CVMX_SATA_UAHC_GBL_PI, pi.u32);
	pi.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_PI);
	debug("%s: SATA_UAHC_GBL_PI after: 0x%x\n", __func__, pi.u32);

retry0:
	/* Clear port SERR and IS registers */
	csr_wr32(CVMX_SATA_UAHC_PX_SERR(0), csr_rd32(CVMX_SATA_UAHC_PX_SERR(0)));
	csr_wr32(CVMX_SATA_UAHC_PX_IS(0), csr_rd32(CVMX_SATA_UAHC_PX_IS(0)));

	/* Set spin-up, power on, FIS RX enable, start, active */
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(0));
	debug("%s: SATA_UAHC_P0_CMD before: 0x%x\n", __func__, cmd.u32);
	cmd.s.fre = 1;
	cmd.s.sud = 1;
	cmd.s.pod = 1;
	cmd.s.st = 1;
	cmd.s.icc = 1;
	cmd.s.fbscp = 1;	/* Enable FIS-based switching */
	csr_wr32(CVMX_SATA_UAHC_PX_CMD(0), cmd.u32);
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(0));
	debug("%s: SATA_UAHC_P0_CMD after: 0x%x\n", __func__, cmd.u32);

	sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
	sctl0.s.det = 1;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl0.u32);

	/* check status */
	done = get_timer(0);
	while (1) {
		ssts.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SSTS(0));
		if (ssts.s.ipm == 1 && ssts.s.det == 3) {
			result = 0;
			break;
		} else if (get_timer(done) > 100) {
			result = -1;
			break;
		}
		udelay(100);
	}

	if (result != -1) {
		/* Clear the PxSERR Register, by writing '1s' to each
		 * implemented bit location
		 */
		csr_wr32(CVMX_SATA_UAHC_PX_SERR(0), -1);

		/*
		 * Wait for indication that the SATA drive is ready. This is
		 * determined via an examination of PxTFD.STS. If PxTFD.STS.BSY,
		 * PxTFD.STS.DRQ, and PxTFD.STS.ERR are all '0', prior to the
		 * maximum allowed time as specified in the ATA/ATAPI-7
		 * specification, the device is ready.
		 */
		/*
		 * Wait for the device to be ready. BSY(7), DRQ(3), and ERR(0)
		 * must be clear (0x89 is the mask of those three bits).
		 */
		done = get_timer(0);
		while (1) {
			tfd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_TFD(0));
			if ((tfd.s.sts & 0x89) == 0) {
				result = 0;
				break;
			} else if (get_timer(done) > 500) {
				if (retry_count < 3) {
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
					sctl0.s.det = 1;	/* Perform interface reset */
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl0.u32);
					udelay(1000);	/* 1ms dictated by AHCI 1.3 spec */
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
					sctl0.s.det = 0;	/* Complete interface reset */
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl0.u32);
					retry_count++;
					goto retry0;
				}
				result = -1;
				break;
			}
			udelay(100);
		}
	}

	if (result == -1)
		printf("SATA0: not available\n");
	else
		printf("SATA0: available\n");

	sctl1.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
	sctl1.s.det = 1;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl1.u32);

	result = -1;
	retry_count = 0;

retry1:
	/* Clear port SERR and IS registers */
	csr_wr32(CVMX_SATA_UAHC_PX_SERR(1), csr_rd32(CVMX_SATA_UAHC_PX_SERR(1)));
	csr_wr32(CVMX_SATA_UAHC_PX_IS(1), csr_rd32(CVMX_SATA_UAHC_PX_IS(1)));

	/* Set spin-up, power on, FIS RX enable, start, active */
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(1));
	debug("%s: SATA_UAHC_P1_CMD before: 0x%x\n", __func__, cmd.u32);
	cmd.s.fre = 1;
	cmd.s.sud = 1;
	cmd.s.pod = 1;
	cmd.s.st = 1;
	cmd.s.icc = 1;
	cmd.s.fbscp = 1;	/* Enable FIS-based switching */
	csr_wr32(CVMX_SATA_UAHC_PX_CMD(1), cmd.u32);
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(1));
	debug("%s: SATA_UAHC_P1_CMD after: 0x%x\n", __func__, cmd.u32);

	/* check status */
	done = get_timer(0);
	while (1) {
		ssts.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SSTS(1));
		if (ssts.s.ipm == 1 && ssts.s.det == 3) {
			result = 0;
			break;
		} else if (get_timer(done) > 1000) {
			result = -1;
			break;
		}
		udelay(100);
	}

	if (result != -1) {
		/* Clear the PxSERR Register, by writing '1s' to each
		 * implemented bit location
		 */
		csr_wr32(CVMX_SATA_UAHC_PX_SERR(1), csr_rd32(CVMX_SATA_UAHC_PX_SERR(1)));

		/*
		 * Wait for indication that the SATA drive is ready. This is
		 * determined via an examination of PxTFD.STS. If PxTFD.STS.BSY,
		 * PxTFD.STS.DRQ, and PxTFD.STS.ERR are all '0', prior to the
		 * maximum allowed time as specified in the ATA/ATAPI-7
		 * specification, the device is ready.
		 */
		/*
		 * Wait for the device to be ready. BSY(7), DRQ(3), and ERR(0)
		 * must be clear
		 */
		done = get_timer(0);
		while (1) {
			tfd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_TFD(1));
			if ((tfd.s.sts & 0x89) == 0) {
				result = 0;
				break;
			} else if (get_timer(done) > 500) {
				if (retry_count < 3) {
					sctl1.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
					sctl1.s.det = 1;	/* Perform interface reset */
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl1.u32);
					udelay(1000);	/* 1ms dictated by AHCI 1.3 spec */
					sctl1.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
					sctl1.s.det = 0;	/* Complete interface reset */
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl1.u32);
					retry_count++;
					goto retry1;
				}
				result = -1;
				break;
			}
			udelay(100);
		}
	}

	if (result == -1)
		printf("SATA1: not available\n");
	else
		printf("SATA1: available\n");

	return 0;
}

static int __sata_bist_cn70xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	cvmx_sata_uctl_bist_status_t bist_status;
	cvmx_sata_uctl_ctl_t uctl_ctl;
	cvmx_sata_uctl_shim_cfg_t shim_cfg;
	u64 done;
	int result = -1;

	debug("%s(%d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input);

	bist_status.u64 = csr_rd(CVMX_SATA_UCTL_BIST_STATUS);

	if (__dlm2_sata_uctl_init_cn70xx()) {
		printf("ERROR: Failed to initialize SATA UCTL CSRs\n");
		return -1;
	}

	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		result = __sata_dlm_init_cn73xx(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
	else
		result = __sata_dlm_init_cn70xx(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
	if (result) {
		printf("ERROR: Failed to initialize SATA GSER CSRs\n");
		return -1;
	}

	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.start_bist = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* Set-up for a 1 sec timer. */
	done = get_timer(0);
	while (1) {
		bist_status.u64 = csr_rd(CVMX_SATA_UCTL_BIST_STATUS);
		if ((bist_status.s.uctl_xm_r_bist_ndone |
		     bist_status.s.uctl_xm_w_bist_ndone |
		     bist_status.s.uahc_p0_rxram_bist_ndone |
		     bist_status.s.uahc_p1_rxram_bist_ndone |
		     bist_status.s.uahc_p0_txram_bist_ndone |
		     bist_status.s.uahc_p1_txram_bist_ndone) == 0) {
			result = 0;
			break;
		} else if (get_timer(done) > 1000) {
			result = -1;
			break;
		}
		udelay(100);
	}
	if (result == -1) {
		printf("ERROR: SATA_UCTL_BIST_STATUS = 0x%llx\n",
		       (unsigned long long)bist_status.u64);
		return -1;
	}

	debug("%s: Initializing UAHC\n", __func__);
	if (__dlm2_sata_uahc_init_cn70xx(baud_mhz)) {
		printf("ERROR: Failed to initialize SATA UAHC CSRs\n");
		return -1;
	}

	/* Change CSR_ENDIAN_MODE to big endian to use Open Source AHCI SATA
	 * driver
	 */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.csr_endian_mode = 1;
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}

static int __setup_sata(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	debug("%s(%d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input);

	return __sata_bist_cn70xx(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
}
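
/*
 * Usage sketch (hypothetical board hook, not called by the driver): bringing
 * up DLM2 as SATA at 6 Gbaud from the external 100 MHz QLM reference clock
 * reduces to a single call. The driver itself reaches __setup_sata() through
 * octeon_configure_qlm_cn70xx(..., CVMX_QLM_MODE_SATA_2X1, ...).
 */
static inline int __example_sata_bringup(void)
{
	/* qlm = 2, baud = 6000 MHz, ref_clk_sel = 0 (100 MHz), external clock */
	return __setup_sata(2, 6000, 0, 0);
}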

static int __dlmx_setup_pcie_cn70xx(int qlm, enum cvmx_qlm_mode mode, int gen2, int rc,
				    int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_dlmx_phy_reset_t dlmx_phy_reset;
	cvmx_gserx_dlmx_test_powerdown_t dlmx_test_powerdown;
	cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
	cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;
	static const u8 ref_clk_mult[2] = { 35, 56 };	/* 100 & 125 MHz ref clock supported. */

	debug("%s(%d, %d, %d, %d, %d, %d)\n", __func__, qlm, mode, gen2, rc, ref_clk_sel,
	      ref_clk_input);
	if (rc == 0) {
		debug("Skipping initializing PCIe dlm %d in endpoint mode\n", qlm);
		return 0;
	}

	if (qlm > 0 && ref_clk_input > 1) {
		printf("%s: Error: ref_clk_input can only be 0 or 1 for QLM %d\n",
		       __func__, qlm);
		return -1;
	}

	if (ref_clk_sel > OCTEON_QLM_REF_CLK_125MHZ) {
		printf("%s: Error: ref_clk_sel can only be 100 or 125 MHz.\n", __func__);
		return -1;
	}

	/* 1. Write GSER0_DLM(1..2)_REFCLK_SEL[REFCLK_SEL] if required for
	 * reference-clock selection
	 */
	csr_wr(CVMX_GSERX_DLMX_REFCLK_SEL(qlm, 0), ref_clk_input);

	/* 2. If required, write GSER0_DLM(1..2)_REF_CLKDIV2[REF_CLKDIV2] = 1
	 * (must be set if reference clock >= 100 MHz)
	 */

	/* 4. Configure the PCIE PIPE:
	 * a. Write GSER0_PCIE_PIPE_PORT_SEL[PIPE_PORT_SEL] to configure the
	 * PCIE PIPE.
	 *	0x0 = disables all pipes
	 *	0x1 = enables pipe0 only (PEM0 4-lane)
	 *	0x2 = enables pipes 0 and 1 (PEM0 and PEM1 2-lanes each)
	 *	0x3 = enables pipes 0, 1, 2, and 3 (PEM0, PEM1, and PEM3 are
	 *	      one-lane each)
	 * b. Configure GSER0_PCIE_PIPE_PORT_SEL[CFG_PEM1_DLM2]. If PEM1 is
	 * to be configured, this bit must reflect which DLM it is logically
	 * tied to. This bit sets multiplexing logic in GSER, and it is used
	 * by the RST logic to determine when the MAC can come out of reset.
	 *	0 = PEM1 is tied to DLM1 (for 3 x 1 PCIe mode).
	 *	1 = PEM1 is tied to DLM2 (for all other PCIe modes).
	 */
	if (qlm == 1) {
		cvmx_gserx_pcie_pipe_port_sel_t pipe_port;

		pipe_port.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_PORT_SEL(0));
		pipe_port.s.cfg_pem1_dlm2 = (mode == CVMX_QLM_MODE_PCIE_1X1) ? 1 : 0;
		pipe_port.s.pipe_port_sel =
			(mode == CVMX_QLM_MODE_PCIE) ? 1 :	/* PEM0 only */
			(mode == CVMX_QLM_MODE_PCIE_1X2) ? 2 :	/* PEM0-1 */
			(mode == CVMX_QLM_MODE_PCIE_1X1) ? 3 :	/* PEM0-2 */
			(mode == CVMX_QLM_MODE_PCIE_2X1) ? 3 :	/* PEM0-1 */
			0;					/* PCIe disabled */
		csr_wr(CVMX_GSERX_PCIE_PIPE_PORT_SEL(0), pipe_port.u64);
	}

	/* Apply workaround for Errata (G-20669) MPLL may not come up. */

	/* Set REF_CLKDIV2 based on the Ref Clock */
	ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
	ref_clkdiv2.s.ref_clkdiv2 = ref_clk_sel > 0;
	csr_wr(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0), ref_clkdiv2.u64);

	/* 1. Ensure GSER(0)_DLM(0..2)_PHY_RESET[PHY_RESET] is set. */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* 2. If SGMII or QSGMII or RXAUI (i.e. if DLM0) set
	 * GSER(0)_DLM(0)_MPLL_EN[MPLL_EN] to one.
	 */

	/* 3. Set GSER(0)_DLM(0..2)_MPLL_MULTIPLIER[MPLL_MULTIPLIER]
	 * to the value in the preceding table, which is different
	 * from the desired setting prescribed by the HRM.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	mpll_multiplier.s.mpll_multiplier = ref_clk_mult[ref_clk_sel];
	debug("%s: Setting MPLL multiplier to %d\n", __func__,
	      (int)mpll_multiplier.s.mpll_multiplier);
	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	/* 5. Clear GSER0_DLM(1..2)_TEST_POWERDOWN. Configurations that only
	 * use DLM1 need not clear GSER0_DLM2_TEST_POWERDOWN
	 */
	dlmx_test_powerdown.u64 = csr_rd(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0));
	dlmx_test_powerdown.s.test_powerdown = 0;
	csr_wr(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0), dlmx_test_powerdown.u64);

	/* 6. Clear GSER0_DLM(1..2)_PHY_RESET. Configurations that only
	 * use DLM1 need not clear GSER0_DLM2_PHY_RESET
	 */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* 6. Decrease MPLL_MULTIPLIER by one continually until it reaches
	 * the desired long-term setting, ensuring that each MPLL_MULTIPLIER
	 * value is constant for at least 1 msec before changing to the next
	 * value. The desired long-term setting is as indicated in HRM tables
	 * 21-1, 21-2, and 21-3. This is not required with the HRM
	 * sequence.
	 */
	/* This is set when initializing PCIe after soft reset is asserted. */

	/* 7. Write the GSER0_PCIE_PIPE_RST register to take the appropriate
	 * PIPE out of reset. There is a PIPEn_RST bit for each PIPE. Clear
	 * the appropriate bits based on the configuration (reset is
	 * active high).
	 */
	if (qlm == 1) {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;
		cvmx_gserx_pcie_pipe_rst_t pipe_rst;
		cvmx_rst_ctlx_t rst_ctl;

		switch (mode) {
		case CVMX_QLM_MODE_PCIE:	/* PEM0 on DLM1 & DLM2 */
		case CVMX_QLM_MODE_PCIE_1X2:	/* PEM0 on DLM1 */
		case CVMX_QLM_MODE_PCIE_1X1:	/* PEM0 on DLM1 using lane 0 */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			pemx_cfg.cn70xx.hostmd = rc;
			if (mode == CVMX_QLM_MODE_PCIE_1X1) {
				pemx_cfg.cn70xx.md =
					gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			} else if (mode == CVMX_QLM_MODE_PCIE) {
				pemx_cfg.cn70xx.md =
					gen2 ? CVMX_PEM_MD_GEN2_4LANE : CVMX_PEM_MD_GEN1_4LANE;
			} else {
				pemx_cfg.cn70xx.md =
					gen2 ? CVMX_PEM_MD_GEN2_2LANE : CVMX_PEM_MD_GEN1_2LANE;
			}
			csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(0));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(0), rst_ctl.u64);

			/* PEM0 is on DLM1&2 which is pipe0 */
			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe0_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			break;
		case CVMX_QLM_MODE_PCIE_2X1:	/* PEM0 and PEM1 on DLM1 */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			pemx_cfg.cn70xx.hostmd = rc;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(0));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(0), rst_ctl.u64);

			/* PEM0 is on DLM1 which is pipe0 */
			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe0_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);

			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
			pemx_cfg.cn70xx.hostmd = 1;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(1));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(1), rst_ctl.u64);

			/* PEM1 is on DLM2 which is pipe1 */
			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe1_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
			break;
		default:
			break;
		}
	} else {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;
		cvmx_gserx_pcie_pipe_rst_t pipe_rst;
		cvmx_rst_ctlx_t rst_ctl;

		switch (mode) {
		case CVMX_QLM_MODE_PCIE_1X2:	/* PEM1 on DLM2 */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
			pemx_cfg.cn70xx.hostmd = 1;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_2LANE : CVMX_PEM_MD_GEN1_2LANE;
			csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(1));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(1), rst_ctl.u64);

			/* PEM1 is on DLM2, which is pipe1 */
			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe1_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
			break;
		case CVMX_QLM_MODE_PCIE_2X1:	/* PEM1 and PEM2 on DLM2 */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
			pemx_cfg.cn70xx.hostmd = 1;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(1));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(1), rst_ctl.u64);

			/* PEM1 is on DLM2 lane 0, which is pipe2 */
			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe2_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);

			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
			pemx_cfg.cn70xx.hostmd = 1;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			csr_wr(CVMX_PEMX_CFG(2), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(2));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(2), rst_ctl.u64);

			/* PEM2 is on DLM2 lane 1, which is pipe3 */
			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe3_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
			break;
		default:
			break;
		}
	}
	return 0;
}
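
/*
 * Usage sketch (hypothetical): bringing up PEM0 as a gen2 root complex across
 * DLM1 and DLM2 (PCIe 1x4). This mirrors the pair of calls
 * octeon_configure_qlm_cn70xx() makes for CVMX_QLM_MODE_PCIE on DLM1; the
 * ref_clk_sel/ref_clk_input values of 0 here are placeholders.
 */
static inline int __example_pcie_1x4_rc(void)
{
	if (__dlmx_setup_pcie_cn70xx(1, CVMX_QLM_MODE_PCIE, 1, 1, 0, 0))
		return -1;
	return __dlmx_setup_pcie_cn70xx(2, CVMX_QLM_MODE_PCIE, 1, 1, 0, 0);
}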

/**
 * Configure dlm speed and mode for cn70xx.
 *
 * @param qlm	The DLM to configure
 * @param speed	The speed the DLM needs to be configured in MHz.
 * @param mode	The DLM to be configured as SGMII/XAUI/PCIe.
 *		DLM 0: has 2 interfaces which can be configured as
 *		SGMII/QSGMII/RXAUI. Both need to be configured at the
 *		same time. These are the valid options:
 *			CVMX_QLM_MODE_QSGMII,
 *			CVMX_QLM_MODE_SGMII_SGMII,
 *			CVMX_QLM_MODE_SGMII_DISABLED,
 *			CVMX_QLM_MODE_DISABLED_SGMII,
 *			CVMX_QLM_MODE_SGMII_QSGMII,
 *			CVMX_QLM_MODE_QSGMII_QSGMII,
 *			CVMX_QLM_MODE_QSGMII_DISABLED,
 *			CVMX_QLM_MODE_DISABLED_QSGMII,
 *			CVMX_QLM_MODE_QSGMII_SGMII,
 *			CVMX_QLM_MODE_RXAUI_1X2
 *
 *		DLM 1: PEM0/1 in PCIE_1x4/PCIE_2x1/PCIE_1X1
 *		DLM 2: PEM0/1/2 in PCIE_1x4/PCIE_1x2/PCIE_2x1/PCIE_1x1
 * @param rc	Only used for PCIe, rc = 1 for root complex mode, 0 for EP mode.
 * @param gen2	Only used for PCIe, gen2 = 1, in GEN2 mode else in GEN1 mode.
 *
 * @param ref_clk_input	The reference-clock input to use to configure QLM
 * @param ref_clk_sel	The reference-clock selection to use to configure QLM
 *
 * Return: Return 0 on success or -1.
 */
static int octeon_configure_qlm_cn70xx(int qlm, int speed, int mode, int rc, int gen2,
				       int ref_clk_sel, int ref_clk_input)
{
	debug("%s(%d, %d, %d, %d, %d, %d, %d)\n", __func__, qlm, speed, mode, rc, gen2, ref_clk_sel,
	      ref_clk_input);
	switch (qlm) {
	case 0: {
		int is_sff7000_rxaui = 0;
		cvmx_gmxx_inf_mode_t inf_mode0, inf_mode1;

		inf_mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
		inf_mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));
		if (inf_mode0.s.en || inf_mode1.s.en) {
			debug("DLM0 already configured\n");
			return -1;
		}

		switch (mode) {
		case CVMX_QLM_MODE_SGMII_SGMII:
			debug(" Mode SGMII SGMII\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_SGMII;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_SGMII;
			break;
		case CVMX_QLM_MODE_SGMII_QSGMII:
			debug(" Mode SGMII QSGMII\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_SGMII;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_QSGMII;
			break;
		case CVMX_QLM_MODE_SGMII_DISABLED:
			debug(" Mode SGMII Disabled\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_SGMII;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
			break;
		case CVMX_QLM_MODE_DISABLED_SGMII:
			debug("Mode Disabled SGMII\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_DISABLED;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_SGMII;
			break;
		case CVMX_QLM_MODE_QSGMII_SGMII:
			debug(" Mode QSGMII SGMII\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_QSGMII;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_SGMII;
			break;
		case CVMX_QLM_MODE_QSGMII_QSGMII:
			debug(" Mode QSGMII QSGMII\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_QSGMII;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_QSGMII;
			break;
		case CVMX_QLM_MODE_QSGMII_DISABLED:
			debug(" Mode QSGMII Disabled\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_QSGMII;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
			break;
		case CVMX_QLM_MODE_DISABLED_QSGMII:
			debug("Mode Disabled QSGMII\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_DISABLED;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_QSGMII;
			break;
		case CVMX_QLM_MODE_RXAUI:
			debug(" Mode RXAUI\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_RXAUI;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
			break;
		default:
			debug(" Mode Disabled Disabled\n");
			inf_mode0.s.mode = CVMX_GMX_INF_MODE_DISABLED;
			inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
			break;
		}
		csr_wr(CVMX_GMXX_INF_MODE(0), inf_mode0.u64);
		csr_wr(CVMX_GMXX_INF_MODE(1), inf_mode1.u64);

		/* Bring up the PLL */
		if (__dlm_setup_pll_cn70xx(qlm, speed, ref_clk_sel, ref_clk_input,
					   is_sff7000_rxaui))
			return -1;

		/* TX Lanes */
		if (__dlm0_setup_tx_cn70xx(speed, ref_clk_sel))
			return -1;

		/* RX Lanes */
		if (__dlm0_setup_rx_cn70xx(speed, ref_clk_sel))
			return -1;

		/* Enable the interface */
		inf_mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
		if (inf_mode0.s.mode != CVMX_GMX_INF_MODE_DISABLED)
			inf_mode0.s.en = 1;
		csr_wr(CVMX_GMXX_INF_MODE(0), inf_mode0.u64);
		inf_mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));
		if (inf_mode1.s.mode != CVMX_GMX_INF_MODE_DISABLED)
			inf_mode1.s.en = 1;
		csr_wr(CVMX_GMXX_INF_MODE(1), inf_mode1.u64);
		break;
	}
	case 1:
		switch (mode) {
		case CVMX_QLM_MODE_PCIE:	/* PEM0 on DLM1 & DLM2 */
			debug(" Mode PCIe\n");
			if (__dlmx_setup_pcie_cn70xx(1, mode, gen2, rc, ref_clk_sel, ref_clk_input))
				return -1;
			if (__dlmx_setup_pcie_cn70xx(2, mode, gen2, rc, ref_clk_sel, ref_clk_input))
				return -1;
			break;
		case CVMX_QLM_MODE_PCIE_1X2:	/* PEM0 on DLM1 */
		case CVMX_QLM_MODE_PCIE_2X1:	/* PEM0 & PEM1 on DLM1 */
		case CVMX_QLM_MODE_PCIE_1X1:	/* PEM0 on DLM1, only 1 lane */
			debug(" Mode PCIe 1x2, 2x1 or 1x1\n");
			if (__dlmx_setup_pcie_cn70xx(qlm, mode, gen2, rc, ref_clk_sel,
						     ref_clk_input))
				return -1;
			break;
		case CVMX_QLM_MODE_DISABLED:
			debug(" Mode disabled\n");
			break;
		default:
			debug("DLM1 illegal mode specified\n");
			return -1;
		}
		break;
	case 2:
		switch (mode) {
		case CVMX_QLM_MODE_SATA_2X1:
			debug("%s: qlm 2, mode is SATA 2x1\n", __func__);
			/* DLM2 is SATA, PCIE2 is disabled */
			if (__setup_sata(qlm, speed, ref_clk_sel, ref_clk_input))
				return -1;
			break;
		case CVMX_QLM_MODE_PCIE:
			debug(" Mode PCIe\n");
			/* DLM2 is PCIE0, PCIE1-2 are disabled. */
			/* Do nothing, it's initialized in DLM1 */
			break;
		case CVMX_QLM_MODE_PCIE_1X2:	/* PEM1 on DLM2 */
		case CVMX_QLM_MODE_PCIE_2X1:	/* PEM1 & PEM2 on DLM2 */
			debug(" Mode PCIe 1x2 or 2x1\n");
			if (__dlmx_setup_pcie_cn70xx(qlm, mode, gen2, rc, ref_clk_sel,
						     ref_clk_input))
				return -1;
			break;
		case CVMX_QLM_MODE_DISABLED:
			debug(" Mode Disabled\n");
			break;
		default:
			debug("DLM2 illegal mode specified\n");
			return -1;
		}
		break;
	default:
		return -1;
	}

	return 0;
}
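
/*
 * Usage sketch (hypothetical board): configuring DLM0 for two SGMII
 * interfaces at 1.25 Gbaud, clocked from common reference clock 0 at
 * 100 MHz (ref_clk_sel = 0, ref_clk_input = 1). Not called by the driver.
 */
static inline int __example_dlm0_sgmii(void)
{
	return octeon_configure_qlm_cn70xx(0, 1250, CVMX_QLM_MODE_SGMII_SGMII,
					   0, 0, 0, 1);
}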

/**
 * Disables DFE for the specified QLM lane(s).
 * This function should only be called for low-loss channels.
 *
 * @param node	Node to configure
 * @param qlm	QLM to configure
 * @param lane	Lane to configure, or -1 for all lanes
 * @param baud_mhz	The speed the QLM needs to be configured in MHz.
 * @param mode	The QLM to be configured as SGMII/XAUI/PCIe.
 */
void octeon_qlm_dfe_disable(int node, int qlm, int lane, int baud_mhz, int mode)
{
	int num_lanes = cvmx_qlm_get_lanes(qlm);
	int l;
	cvmx_gserx_lanex_rx_loop_ctrl_t loop_ctrl;
	cvmx_gserx_lanex_rx_valbbd_ctrl_0_t ctrl_0;
	cvmx_gserx_lanex_rx_valbbd_ctrl_1_t ctrl_1;
	cvmx_gserx_lanex_rx_valbbd_ctrl_2_t ctrl_2;
	cvmx_gserx_lane_vma_fine_ctrl_2_t lane_vma_fine_ctrl_2;

	/* Interfaces below 5Gbaud are already manually tuned. */
	if (baud_mhz < 5000)
		return;

	/* Don't run on PCIe links, SATA or KR. These interfaces use training */
	switch (mode) {
	case CVMX_QLM_MODE_10G_KR_1X2:
	case CVMX_QLM_MODE_10G_KR:
	case CVMX_QLM_MODE_40G_KR4:
		return;
	case CVMX_QLM_MODE_PCIE_1X1:
	case CVMX_QLM_MODE_PCIE_2X1:
	case CVMX_QLM_MODE_PCIE_1X2:
	case CVMX_QLM_MODE_PCIE:
	case CVMX_QLM_MODE_PCIE_1X8:
		return;
	case CVMX_QLM_MODE_SATA_2X1:
		return;
	default:
		break;
	}

	/* Updating pre_ctle minimum to 0. This works best for short channels */
	lane_vma_fine_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm));
	lane_vma_fine_ctrl_2.s.rx_prectle_gain_min_fine = 0;
	csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm), lane_vma_fine_ctrl_2.u64);

	for (l = 0; l < num_lanes; l++) {
		if (lane != -1 && lane != l)
			continue;

		/* 1. Write GSERX_LANEx_RX_LOOP_CTRL = 0x0270
		 * (var "loop_ctrl" with bits 8 & 1 cleared).
		 * bit<1> dfe_en_byp = 1'b0
		 */
		loop_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm));
		loop_ctrl.s.cfg_rx_lctrl = loop_ctrl.s.cfg_rx_lctrl & 0x3fd;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm), loop_ctrl.u64);

		/* 2. Write GSERX_LANEx_RX_VALBBD_CTRL_1 = 0x0000
		 * (var "ctrl1" with all bits cleared)
		 * bits<14:11> CFG_RX_DFE_C3_MVAL = 4'b0000
		 * bit<10> CFG_RX_DFE_C3_MSGN = 1'b0
		 * bits<9:6> CFG_RX_DFE_C2_MVAL = 4'b0000
		 * bit<5> CFG_RX_DFE_C2_MSGN = 1'b0
		 * bits<4:0> CFG_RX_DFE_C1_MVAL = 5'b00000
		 */
		ctrl_1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_1(l, qlm));
		ctrl_1.s.dfe_c3_mval = 0;
		ctrl_1.s.dfe_c3_msgn = 0;
		ctrl_1.s.dfe_c2_mval = 0;
		ctrl_1.s.dfe_c2_msgn = 0;
		ctrl_1.s.dfe_c1_mval = 0;
		ctrl_1.s.dfe_c1_msgn = 0;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_1(l, qlm), ctrl_1.u64);

		/* 3. Write GSERX_LANEx_RX_VALBBD_CTRL_0 = 0x2400
		 * (var "ctrl0" with following bits set/cleared)
		 * bits<11:10> CFG_RX_DFE_GAIN = 0x1
		 * bits<9:6> CFG_RX_DFE_C5_MVAL = 4'b0000
		 * bit<5> CFG_RX_DFE_C5_MSGN = 1'b0
		 * bits<4:1> CFG_RX_DFE_C4_MVAL = 4'b0000
		 * bit<0> CFG_RX_DFE_C4_MSGN = 1'b0
		 */
		ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm));
		ctrl_0.s.dfe_gain = 0x1;
		ctrl_0.s.dfe_c5_mval = 0;
		ctrl_0.s.dfe_c5_msgn = 0;
		ctrl_0.s.dfe_c4_mval = 0;
		ctrl_0.s.dfe_c4_msgn = 0;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm), ctrl_0.u64);

		/* 4. Write GSER(0..13)_LANE(0..3)_RX_VALBBD_CTRL_2 = 0x003F
		 * (enable DFE tap overrides)
		 * bit<5> dfe_ovrd_en = 1
		 * bit<4> dfe_c5_ovrd_val = 1
		 * bit<3> dfe_c4_ovrd_val = 1
		 * bit<2> dfe_c3_ovrd_val = 1
		 * bit<1> dfe_c2_ovrd_val = 1
		 * bit<0> dfe_c1_ovrd_val = 1
		 */
		ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_2(l, qlm));
		ctrl_2.s.dfe_ovrd_en = 0x1;
		ctrl_2.s.dfe_c5_ovrd_val = 0x1;
		ctrl_2.s.dfe_c4_ovrd_val = 0x1;
		ctrl_2.s.dfe_c3_ovrd_val = 0x1;
		ctrl_2.s.dfe_c2_ovrd_val = 0x1;
		ctrl_2.s.dfe_c1_ovrd_val = 0x1;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_2(l, qlm), ctrl_2.u64);
	}
}

/**
 * Disables DFE and uses a fixed CTLE Peak value and AGC settings
 * for the specified QLM lane(s).
 * This function should only be called for low-loss channels. It prevents
 * Rx equalization from happening on all lanes in a QLM, and should be
 * called for all lanes being used in the QLM.
 *
 * @param node	Node to configure
 * @param qlm	QLM to configure
 * @param lane	Lane to configure, or -1 for all lanes
 * @param baud_mhz	The speed the QLM needs to be configured in MHz.
 * @param mode	The QLM to be configured as SGMII/XAUI/PCIe.
 * @param ctle_zero	Equalizer Peaking control
 * @param agc_pre_ctle	Pre-CTLE gain
 * @param agc_post_ctle	Post-CTLE gain
 *
 * Return: Zero on success, negative on failure
 */
int octeon_qlm_dfe_disable_ctle_agc(int node, int qlm, int lane, int baud_mhz, int mode,
				    int ctle_zero, int agc_pre_ctle, int agc_post_ctle)
{
	int num_lanes = cvmx_qlm_get_lanes(qlm);
	int l;
	cvmx_gserx_lanex_rx_loop_ctrl_t loop_ctrl;
	cvmx_gserx_lanex_rx_valbbd_ctrl_0_t ctrl_0;
	cvmx_gserx_lanex_pwr_ctrl_t lanex_pwr_ctrl;
	cvmx_gserx_lane_mode_t lmode;
	cvmx_gserx_lane_px_mode_1_t px_mode_1;
	cvmx_gserx_lanex_rx_cfg_5_t rx_cfg_5;
	cvmx_gserx_lanex_rx_cfg_2_t rx_cfg_2;
	cvmx_gserx_lanex_rx_ctle_ctrl_t ctle_ctrl;

	/* Check tuning constraints */
	if (ctle_zero < 0 || ctle_zero > 15) {
		printf("Error: N%d.QLM%d: Invalid CTLE_ZERO(%d). Must be between 0 and 15.\n",
		       node, qlm, ctle_zero);
		return -1;
	}

	if (agc_pre_ctle < 0 || agc_pre_ctle > 15) {
		printf("Error: N%d.QLM%d: Invalid AGC_Pre_CTLE(%d)\n",
		       node, qlm, agc_pre_ctle);
		return -1;
	}

	if (agc_post_ctle < 0 || agc_post_ctle > 15) {
		printf("Error: N%d.QLM%d: Invalid AGC_Post_CTLE(%d)\n",
		       node, qlm, agc_post_ctle);
		return -1;
	}

	/* Interfaces below 5Gbaud are already manually tuned. */
	if (baud_mhz < 5000)
		return 0;

	/* Don't run on PCIe links, SATA or KR. These interfaces use training */
	switch (mode) {
	case CVMX_QLM_MODE_10G_KR_1X2:
	case CVMX_QLM_MODE_10G_KR:
	case CVMX_QLM_MODE_40G_KR4:
		return 0;
	case CVMX_QLM_MODE_PCIE_1X1:
	case CVMX_QLM_MODE_PCIE_2X1:
	case CVMX_QLM_MODE_PCIE_1X2:
	case CVMX_QLM_MODE_PCIE:
	case CVMX_QLM_MODE_PCIE_1X8:
		return 0;
	case CVMX_QLM_MODE_SATA_2X1:
		return 0;
	default:
		break;
	}

	lmode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));

	/* 1. Enable VMA manual mode for the QLM's lane mode */
	px_mode_1.u64 = csr_rd_node(node, CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm));
	px_mode_1.s.vma_mm = 1;
	csr_wr_node(node, CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm), px_mode_1.u64);

	/* 2. Disable DFE */
	octeon_qlm_dfe_disable(node, qlm, lane, baud_mhz, mode);

	for (l = 0; l < num_lanes; l++) {
		if (lane != -1 && lane != l)
			continue;

		/* 3. Write GSERX_LANEx_RX_VALBBD_CTRL_0.CFG_RX_AGC_GAIN = 0x2 */
		ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm));
		ctrl_0.s.agc_gain = 0x2;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm), ctrl_0.u64);

		/* 4. Write GSERX_LANEx_RX_LOOP_CTRL
		 * bit<8> lctrl_men = 1'b1
		 * bit<0> cdr_en_byp = 1'b1
		 */
		loop_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm));
		loop_ctrl.s.cfg_rx_lctrl = loop_ctrl.s.cfg_rx_lctrl | 0x101;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm), loop_ctrl.u64);

		/* 5. Write GSERX_LANEx_PWR_CTRL = 0x0040 (var "lanex_pwr_ctrl" with
		 * following bits set)
		 * bit<6> RX_LCTRL_OVRRD_EN = 1'b1
		 * all other bits cleared.
		 */
		lanex_pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(l, qlm));
		lanex_pwr_ctrl.s.rx_lctrl_ovrrd_en = 1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(l, qlm), lanex_pwr_ctrl.u64);

		/* -- Setting AGC in manual mode and configuring CTLE -- */
		rx_cfg_5.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_5(l, qlm));
		rx_cfg_5.s.rx_agc_men_ovrrd_val = 1;
		rx_cfg_5.s.rx_agc_men_ovrrd_en = 1;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_5(l, qlm), rx_cfg_5.u64);

		ctle_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CTLE_CTRL(l, qlm));
		ctle_ctrl.s.pcs_sds_rx_ctle_zero = ctle_zero;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_CTLE_CTRL(l, qlm), ctle_ctrl.u64);

		rx_cfg_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_2(l, qlm));
		rx_cfg_2.s.rx_sds_rx_agc_mval = (agc_pre_ctle << 4) | agc_post_ctle;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_2(l, qlm), rx_cfg_2.u64);
	}
	return 0;
}
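
/*
 * Usage sketch (hypothetical, placeholder values): forcing fixed equalization
 * on every lane of QLM5 running a 10.3125 Gbaud interface on a short,
 * low-loss channel. The ctle_zero/agc_pre_ctle/agc_post_ctle values below
 * are illustrative only, not board-validated tuning, and CVMX_QLM_MODE_XFI
 * is assumed to be one of the non-PCIe/non-KR modes this function accepts.
 */
static inline int __example_fixed_eq(void)
{
	/* node 0, QLM5, all lanes (-1), 10312 MHz, ctle_zero 7, pre 5, post 10 */
	return octeon_qlm_dfe_disable_ctle_agc(0, 5, -1, 10312,
					       CVMX_QLM_MODE_XFI, 7, 5, 10);
}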

/**
 * Some QLM speeds need to override the default tuning parameters
 *
 * @param node	Node to configure
 * @param qlm	QLM to configure
 * @param baud_mhz	Desired speed in MHz
 * @param lane	Lane to apply the tuning parameters to
 * @param tx_swing	Voltage swing. The higher the value the lower the voltage,
 *			the default value is 7.
 * @param tx_pre	pre-cursor pre-emphasis
 * @param tx_post	post-cursor pre-emphasis
 * @param tx_gain	Transmit gain. Range 0-7
 * @param tx_vboost	Transmit voltage boost. Range 0-1
 */
void octeon_qlm_tune_per_lane_v3(int node, int qlm, int baud_mhz, int lane, int tx_swing,
				 int tx_pre, int tx_post, int tx_gain, int tx_vboost)
{
	cvmx_gserx_cfg_t gserx_cfg;
	cvmx_gserx_lanex_tx_cfg_0_t tx_cfg0;
	cvmx_gserx_lanex_tx_pre_emphasis_t pre_emphasis;
	cvmx_gserx_lanex_tx_cfg_1_t tx_cfg1;
	cvmx_gserx_lanex_tx_cfg_3_t tx_cfg3;
	cvmx_bgxx_spux_br_pmd_control_t pmd_control;
	cvmx_gserx_lanex_pcs_ctlifc_0_t pcs_ctlifc_0;
	cvmx_gserx_lanex_pcs_ctlifc_2_t pcs_ctlifc_2;
	int bgx, lmac;

	/* Do not apply QLM tuning to PCIe and KR interfaces. */
	gserx_cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
	if (gserx_cfg.s.pcie)
		return;

	/* Apply the QLM tuning only to cn73xx, cn78xx and cnf75xx models */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		bgx = (qlm < 2) ? qlm : (qlm - 2);
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		bgx = (qlm < 4) ? (qlm - 2) : 2;
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		bgx = 0;
	else
		return;

	if ((OCTEON_IS_MODEL(OCTEON_CN73XX) && qlm == 6) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF75XX) && qlm == 5))
		lmac = 2;
	else
		lmac = lane;

	/* No need to tune 10G-KR and 40G-KR interfaces */
	pmd_control.u64 = csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(lmac, bgx));
	if (pmd_control.s.train_en)
		return;

	if (tx_pre != -1 && tx_post == -1)
		tx_post = 0;

	if (tx_post != -1 && tx_pre == -1)
		tx_pre = 0;

	/* Check tuning constraints */
	if (tx_swing < -1 || tx_swing > 25) {
		printf("ERROR: N%d:QLM%d: Lane %d: Invalid TX_SWING(%d). TX_SWING must be <= 25.\n",
		       node, qlm, lane, tx_swing);
		return;
	}

	if (tx_pre < -1 || tx_pre > 10) {
		printf("ERROR: N%d:QLM%d: Lane %d: Invalid TX_PRE(%d). TX_PRE must be <= 10.\n",
		       node, qlm, lane, tx_pre);
		return;
	}

	if (tx_post < -1 || tx_post > 31) {
		printf("ERROR: N%d:QLM%d: Lane %d: Invalid TX_POST(%d). TX_POST must be <= 31.\n",
		       node, qlm, lane, tx_post);
		return;
	}

	if (tx_pre >= 0 && tx_post >= 0 && tx_swing >= 0 &&
	    tx_pre + tx_post - tx_swing > 2) {
		printf("ERROR: N%d.QLM%d: Lane %d: TX_PRE(%d) + TX_POST(%d) - TX_SWING(%d) must be <= 2\n",
		       node, qlm, lane, tx_pre, tx_post, tx_swing);
		return;
	}

	if (tx_pre >= 0 && tx_post >= 0 && tx_swing >= 0 &&
	    tx_pre + tx_post + tx_swing > 35) {
		printf("ERROR: N%d.QLM%d: Lane %d: TX_PRE(%d) + TX_POST(%d) + TX_SWING(%d) must be <= 35\n",
		       node, qlm, lane, tx_pre, tx_post, tx_swing);
		return;
	}

	if (tx_gain < -1 || tx_gain > 7) {
		printf("ERROR: N%d.QLM%d: Lane %d: Invalid TX_GAIN(%d). TX_GAIN must be between 0 and 7\n",
		       node, qlm, lane, tx_gain);
		return;
	}

	if (tx_vboost < -1 || tx_vboost > 1) {
		printf("ERROR: N%d.QLM%d: Lane %d: Invalid TX_VBOOST(%d). TX_VBOOST must be 0 or 1.\n",
		       node, qlm, lane, tx_vboost);
		return;
	}

	debug("N%d.QLM%d: Lane %d: TX_SWING=%d, TX_PRE=%d, TX_POST=%d, TX_GAIN=%d, TX_VBOOST=%d\n",
	      node, qlm, lane, tx_swing, tx_pre, tx_post, tx_gain, tx_vboost);

	/* Complete the Tx swing and Tx equalization programming */
	/* 1) Enable Tx swing and Tx emphasis overrides */
	tx_cfg1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_1(lane, qlm));
	tx_cfg1.s.tx_swing_ovrrd_en = (tx_swing != -1);
	tx_cfg1.s.tx_premptap_ovrrd_val = (tx_pre != -1) && (tx_post != -1);
	tx_cfg1.s.tx_vboost_en_ovrrd_en = (tx_vboost != -1);	/* Vboost override */
	csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_1(lane, qlm), tx_cfg1.u64);

	/* 2) Program the Tx swing and Tx emphasis Pre-cursor and Post-cursor values */
	/* CFG_TX_PREMPTAP[8:4] = Lane X's TX post-cursor value (C+1) */
	/* CFG_TX_PREMPTAP[3:0] = Lane X's TX pre-cursor value (C-1) */
	if (tx_swing != -1) {
		tx_cfg0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(lane, qlm));
		tx_cfg0.s.cfg_tx_swing = tx_swing;
		csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(lane, qlm), tx_cfg0.u64);
	}

	if ((tx_pre != -1) && (tx_post != -1)) {
		pre_emphasis.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_PRE_EMPHASIS(lane, qlm));
		pre_emphasis.s.cfg_tx_premptap = (tx_post << 4) | tx_pre;
		csr_wr_node(node, CVMX_GSERX_LANEX_TX_PRE_EMPHASIS(lane, qlm), pre_emphasis.u64);
	}

	/* Apply TX gain settings */
	if (tx_gain != -1) {
		tx_cfg3.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm));
		tx_cfg3.s.pcs_sds_tx_gain = tx_gain;
		csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm), tx_cfg3.u64);
	}

	/* Apply TX vboost settings */
	if (tx_vboost != -1) {
		tx_cfg3.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm));
		tx_cfg3.s.cfg_tx_vboost_en = tx_vboost;
		csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm), tx_cfg3.u64);
	}

	/* 3) Program override for the Tx coefficient request */
	pcs_ctlifc_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(lane, qlm));
	if (((tx_pre != -1) && (tx_post != -1)) || (tx_swing != -1))
		pcs_ctlifc_0.s.cfg_tx_coeff_req_ovrrd_val = 0x1;
	if (tx_vboost != -1)
		pcs_ctlifc_0.s.cfg_tx_vboost_en_ovrrd_val = 1;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(lane, qlm), pcs_ctlifc_0.u64);

	/* 4) Enable the Tx coefficient request override enable */
	pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
	if (((tx_pre != -1) && (tx_post != -1)) || (tx_swing != -1))
		pcs_ctlifc_2.s.cfg_tx_coeff_req_ovrrd_en = 0x1;
	if (tx_vboost != -1)
		pcs_ctlifc_2.s.cfg_tx_vboost_en_ovrrd_en = 1;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);

	/* 5) Issue a Control Interface Configuration Override request to start the Tx equalizer */
	pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
	pcs_ctlifc_2.s.ctlifc_ovrrd_req = 0x1;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);

	/* 6) Wait 1 ms for the request to complete */
	udelay(1000);

	/* Steps 7 & 8 required for subsequent Tx swing and Tx equalization adjustment */
	/* 7) Disable the Tx coefficient request override enable */
	pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
	pcs_ctlifc_2.s.cfg_tx_coeff_req_ovrrd_en = 0;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);

	/* 8) Issue a Control Interface Configuration Override request */
	pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
	pcs_ctlifc_2.s.ctlifc_ovrrd_req = 0x1;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);
}

/**
 * Some QLM speeds need to override the default tuning parameters
 *
 * @param node	Node to configure
 * @param qlm	QLM to configure
 * @param baud_mhz	Desired speed in MHz
 * @param tx_swing	Voltage swing. The higher the value the lower the voltage,
 *			the default value is 7.
 * @param tx_premptap	bits [3:0] pre-cursor pre-emphasis, bits [8:4] post-cursor
 *			pre-emphasis.
 * @param tx_gain	Transmit gain. Range 0-7
 * @param tx_vboost	Transmit voltage boost. Range 0-1
 */
  2194. void octeon_qlm_tune_v3(int node, int qlm, int baud_mhz, int tx_swing, int tx_premptap, int tx_gain,
  2195. int tx_vboost)
  2196. {
  2197. int lane;
  2198. int num_lanes = cvmx_qlm_get_lanes(qlm);
  2199. for (lane = 0; lane < num_lanes; lane++) {
  2200. int tx_pre = (tx_premptap == -1) ? -1 : tx_premptap & 0xf;
  2201. int tx_post = (tx_premptap == -1) ? -1 : (tx_premptap >> 4) & 0x1f;
  2202. octeon_qlm_tune_per_lane_v3(node, qlm, baud_mhz, lane, tx_swing, tx_pre, tx_post,
  2203. tx_gain, tx_vboost);
  2204. }
  2205. }
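
/* Illustrative usage sketch (the argument values here are hypothetical, not
 * board-validated settings): pack a post-cursor of 0xd and a pre-cursor of
 * 0x2 into tx_premptap and tune all lanes of QLM 4 on node 0 for 10.3125G.
 * Pass -1 for any parameter that should be left at its current value.
 *
 *	int tx_premptap = (0xd << 4) | 0x2;	// bits [4:8] post, bits [0:3] pre
 *
 *	octeon_qlm_tune_v3(0, 4, 103125, 7, tx_premptap, 3, 1);
 */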
/**
 * Some QLMs need to override the default pre-ctle for low loss channels.
 *
 * @param node     Node to configure
 * @param qlm      QLM to configure
 * @param pre_ctle pre-ctle settings for low loss channels
 */
void octeon_qlm_set_channel_v3(int node, int qlm, int pre_ctle)
{
	cvmx_gserx_lane_vma_fine_ctrl_2_t lane_vma_fine_ctrl_2;

	lane_vma_fine_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm));
	lane_vma_fine_ctrl_2.s.rx_prectle_gain_min_fine = pre_ctle;
	csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm), lane_vma_fine_ctrl_2.u64);
}
static void __qlm_init_errata_20844(int node, int qlm)
{
	int lane;

	/* Only applies to CN78XX pass 1.x */
	if (!OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
		return;

	/* Errata GSER-20844: Electrical Idle logic can coast
	 * 1) After the link first comes up write the following
	 * register on each lane to prevent the application logic
	 * from stomping on the Coast inputs. This is a one time write,
	 * or if you prefer you could put it in the link up loop and
	 * write it every time the link comes up.
	 * 1a) Then write GSER(0..13)_LANE(0..3)_PCS_CTLIFC_2
	 * Set CTLIFC_OVRRD_REQ (later)
	 * Set CFG_RX_CDR_COAST_REQ_OVRRD_EN
	 * It's not clear if #1 and #1a can be combined; let's try it
	 * this way first.
	 */
	for (lane = 0; lane < 4; lane++) {
		cvmx_gserx_lanex_rx_misc_ovrrd_t misc_ovrrd;
		cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc_2;

		ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
		ctlifc_2.s.cfg_rx_cdr_coast_req_ovrrd_en = 1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), ctlifc_2.u64);

		misc_ovrrd.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm));
		misc_ovrrd.s.cfg_rx_eie_det_ovrrd_en = 1;
		misc_ovrrd.s.cfg_rx_eie_det_ovrrd_val = 0;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm), misc_ovrrd.u64);

		udelay(1);

		misc_ovrrd.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm));
		misc_ovrrd.s.cfg_rx_eie_det_ovrrd_en = 1;
		misc_ovrrd.s.cfg_rx_eie_det_ovrrd_val = 1;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm), misc_ovrrd.u64);

		ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
		ctlifc_2.s.ctlifc_ovrrd_req = 1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), ctlifc_2.u64);
	}
}
/** CN78xx reference clock register settings */
struct refclk_settings_cn78xx {
	bool valid; /** Reference clock speed supported */
	union cvmx_gserx_pll_px_mode_0 mode_0;
	union cvmx_gserx_pll_px_mode_1 mode_1;
	union cvmx_gserx_lane_px_mode_0 pmode_0;
	union cvmx_gserx_lane_px_mode_1 pmode_1;
};

/** Default reference clock for various modes */
static const u8 def_ref_clk_cn78xx[R_NUM_LANE_MODES] = { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 };

/**
 * This data structure stores the reference clock for each mode for each QLM.
 *
 * It is indexed first by the node number, then the QLM number and then the
 * lane mode. It is initialized to the default values.
 */
static u8 ref_clk_cn78xx[CVMX_MAX_NODES][8][R_NUM_LANE_MODES] = {
	{ { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } },
	{ { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } },
	{ { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } },
	{ { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } }
};
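
/* Sketch of how this table is consumed (assembled here for illustration;
 * the selector values are 0 = 100MHz, 1 = 125MHz, 2 = 156.25MHz,
 * 3 = 161.1328125MHz):
 *
 *	int sel = ref_clk_cn78xx[node][qlm][lane_mode];
 *	const struct refclk_settings_cn78xx *s =
 *		&refclk_settings_cn78xx[lane_mode][sel];
 *	if (s->valid)
 *		... program the PLL/lane mode registers from *s ...
 *
 * __set_qlm_ref_clk_cn78xx() below is the checked way to change an entry.
 */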
/**
 * This data structure contains the register values for the cn78xx PLLs.
 * It is indexed first by the lane mode and second by the reference clock.
 * Note that not all combinations are supported.
 */
static const struct refclk_settings_cn78xx refclk_settings_cn78xx[R_NUM_LANE_MODES][4] = {
	{ /* 0 R_2_5G_REFCLK100 */
		{ /* 100MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x19 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x1,
				.rx_ldiv = 0x1,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 125MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x1,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x1,
				.rx_ldiv = 0x1,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 156.25MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x10 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x1,
				.rx_ldiv = 0x1,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 161.1328125MHz reference clock */
			.valid = false } },
	{ /* 1 R_5G_REFCLK100 */
		{ /* 100MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x19 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 125MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x1,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 156.25MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x10 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 161.1328125MHz reference clock */
			.valid = false } },
	{ /* 2 R_8G_REFCLK100 */
		{ /* 100MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x1,
				.pll_opr = 0x1,
				.pll_div = 0x28 },
			.pmode_0.s = { .ctle = 0x3,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xb,
				.ph_acc_adj = 0x23 } },
		{ /* 125MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x2, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x1,
				.pll_pcie3en = 0x1,
				.pll_opr = 0x1,
				.pll_div = 0x20 },
			.pmode_0.s = { .ctle = 0x3,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xb,
				.ph_acc_adj = 0x23 } },
		{ /* 156.25MHz reference clock not supported */
			.valid = false } },
	{ /* 3 R_125G_REFCLK15625_KX */
		{ /* 100MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x19 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x2,
				.rx_ldiv = 0x2,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 125MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x2,
				.rx_ldiv = 0x2,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 156.25MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x3,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x10 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x2,
				.rx_ldiv = 0x2,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 161.1328125MHz reference clock */
			.valid = false } },
	{ /* 4 R_3125G_REFCLK15625_XAUI */
		{ /* 100MHz reference */
			.valid = false },
		{ /* 125MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x14 },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x19 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x1,
				.rx_ldiv = 0x1,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 156.25MHz reference, default */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x14 },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x1,
				.rx_ldiv = 0x1,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 161.1328125MHz reference clock */
			.valid = false } },
	{ /* 5 R_103125G_REFCLK15625_KR */
		{ /* 100MHz reference */
			.valid = false },
		{ /* 125MHz reference */
			.valid = false },
		{ /* 156.25MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x1,
				.pll_div = 0x21 },
			.pmode_0.s = { .ctle = 0x3,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x1,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0xf } },
		{ /* 161.1328125 reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x1,
				.pll_div = 0x20 },
			.pmode_0.s = { .ctle = 0x3,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x1,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0xf } } },
	{ /* 6 R_125G_REFCLK15625_SGMII */
		{ /* 100MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x19 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x2,
				.rx_ldiv = 0x2,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 125MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x2,
				.rx_ldiv = 0x2,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 156.25MHz reference clock */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
			.mode_1.s = { .pll_16p5en = 0x1,
				.pll_cpadj = 0x3,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x10 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x2,
				.rx_ldiv = 0x2,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } } },
	{ /* 7 R_5G_REFCLK15625_QSGMII */
		{ /* 100MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0, .pll_cpadj = 0x2, .pll_pcie3en = 0x0,
				.pll_div = 0x19 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 125MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0, .pll_cpadj = 0x1, .pll_pcie3en = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 156.25MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0, .pll_cpadj = 0x2, .pll_pcie3en = 0x0,
				.pll_div = 0x10 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xc,
				.ph_acc_adj = 0x1e } },
		{ /* 161.1328125MHz reference clock */
			.valid = false } },
	{ /* 8 R_625G_REFCLK15625_RXAUI */
		{ /* 100MHz reference */
			.valid = false },
		{ /* 125MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x19 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 156.25MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 161.1328125 reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } } },
	{ /* 9 R_2_5G_REFCLK125 */
		{ /* 100MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x19 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x1,
				.rx_ldiv = 0x1,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 125MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x1,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x1,
				.rx_ldiv = 0x1,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 156.25MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x10 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x1,
				.rx_ldiv = 0x1,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x1,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 161.1328125MHz reference clock */
			.valid = false } },
	{ /* 0xa R_5G_REFCLK125 */
		{ /* 100MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x19 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 125MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x1,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x14 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 156.25MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x0,
				.pll_opr = 0x0,
				.pll_div = 0x10 },
			.pmode_0.s = { .ctle = 0x0,
				.pcie = 0x1,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xa,
				.ph_acc_adj = 0x14 } },
		{ /* 161.1328125MHz reference clock */
			.valid = false } },
	{ /* 0xb R_8G_REFCLK125 */
		{ /* 100MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x2,
				.pll_pcie3en = 0x1,
				.pll_opr = 0x1,
				.pll_div = 0x28 },
			.pmode_0.s = { .ctle = 0x3,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xb,
				.ph_acc_adj = 0x23 } },
		{ /* 125MHz reference */
			.valid = true,
			.mode_0.s = { .pll_icp = 0x2, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
			.mode_1.s = { .pll_16p5en = 0x0,
				.pll_cpadj = 0x1,
				.pll_pcie3en = 0x1,
				.pll_opr = 0x1,
				.pll_div = 0x20 },
			.pmode_0.s = { .ctle = 0x3,
				.pcie = 0x0,
				.tx_ldiv = 0x0,
				.rx_ldiv = 0x0,
				.srate = 0x0,
				.tx_mode = 0x3,
				.rx_mode = 0x3 },
			.pmode_1.s = { .vma_fine_cfg_sel = 0x0,
				.vma_mm = 0x0,
				.cdr_fgain = 0xb,
				.ph_acc_adj = 0x23 } },
		{ /* 156.25MHz reference */
			.valid = false },
		{ /* 161.1328125MHz reference clock */
			.valid = false } }
};
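
/* Sanity check on the table above (an observation derived from the entries,
 * not from the hardware manual text here): for each valid entry the PLL
 * multiplier satisfies refclk * pll_div = VCO rate. E.g. for the 2.5G modes:
 *
 *	100MHz    * 0x19 (25) = 2500MHz
 *	125MHz    * 0x14 (20) = 2500MHz
 *	156.25MHz * 0x10 (16) = 2500MHz
 *
 * and for the 10.3125G KR mode:
 *
 *	156.25MHz      * 0x21 (33) = 5156.25MHz
 *	161.1328125MHz * 0x20 (32) = 5156.25MHz
 */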
/**
 * Set a non-standard reference clock for a node, qlm and lane mode.
 *
 * @INTERNAL
 *
 * @param node        node number the reference clock is used with
 * @param qlm         qlm number the reference clock is hooked up to
 * @param lane_mode   current lane mode selected for the QLM
 * @param ref_clk_sel 0 = 100MHz, 1 = 125MHz, 2 = 156.25MHz,
 *                    3 = 161.1328125MHz
 *
 * Return: 0 for success or -1 if the reference clock selector is not supported
 *
 * NOTE: This must be called before __qlm_setup_pll_cn78xx.
 */
static int __set_qlm_ref_clk_cn78xx(int node, int qlm, int lane_mode, int ref_clk_sel)
{
	if (ref_clk_sel > 3 || ref_clk_sel < 0 ||
	    !refclk_settings_cn78xx[lane_mode][ref_clk_sel].valid) {
		debug("%s: Invalid reference clock %d for lane mode %d for node %d, QLM %d\n",
		      __func__, ref_clk_sel, lane_mode, node, qlm);
		return -1;
	}
	debug("%s(%d, %d, 0x%x, %d)\n", __func__, node, qlm, lane_mode, ref_clk_sel);
	ref_clk_cn78xx[node][qlm][lane_mode] = ref_clk_sel;
	return 0;
}
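
/* Minimal usage sketch (node/QLM values are hypothetical): select the
 * 161.1328125MHz reference (selector 3) for 10.3125G KR on node 0, QLM 2,
 * before the PLLs are programmed:
 *
 *	if (__set_qlm_ref_clk_cn78xx(0, 2, R_103125G_REFCLK15625_KR, 3))
 *		printf("refclk selector not supported for this lane mode\n");
 */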
/**
 * KR - Inverted Tx Coefficient Direction Change. Changing Pre & Post Tap
 * inc/dec direction
 *
 * @INTERNAL
 *
 * @param node Node number to configure
 * @param qlm  QLM number to configure
 */
static void __qlm_kr_inc_dec_gser26636(int node, int qlm)
{
	cvmx_gserx_rx_txdir_ctrl_1_t rx_txdir_ctrl;

	/* Apply workaround for Errata GSER-26636,
	 * KR training coefficient update inverted
	 */
	rx_txdir_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm));
	rx_txdir_ctrl.s.rx_precorr_chg_dir = 1;
	rx_txdir_ctrl.s.rx_tap1_chg_dir = 1;
	csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm), rx_txdir_ctrl.u64);
}
/**
 * Updating the RX EQ settings to support wider temperature range
 *
 * @INTERNAL
 *
 * @param node Node number to configure
 * @param qlm  QLM number to configure
 */
static void __qlm_rx_eq_temp_gser27140(int node, int qlm)
{
	int lane;
	int num_lanes = cvmx_qlm_get_lanes(qlm);
	cvmx_gserx_lanex_rx_valbbd_ctrl_0_t rx_valbbd_ctrl_0;
	cvmx_gserx_lane_vma_fine_ctrl_2_t lane_vma_fine_ctrl_2;
	cvmx_gserx_lane_vma_fine_ctrl_0_t lane_vma_fine_ctrl_0;
	cvmx_gserx_rx_txdir_ctrl_1_t rx_txdir_ctrl_1;
	cvmx_gserx_eq_wait_time_t eq_wait_time;
	cvmx_gserx_rx_txdir_ctrl_2_t rx_txdir_ctrl_2;
	cvmx_gserx_rx_txdir_ctrl_0_t rx_txdir_ctrl_0;

	for (lane = 0; lane < num_lanes; lane++) {
		rx_valbbd_ctrl_0.u64 =
			csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(lane, qlm));
		rx_valbbd_ctrl_0.s.agc_gain = 3;
		rx_valbbd_ctrl_0.s.dfe_gain = 2;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(lane, qlm),
			    rx_valbbd_ctrl_0.u64);
	}

	/* do_pre_ctle_limits_work_around: */
	lane_vma_fine_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm));
	//lane_vma_fine_ctrl_2.s.rx_prectle_peak_max_fine = 11;
	lane_vma_fine_ctrl_2.s.rx_prectle_gain_max_fine = 11;
	//lane_vma_fine_ctrl_2.s.rx_prectle_peak_min_fine = 6;
	lane_vma_fine_ctrl_2.s.rx_prectle_gain_min_fine = 6;
	csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm), lane_vma_fine_ctrl_2.u64);

	/* do_inc_dec_thres_work_around: */
	rx_txdir_ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_0(qlm));
	rx_txdir_ctrl_0.s.rx_boost_hi_thrs = 11;
	rx_txdir_ctrl_0.s.rx_boost_lo_thrs = 4;
	rx_txdir_ctrl_0.s.rx_boost_hi_val = 15;
	csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_0(qlm), rx_txdir_ctrl_0.u64);

	/* do_sdll_iq_work_around: */
	lane_vma_fine_ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_0(qlm));
	lane_vma_fine_ctrl_0.s.rx_sdll_iq_max_fine = 14;
	lane_vma_fine_ctrl_0.s.rx_sdll_iq_min_fine = 8;
	lane_vma_fine_ctrl_0.s.rx_sdll_iq_step_fine = 2;

	/* do_vma_window_work_around_2: */
	lane_vma_fine_ctrl_0.s.vma_window_wait_fine = 5;
	lane_vma_fine_ctrl_0.s.lms_wait_time_fine = 5;
	csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_0(qlm), lane_vma_fine_ctrl_0.u64);

	/* Set dfe_tap_1_lo_thres_val: */
	rx_txdir_ctrl_1.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm));
	rx_txdir_ctrl_1.s.rx_tap1_lo_thrs = 8;
	rx_txdir_ctrl_1.s.rx_tap1_hi_thrs = 0x17;
	csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm), rx_txdir_ctrl_1.u64);

	/* do_rxeq_wait_cnt_work_around: */
	eq_wait_time.u64 = csr_rd_node(node, CVMX_GSERX_EQ_WAIT_TIME(qlm));
	eq_wait_time.s.rxeq_wait_cnt = 6;
	csr_wr_node(node, CVMX_GSERX_EQ_WAIT_TIME(qlm), eq_wait_time.u64);

	/* do_write_rx_txdir_precorr_thresholds: */
	rx_txdir_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_2(qlm));
	rx_txdir_ctrl_2.s.rx_precorr_hi_thrs = 0xc0;
	rx_txdir_ctrl_2.s.rx_precorr_lo_thrs = 0x40;
	csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_2(qlm), rx_txdir_ctrl_2.u64);
}
/* Errata GSER-26150: 10G PHY PLL Temperature Failure
 * This workaround must be completed after the final deassertion of
 * GSERx_PHY_CTL[PHY_RESET]
 */
static int __qlm_errata_gser_26150(int node, int qlm, int is_pcie)
{
	int num_lanes = 4;
	int i;
	cvmx_gserx_glbl_pll_cfg_3_t pll_cfg_3;
	cvmx_gserx_glbl_misc_config_1_t misc_config_1;

	/* PCIe only requires the LC-VCO parameters to be updated */
	if (is_pcie) {
		/* Update PLL parameters */
		/* Step 1: Set GSER()_GLBL_PLL_CFG_3[PLL_VCTRL_SEL_LCVCO_VAL] = 0x2, and
		 * GSER()_GLBL_PLL_CFG_3[PCS_SDS_PLL_VCO_AMP] = 0
		 */
		pll_cfg_3.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm));
		pll_cfg_3.s.pcs_sds_pll_vco_amp = 0;
		pll_cfg_3.s.pll_vctrl_sel_lcvco_val = 2;
		csr_wr_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm), pll_cfg_3.u64);

		/* Step 2: Set GSER()_GLBL_MISC_CONFIG_1[PCS_SDS_TRIM_CHP_REG] = 0x2. */
		misc_config_1.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm));
		misc_config_1.s.pcs_sds_trim_chp_reg = 2;
		csr_wr_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm), misc_config_1.u64);
		return 0;
	}

	/* Applying this errata twice causes problems */
	pll_cfg_3.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm));
	if (pll_cfg_3.s.pll_vctrl_sel_lcvco_val == 0x2)
		return 0;

	/* (GSER-26150) 10 Gb temperature excursions can cause lock failure */
	/* Change the calibration point of the VCO at start up to shift some
	 * available range of the VCO from the -deltaT direction to the +deltaT
	 * ramp direction, allowing a greater range of VCO temperatures before
	 * experiencing the failure.
	 */

	/* Check for DLMs on CN73XX and CNF75XX */
	if (OCTEON_IS_MODEL(OCTEON_CN73XX) && (qlm == 5 || qlm == 6))
		num_lanes = 2;

	/* Put the PHY in the P2 power-down state. All lanes in a QLM/DLM must
	 * be powered down to force the PHY to the P2 state.
	 */
	for (i = 0; i < num_lanes; i++) {
		cvmx_gserx_lanex_pcs_ctlifc_0_t ctlifc0;
		cvmx_gserx_lanex_pcs_ctlifc_1_t ctlifc1;
		cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc2;

		/* Step 1: Set GSER()_LANE(lane_n)_PCS_CTLIFC_0[CFG_TX_PSTATE_REQ_OVRRD_VAL] = 0x3
		 * Select P2 power state for Tx lane
		 */
		ctlifc0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm));
		ctlifc0.s.cfg_tx_pstate_req_ovrrd_val = 0x3;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm), ctlifc0.u64);

		/* Step 2: Set GSER()_LANE(lane_n)_PCS_CTLIFC_1[CFG_RX_PSTATE_REQ_OVRRD_VAL] = 0x3
		 * Select P2 power state for Rx lane
		 */
		ctlifc1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm));
		ctlifc1.s.cfg_rx_pstate_req_ovrrd_val = 0x3;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm), ctlifc1.u64);

		/* Step 3: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN] = 1
		 * Enable Tx power state override and Set
		 * GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_RX_PSTATE_REQ_OVRRD_EN] = 1
		 * Enable Rx power state override
		 */
		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.cfg_tx_pstate_req_ovrrd_en = 0x1;
		ctlifc2.s.cfg_rx_pstate_req_ovrrd_en = 0x1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);

		/* Step 4: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ] = 1
		 * Start the CTLIFC override state machine
		 */
		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.ctlifc_ovrrd_req = 0x1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
	}

	/* Update PLL parameters */
	/* Step 5: Set GSER()_GLBL_PLL_CFG_3[PLL_VCTRL_SEL_LCVCO_VAL] = 0x2, and
	 * GSER()_GLBL_PLL_CFG_3[PCS_SDS_PLL_VCO_AMP] = 0
	 */
	pll_cfg_3.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm));
	pll_cfg_3.s.pcs_sds_pll_vco_amp = 0;
	pll_cfg_3.s.pll_vctrl_sel_lcvco_val = 2;
	csr_wr_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm), pll_cfg_3.u64);

	/* Step 6: Set GSER()_GLBL_MISC_CONFIG_1[PCS_SDS_TRIM_CHP_REG] = 0x2. */
	misc_config_1.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm));
	misc_config_1.s.pcs_sds_trim_chp_reg = 2;
	csr_wr_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm), misc_config_1.u64);

	/* Wake up the PHY and transition to the P0 power-up state to bring up
	 * the lanes; all PHY lanes must be woken up.
	 */
	for (i = 0; i < num_lanes; i++) {
		cvmx_gserx_lanex_pcs_ctlifc_0_t ctlifc0;
		cvmx_gserx_lanex_pcs_ctlifc_1_t ctlifc1;
		cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc2;

		/* Step 7: Set GSER()_LANE(lane_n)_PCS_CTLIFC_0[CFG_TX_PSTATE_REQ_OVRRD_VAL] = 0x0
		 * Select P0 power state for Tx lane
		 */
		ctlifc0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm));
		ctlifc0.s.cfg_tx_pstate_req_ovrrd_val = 0x0;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm), ctlifc0.u64);

		/* Step 8: Set GSER()_LANE(lane_n)_PCS_CTLIFC_1[CFG_RX_PSTATE_REQ_OVRRD_VAL] = 0x0
		 * Select P0 power state for Rx lane
		 */
		ctlifc1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm));
		ctlifc1.s.cfg_rx_pstate_req_ovrrd_val = 0x0;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm), ctlifc1.u64);

		/* Step 9: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN] = 1
		 * Enable Tx power state override and Set
		 * GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_RX_PSTATE_REQ_OVRRD_EN] = 1
		 * Enable Rx power state override
		 */
		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.cfg_tx_pstate_req_ovrrd_en = 0x1;
		ctlifc2.s.cfg_rx_pstate_req_ovrrd_en = 0x1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);

		/* Step 10: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ] = 1
		 * Start the CTLIFC override state machine
		 */
		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.ctlifc_ovrrd_req = 0x1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
	}

	/* Step 11: Wait 10 msec */
	mdelay(10);

	/* Release Lane Tx/Rx Power state override enables. */
	for (i = 0; i < num_lanes; i++) {
		cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc2;

		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.cfg_tx_pstate_req_ovrrd_en = 0x0;
		ctlifc2.s.cfg_rx_pstate_req_ovrrd_en = 0x0;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
	}

	/* Step 12: Poll GSER()_PLL_STAT[PLL_LOCK] = 1
	 * Poll and check that the PLL is locked
	 */
	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
				       pll_lock, ==, 1, 10000)) {
		printf("%d:QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", node, qlm);
		return -1;
	}

	/* Step 13: Poll GSER()_QLM_STAT[RST_RDY] = 1
	 * Poll and check that the QLM/DLM is ready
	 */
	if (is_pcie == 0 &&
	    CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
				       rst_rdy, ==, 1, 10000)) {
		printf("%d:QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", node, qlm);
		return -1;
	}
	return 0;
}
/**
 * Configure all of the PLLs for a particular node and qlm
 * @INTERNAL
 *
 * @param node Node number to configure
 * @param qlm  QLM number to configure
 */
static void __qlm_setup_pll_cn78xx(int node, int qlm)
{
	cvmx_gserx_pll_px_mode_0_t mode_0;
	cvmx_gserx_pll_px_mode_1_t mode_1;
	cvmx_gserx_lane_px_mode_0_t pmode_0;
	cvmx_gserx_lane_px_mode_1_t pmode_1;
	int lane_mode;
	int ref_clk;
	const struct refclk_settings_cn78xx *clk_settings;

	for (lane_mode = 0; lane_mode < R_NUM_LANE_MODES; lane_mode++) {
		mode_0.u64 = csr_rd_node(node, CVMX_GSERX_PLL_PX_MODE_0(lane_mode, qlm));
		mode_1.u64 = csr_rd_node(node, CVMX_GSERX_PLL_PX_MODE_1(lane_mode, qlm));
		pmode_0.u64 = 0;
		pmode_1.u64 = 0;
		ref_clk = ref_clk_cn78xx[node][qlm][lane_mode];
		clk_settings = &refclk_settings_cn78xx[lane_mode][ref_clk];
		debug("%s(%d, %d): lane_mode: 0x%x, ref_clk: %d\n", __func__, node, qlm, lane_mode,
		      ref_clk);

		if (!clk_settings->valid) {
			printf("%s: Error: reference clock %d is not supported for lane mode %d on qlm %d\n",
			       __func__, ref_clk, lane_mode, qlm);
			continue;
		}

		mode_0.s.pll_icp = clk_settings->mode_0.s.pll_icp;
		mode_0.s.pll_rloop = clk_settings->mode_0.s.pll_rloop;
		mode_0.s.pll_pcs_div = clk_settings->mode_0.s.pll_pcs_div;
		mode_1.s.pll_16p5en = clk_settings->mode_1.s.pll_16p5en;
		mode_1.s.pll_cpadj = clk_settings->mode_1.s.pll_cpadj;
		mode_1.s.pll_pcie3en = clk_settings->mode_1.s.pll_pcie3en;
		mode_1.s.pll_opr = clk_settings->mode_1.s.pll_opr;
		mode_1.s.pll_div = clk_settings->mode_1.s.pll_div;
		pmode_0.u64 = clk_settings->pmode_0.u64;
		pmode_1.u64 = clk_settings->pmode_1.u64;
		csr_wr_node(node, CVMX_GSERX_PLL_PX_MODE_1(lane_mode, qlm), mode_1.u64);
		csr_wr_node(node, CVMX_GSERX_LANE_PX_MODE_0(lane_mode, qlm), pmode_0.u64);
		csr_wr_node(node, CVMX_GSERX_LANE_PX_MODE_1(lane_mode, qlm), pmode_1.u64);
		csr_wr_node(node, CVMX_GSERX_PLL_PX_MODE_0(lane_mode, qlm), mode_0.u64);
	}
}
/**
 * Get the lane mode for the specified node and QLM.
 *
 * @param ref_clk_sel The reference-clock selection to use to configure QLM
 *                    0 = REF_100MHZ
 *                    1 = REF_125MHZ
 *                    2 = REF_156MHZ
 * @param baud_mhz    The speed the QLM needs to be configured in MHz.
 * @param[out] alt_pll_settings
 *                    If non-NULL this will be set if non-default PLL
 *                    settings are required for the mode.
 *
 * Return: lane mode to use or -1 on error
 *
 * NOTE: In some modes the selected reference clock differs from the mode's
 * default, in which case *alt_pll_settings is set to true.
 */
static int __get_lane_mode_for_speed_and_ref_clk(int ref_clk_sel, int baud_mhz,
						 bool *alt_pll_settings)
{
	if (alt_pll_settings)
		*alt_pll_settings = false;
	switch (baud_mhz) {
	case 98304:
	case 49152:
	case 24576:
	case 12288:
		if (ref_clk_sel != 3) {
			printf("Error: Invalid ref clock\n");
			return -1;
		}
		return 0x5;
	case 6144:
	case 3072:
		if (ref_clk_sel != 3) {
			printf("Error: Invalid ref clock\n");
			return -1;
		}
		return 0x8;
	case 1250:
		if (alt_pll_settings)
			*alt_pll_settings = (ref_clk_sel != 2);
		return R_125G_REFCLK15625_SGMII;
	case 2500:
		if (ref_clk_sel == 0)
			return R_2_5G_REFCLK100;
		if (alt_pll_settings)
			*alt_pll_settings = (ref_clk_sel != 1);
		return R_2_5G_REFCLK125;
	case 3125:
		if (ref_clk_sel == 2) {
			return R_3125G_REFCLK15625_XAUI;
		} else if (ref_clk_sel == 1) {
			if (alt_pll_settings)
				*alt_pll_settings = true;
			return R_3125G_REFCLK15625_XAUI;
		}
		printf("Error: Invalid speed\n");
		return -1;
	case 5000:
		if (ref_clk_sel == 0) {
			return R_5G_REFCLK100;
		} else if (ref_clk_sel == 1) {
			if (alt_pll_settings)
				*alt_pll_settings = (ref_clk_sel != 1);
			return R_5G_REFCLK125;
		} else {
			return R_5G_REFCLK15625_QSGMII;
		}
	case 6250:
		if (ref_clk_sel != 0) {
			if (alt_pll_settings)
				*alt_pll_settings = (ref_clk_sel != 2);
			return R_625G_REFCLK15625_RXAUI;
		}
		printf("Error: Invalid speed\n");
		return -1;
	case 6316:
		if (ref_clk_sel != 3) {
			printf("Error: Invalid ref clock\n");
			return -1;
		}
		if (alt_pll_settings)
			*alt_pll_settings = true;
		return R_625G_REFCLK15625_RXAUI;
	case 8000:
		if (ref_clk_sel == 0)
			return R_8G_REFCLK100;
		else if (ref_clk_sel == 1)
			return R_8G_REFCLK125;
		printf("Error: Invalid speed\n");
		return -1;
	case 103125:
		if (ref_clk_sel == 3 && alt_pll_settings)
			*alt_pll_settings = true;
		if (ref_clk_sel == 2 || ref_clk_sel == 3)
			return R_103125G_REFCLK15625_KR;
		/* fall through */
	default:
		printf("Error: Invalid speed\n");
		return -1;
	}

	return -1;
}
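
/* Worked example of the mapping above (the call site is hypothetical, the
 * values come from this function): a 1.25 Gbaud SGMII link with a 100MHz
 * reference returns R_125G_REFCLK15625_SGMII with *alt_pll_settings == true,
 * since only the 156.25MHz reference uses the default PLL settings:
 *
 *	bool alt;
 *	int lm = __get_lane_mode_for_speed_and_ref_clk(0, 1250, &alt);
 *	// lm == R_125G_REFCLK15625_SGMII, alt == true
 */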
/*
 * Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
 * during speed change. Change SLI_WINDOW_CTL[time] to 525us
 */
static void __set_sli_window_ctl_errata_31375(int node)
{
	if (OCTEON_IS_MODEL(OCTEON_CN78XX) || OCTEON_IS_MODEL(OCTEON_CN73XX) ||
	    OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		cvmx_sli_window_ctl_t window_ctl;

		window_ctl.u64 = csr_rd_node(node, CVMX_PEXP_SLI_WINDOW_CTL);
		/* Configure SLI_WINDOW_CTL only once */
		if (window_ctl.s.time != 8191)
			return;

		window_ctl.s.time = gd->bus_clk * 525ull / 1000000;
		csr_wr_node(node, CVMX_PEXP_SLI_WINDOW_CTL, window_ctl.u64);
	}
}
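
/* The write above converts 525us into bus-clock cycles:
 * time = bus_clk * 525 / 1000000. For example, with a (hypothetical) 800MHz
 * bus clock: 800000000 * 525 / 1000000 = 420000 cycles.
 */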
static void __cvmx_qlm_pcie_errata_ep_cn78xx(int node, int pem)
{
	cvmx_pciercx_cfg031_t cfg031;
	cvmx_pciercx_cfg032_t cfg032;
	cvmx_pciercx_cfg040_t cfg040;
	cvmx_pemx_cfg_t pemx_cfg;
	cvmx_pemx_on_t pemx_on;
	int low_qlm, high_qlm;
	int qlm, lane;
	u64 start_cycle;

	pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(pem));

	/* Errata (GSER-21178) PCIe gen3 doesn't work, continued */
	/* Wait for the link to come up as Gen1 */
	printf("PCIe%d: Waiting for EP out of reset\n", pem);
	while (pemx_on.s.pemoor == 0) {
		udelay(1000);
		pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(pem));
	}

	/* Enable gen3 speed selection */
	printf("PCIe%d: Enabling Gen3 for EP\n", pem);
	/* Now that the link is up at Gen1, raise the maximum and target link
	 * speeds to Gen3
	 */
	pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
	pemx_cfg.s.md = 2;
	csr_wr_node(node, CVMX_PEMX_CFG(pem), pemx_cfg.u64);
	cfg031.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG031(pem));
	cfg031.s.mls = 2;
	cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG031(pem), cfg031.u32);
	cfg040.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG040(pem));
	cfg040.s.tls = 3;
	cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG040(pem), cfg040.u32);

	/* Wait up to 10ms for the link speed change to complete */
	start_cycle = get_timer(0);
	do {
		if (get_timer(start_cycle) > 10)
			return;
		mdelay(1);
		cfg032.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG032(pem));
	} while (cfg032.s.ls != 3);

	pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
	low_qlm = pem; /* FIXME */
	high_qlm = (pemx_cfg.cn78xx.lanes8) ? low_qlm + 1 : low_qlm;

	/* Toggle cfg_rx_dll_locken_ovrrd_en and rx_resetn_ovrrd_en across
	 * all QLM lanes in use
	 */
	for (qlm = low_qlm; qlm <= high_qlm; qlm++) {
		for (lane = 0; lane < 4; lane++) {
			cvmx_gserx_lanex_rx_misc_ovrrd_t misc_ovrrd;
			cvmx_gserx_lanex_pwr_ctrl_t pwr_ctrl;

			misc_ovrrd.u64 =
				csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm));
			misc_ovrrd.s.cfg_rx_dll_locken_ovrrd_en = 1;
			csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm),
				    misc_ovrrd.u64);
			pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, qlm));
			pwr_ctrl.s.rx_resetn_ovrrd_en = 1;
			csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, qlm), pwr_ctrl.u64);
		}
	}
	for (qlm = low_qlm; qlm <= high_qlm; qlm++) {
		for (lane = 0; lane < 4; lane++) {
			cvmx_gserx_lanex_rx_misc_ovrrd_t misc_ovrrd;
			cvmx_gserx_lanex_pwr_ctrl_t pwr_ctrl;

			misc_ovrrd.u64 =
				csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm));
			misc_ovrrd.s.cfg_rx_dll_locken_ovrrd_en = 0;
			csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm),
				    misc_ovrrd.u64);
			pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, qlm));
			pwr_ctrl.s.rx_resetn_ovrrd_en = 0;
			csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, qlm), pwr_ctrl.u64);
		}
	}

	//printf("PCIe%d: Waiting for EP link up at Gen3\n", pem);
	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_PEMX_ON(pem), cvmx_pemx_on_t, pemoor, ==, 1,
				       1000000)) {
		printf("PCIe%d: Timeout waiting for EP link up at Gen3\n", pem);
		return;
	}
}
static void __cvmx_qlm_pcie_errata_cn78xx(int node, int qlm)
{
	int pem, i, q;
	int is_8lanes;
	int is_high_lanes;
	int low_qlm, high_qlm, is_host;
	int need_ep_monitor;
	cvmx_pemx_cfg_t pem_cfg, pem3_cfg;
	cvmx_gserx_slice_cfg_t slice_cfg;
	cvmx_gserx_rx_pwr_ctrl_p1_t pwr_ctrl_p1;
	cvmx_rst_soft_prstx_t soft_prst;

	/* Only applies to CN78XX pass 1.x */
	if (!OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
		return;

	/* Determine the PEM for this QLM, whether we're in 8 lane mode,
	 * and whether these are the top lanes of the 8
	 */
	switch (qlm) {
	case 0: /* First 4 lanes of PEM0 */
		pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
		pem = 0;
		is_8lanes = pem_cfg.cn78xx.lanes8;
		is_high_lanes = 0;
		break;
	case 1: /* Either last 4 lanes of PEM0, or PEM1 */
		pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
		pem = (pem_cfg.cn78xx.lanes8) ? 0 : 1;
		is_8lanes = pem_cfg.cn78xx.lanes8;
		is_high_lanes = is_8lanes;
		break;
	case 2: /* First 4 lanes of PEM2 */
		pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
		pem = 2;
		is_8lanes = pem_cfg.cn78xx.lanes8;
		is_high_lanes = 0;
		break;
	case 3: /* Either last 4 lanes of PEM2, or PEM3 */
		pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
		pem3_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
		pem = (pem_cfg.cn78xx.lanes8) ? 2 : 3;
		is_8lanes = (pem == 2) ? pem_cfg.cn78xx.lanes8 : pem3_cfg.cn78xx.lanes8;
		is_high_lanes = (pem == 2) && is_8lanes;
		break;
	case 4: /* Last 4 lanes of PEM3 */
		pem = 3;
		is_8lanes = 1;
		is_high_lanes = 1;
		break;
	default:
		return;
	}

	/* These workarounds must be applied once per PEM. Since we're called per
	 * QLM, wait for the 2nd half of 8 lane setups before doing the workaround.
	 */
	if (is_8lanes && !is_high_lanes)
		return;

	pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
	is_host = pem_cfg.cn78xx.hostmd;
	low_qlm = (is_8lanes) ? qlm - 1 : qlm;
	high_qlm = qlm;
	qlm = -1;

	if (!is_host) {
		/* Read the current slice config value. If it's at the value we will
		 * program, then skip doing the workaround. We're probably doing a
		 * hot reset and the workaround is already applied.
		 */
		slice_cfg.u64 = csr_rd_node(node, CVMX_GSERX_SLICE_CFG(low_qlm));
		if (slice_cfg.s.tx_rx_detect_lvl_enc == 7 && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
			return;
	}

	if (is_host && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
		/* (GSER-XXXX) GSER PHY needs to be reset at initialization */
		cvmx_gserx_phy_ctl_t phy_ctl;

		for (q = low_qlm; q <= high_qlm; q++) {
			phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(q));
			phy_ctl.s.phy_reset = 1;
			csr_wr_node(node, CVMX_GSERX_PHY_CTL(q), phy_ctl.u64);
		}
		udelay(5);
		for (q = low_qlm; q <= high_qlm; q++) {
			phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(q));
			phy_ctl.s.phy_reset = 0;
			csr_wr_node(node, CVMX_GSERX_PHY_CTL(q), phy_ctl.u64);
		}
		udelay(5);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
		/* (GSER-20936) GSER has wrong PCIe RX detect reset value */
		for (q = low_qlm; q <= high_qlm; q++) {
			slice_cfg.u64 = csr_rd_node(node, CVMX_GSERX_SLICE_CFG(q));
			slice_cfg.s.tx_rx_detect_lvl_enc = 7;
			csr_wr_node(node, CVMX_GSERX_SLICE_CFG(q), slice_cfg.u64);
		}

		/* Clear the bit in GSERX_RX_PWR_CTRL_P1[p1_rx_subblk_pd]
		 * that corresponds to "Lane DLL"
		 */
		for (q = low_qlm; q <= high_qlm; q++) {
			pwr_ctrl_p1.u64 = csr_rd_node(node, CVMX_GSERX_RX_PWR_CTRL_P1(q));
			pwr_ctrl_p1.s.p1_rx_subblk_pd &= ~4;
			csr_wr_node(node, CVMX_GSERX_RX_PWR_CTRL_P1(q), pwr_ctrl_p1.u64);
		}

		/* Errata (GSER-20888) GSER incorrect synchronizers hurts PCIe
		 * Override TX Power State machine TX reset control signal
		 */
		for (q = low_qlm; q <= high_qlm; q++) {
			for (i = 0; i < 4; i++) {
				cvmx_gserx_lanex_tx_cfg_0_t tx_cfg;
				cvmx_gserx_lanex_pwr_ctrl_t pwr_ctrl;

				tx_cfg.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(i, q));
				tx_cfg.s.tx_resetn_ovrrd_val = 1;
				csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(i, q), tx_cfg.u64);
				pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(i, q));
				pwr_ctrl.s.tx_p2s_resetn_ovrrd_en = 1;
				csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(i, q), pwr_ctrl.u64);
			}
		}
	}

	if (!is_host) {
		cvmx_pciercx_cfg089_t cfg089;
		cvmx_pciercx_cfg090_t cfg090;
		cvmx_pciercx_cfg091_t cfg091;
		cvmx_pciercx_cfg092_t cfg092;
		cvmx_pciercx_cfg548_t cfg548;
		cvmx_pciercx_cfg554_t cfg554;

		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
			/* Errata (GSER-21178) PCIe gen3 doesn't work */
			/* The starting equalization hints are incorrect on CN78XX pass 1.x. Fix
			 * them for the 8 possible lanes. It doesn't hurt to program them even
			 * for lanes not in use.
			 */
			cfg089.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG089(pem));
			cfg089.s.l1urph = 2;
			cfg089.s.l1utp = 7;
			cfg089.s.l0urph = 2;
			cfg089.s.l0utp = 7;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG089(pem), cfg089.u32);
			cfg090.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG090(pem));
			cfg090.s.l3urph = 2;
			cfg090.s.l3utp = 7;
			cfg090.s.l2urph = 2;
			cfg090.s.l2utp = 7;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG090(pem), cfg090.u32);
			cfg091.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG091(pem));
			cfg091.s.l5urph = 2;
			cfg091.s.l5utp = 7;
			cfg091.s.l4urph = 2;
			cfg091.s.l4utp = 7;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG091(pem), cfg091.u32);
			cfg092.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG092(pem));
			cfg092.s.l7urph = 2;
			cfg092.s.l7utp = 7;
			cfg092.s.l6urph = 2;
			cfg092.s.l6utp = 7;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG092(pem), cfg092.u32);

			/* FIXME: Disable phase 2 and phase 3 equalization */
			cfg548.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG548(pem));
			cfg548.s.ep2p3d = 1;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG548(pem), cfg548.u32);
		}

		/* Errata (GSER-21331) GEN3 Equalization may fail */
		/* Disable preset #10 and disable the 2ms timeout */
		cfg554.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG554(pem));
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
			cfg554.s.p23td = 1;
		cfg554.s.prv = 0x3ff;
		cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG554(pem), cfg554.u32);

		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
			need_ep_monitor = (pem_cfg.s.md == 2);
			if (need_ep_monitor) {
				cvmx_pciercx_cfg031_t cfg031;
				cvmx_pciercx_cfg040_t cfg040;

				/* Force Gen1 for initial link bringup. We'll
				 * fix it later
				 */
				pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
				pem_cfg.s.md = 0;
				csr_wr_node(node, CVMX_PEMX_CFG(pem), pem_cfg.u64);
				cfg031.u32 = cvmx_pcie_cfgx_read_node(node, pem,
								      CVMX_PCIERCX_CFG031(pem));
				cfg031.s.mls = 0;
				cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG031(pem),
							  cfg031.u32);
				cfg040.u32 = cvmx_pcie_cfgx_read_node(node, pem,
								      CVMX_PCIERCX_CFG040(pem));
				cfg040.s.tls = 1;
				cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG040(pem),
							  cfg040.u32);
				__cvmx_qlm_pcie_errata_ep_cn78xx(node, pem);
			}
			return;
		}
	}

	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
		/* De-assert the SOFT_RST bit for this QLM (PEM), causing the PCIe
		 * workarounds code above to take effect.
		 */
		soft_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(pem));
		soft_prst.s.soft_prst = 0;
		csr_wr_node(node, CVMX_RST_SOFT_PRSTX(pem), soft_prst.u64);
		udelay(1);

		/* Assert the SOFT_RST bit for this QLM (PEM), putting the PCIe back into
		 * the reset state without disturbing the workarounds.
		 */
		soft_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(pem));
		soft_prst.s.soft_prst = 1;
		csr_wr_node(node, CVMX_RST_SOFT_PRSTX(pem), soft_prst.u64);
	}
	udelay(1);
}
/**
 * Setup the PEM to either drive or receive reset from PRST based on RC or EP
 *
 * @param node	Node to use in a NUMA setup
 * @param pem	Which PEM to setup
 * @param is_endpoint
 *		Non-zero if the PEM is an EP
 */
static void __setup_pem_reset(int node, int pem, int is_endpoint)
{
	cvmx_rst_ctlx_t rst_ctl;

	/* Make sure is_endpoint is either 0 or 1 */
	is_endpoint = (is_endpoint != 0);
	rst_ctl.u64 = csr_rd_node(node, CVMX_RST_CTLX(pem));
	rst_ctl.s.prst_link = 0;	  /* Link down causes soft reset */
	rst_ctl.s.rst_link = is_endpoint; /* EP PERST causes a soft reset */
	rst_ctl.s.rst_drv = !is_endpoint; /* Drive if RC */
	rst_ctl.s.rst_rcv = is_endpoint;  /* Only read PERST in EP mode */
	rst_ctl.s.rst_chip = 0;		  /* PERST doesn't pull CHIP_RESET */
	csr_wr_node(node, CVMX_RST_CTLX(pem), rst_ctl.u64);
}
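/*
 * For reference, the RST_CTL programming above reduces to this truth
 * table (comment added for clarity; not in the original source):
 *
 *   field      RC (is_endpoint = 0)      EP (is_endpoint = 1)
 *   rst_link   0 (PERST ignored)         1 (PERST causes soft reset)
 *   rst_drv    1 (drive PERST output)    0
 *   rst_rcv    0                         1 (sample PERST as input)
 */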
/**
 * Configure QLM speed and mode for cn78xx.
 *
 * @param node	   Node to configure the QLM
 * @param qlm	   The QLM to configure
 * @param baud_mhz The speed the QLM needs to be configured in MHz.
 * @param mode	   The QLM to be configured as SGMII/XAUI/PCIe.
 * @param rc	   Only used for PCIe, rc = 1 for root complex mode, 0 for EP mode.
 * @param gen3	   Only used for PCIe
 *			gen3 = 2 GEN3 mode
 *			gen3 = 1 GEN2 mode
 *			gen3 = 0 GEN1 mode
 *
 * @param ref_clk_sel The reference-clock selection to use to configure QLM
 *			0 = REF_100MHZ
 *			1 = REF_125MHZ
 *			2 = REF_156MHZ
 *			3 = REF_161MHZ
 * @param ref_clk_input The reference-clock input to use to configure QLM
 *
 * Return: Return 0 on success or -1.
 */
int octeon_configure_qlm_cn78xx(int node, int qlm, int baud_mhz, int mode, int rc, int gen3,
				int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_lane_mode_t lmode;
	cvmx_gserx_cfg_t cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	int is_pcie = 0;
	int is_ilk = 0;
	int is_bgx = 0;
	int lane_mode = 0;
	int lmac_type = 0;
	bool alt_pll = false;
	int num_ports = 0;
	int lane_to_sds = 0;

	debug("%s(node: %d, qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d)\n",
	      __func__, node, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);

	if (OCTEON_IS_MODEL(OCTEON_CN76XX) && qlm > 4) {
		debug("%s: qlm %d not present on CN76XX\n", __func__, qlm);
		return -1;
	}

	/* Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
	 * during speed change. Change SLI_WINDOW_CTL[time] to 525us.
	 */
	__set_sli_window_ctl_errata_31375(node);

	cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
	/* If the PEM is in EP mode, no need to do anything */
	if (cfg.s.pcie && rc == 0) {
		debug("%s: node %d, qlm %d is in PCIe endpoint mode, returning\n",
		      __func__, node, qlm);
		return 0;
	}

	/* Set the reference clock to use */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) { /* External ref clock */
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}
	csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Reset the QLM after changing the reference clock */
	phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	phy_ctl.s.phy_pd = 1;
	csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1000);

	/* Always restore the reference clocks for a QLM */
	memcpy(ref_clk_cn78xx[node][qlm], def_ref_clk_cn78xx, sizeof(def_ref_clk_cn78xx));
	switch (mode) {
	case CVMX_QLM_MODE_PCIE:
	case CVMX_QLM_MODE_PCIE_1X8: {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;

		is_pcie = 1;

		if (ref_clk_sel == 0) {
			refclk_sel.u64 = csr_rd_node(node, CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 0;
			csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK100;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK100;
			else
				lane_mode = R_8G_REFCLK100;
		} else if (ref_clk_sel == 1) {
			refclk_sel.u64 = csr_rd_node(node, CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 1;
			csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK125;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK125;
			else
				lane_mode = R_8G_REFCLK125;
		} else {
			printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
			return -1;
		}

		switch (qlm) {
		case 0: /* Either x4 or x8 based on PEM0 */
		{
			cvmx_rst_soft_prstx_t rst_prst;

			rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(0));
			rst_prst.s.soft_prst = rc;
			csr_wr_node(node, CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
			__setup_pem_reset(node, 0, !rc);

			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
			pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
			pemx_cfg.cn78xx.hostmd = rc;
			pemx_cfg.cn78xx.md = gen3;
			csr_wr_node(node, CVMX_PEMX_CFG(0), pemx_cfg.u64);
			/* x8 mode waits for QLM1 setup before turning on the PEM */
			if (mode == CVMX_QLM_MODE_PCIE) {
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		}
		case 1: /* Either PEM0 x8 or PEM1 x4 */
		{
			if (mode == CVMX_QLM_MODE_PCIE) {
				cvmx_rst_soft_prstx_t rst_prst;
				cvmx_pemx_cfg_t pemx_cfg;

				rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(1));
				rst_prst.s.soft_prst = rc;
				csr_wr_node(node, CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
				__setup_pem_reset(node, 1, !rc);

				pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(1));
				pemx_cfg.cn78xx.lanes8 = 0;
				pemx_cfg.cn78xx.hostmd = rc;
				pemx_cfg.cn78xx.md = gen3;
				csr_wr_node(node, CVMX_PEMX_CFG(1), pemx_cfg.u64);
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(1));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(1), pemx_on.u64);
			} else {
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		}
		case 2: /* Either PEM2 x4 or PEM2 x8 */
		{
			cvmx_rst_soft_prstx_t rst_prst;

			rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(2));
			rst_prst.s.soft_prst = rc;
			csr_wr_node(node, CVMX_RST_SOFT_PRSTX(2), rst_prst.u64);
			__setup_pem_reset(node, 2, !rc);

			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
			pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
			pemx_cfg.cn78xx.hostmd = rc;
			pemx_cfg.cn78xx.md = gen3;
			csr_wr_node(node, CVMX_PEMX_CFG(2), pemx_cfg.u64);
			/* x8 mode waits for QLM3 setup before turning on the PEM */
			if (mode == CVMX_QLM_MODE_PCIE) {
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(2));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(2), pemx_on.u64);
			}
			break;
		}
		case 3: /* Either PEM2 x8 or PEM3 x4 */
		{
			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
			if (pemx_cfg.cn78xx.lanes8) {
				/* Last 4 lanes of PEM2 */
				/* PEMX_CFG already setup */
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(2));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(2), pemx_on.u64);
			}
			/* Check if PEM3 uses QLM3 and is in x4 lane mode */
			if (mode == CVMX_QLM_MODE_PCIE) {
				cvmx_rst_soft_prstx_t rst_prst;

				rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(3));
				rst_prst.s.soft_prst = rc;
				csr_wr_node(node, CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
				__setup_pem_reset(node, 3, !rc);

				pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
				pemx_cfg.cn78xx.lanes8 = 0;
				pemx_cfg.cn78xx.hostmd = rc;
				pemx_cfg.cn78xx.md = gen3;
				csr_wr_node(node, CVMX_PEMX_CFG(3), pemx_cfg.u64);
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
			}
			break;
		}
		case 4: /* Either PEM3 x4 or PEM3 x8 */
		{
			if (mode == CVMX_QLM_MODE_PCIE_1X8) {
				/* Last 4 lanes of PEM3 */
				/* PEMX_CFG already setup */
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
			} else {
				/* 4 lanes of PEM3 */
				cvmx_pemx_qlm_t pemx_qlm;
				cvmx_rst_soft_prstx_t rst_prst;

				rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(3));
				rst_prst.s.soft_prst = rc;
				csr_wr_node(node, CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
				__setup_pem_reset(node, 3, !rc);

				pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
				pemx_cfg.cn78xx.lanes8 = 0;
				pemx_cfg.cn78xx.hostmd = rc;
				pemx_cfg.cn78xx.md = gen3;
				csr_wr_node(node, CVMX_PEMX_CFG(3), pemx_cfg.u64);
				/* PEM3 is on QLM4 */
				pemx_qlm.u64 = csr_rd_node(node, CVMX_PEMX_QLM(3));
				pemx_qlm.cn78xx.pem3qlm = 1;
				csr_wr_node(node, CVMX_PEMX_QLM(3), pemx_qlm.u64);
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
			}
			break;
		}
		default:
			break;
		}
		break;
	}
	case CVMX_QLM_MODE_ILK:
		is_ilk = 1;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		if (lane_mode == -1)
			return -1;
		/* FIXME: Set lane_mode for other speeds */
		break;
	case CVMX_QLM_MODE_SGMII:
		is_bgx = 1;
		lmac_type = 0;
		lane_to_sds = 1;
		num_ports = 4;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: SGMII lane mode: %d, alternate PLL: %s\n", __func__, lane_mode,
		      alt_pll ? "true" : "false");
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_XAUI:
		is_bgx = 5;
		lmac_type = 1;
		lane_to_sds = 0xe4;
		num_ports = 1;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: XAUI lane mode: %d\n", __func__, lane_mode);
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_RXAUI:
		is_bgx = 3;
		lmac_type = 2;
		lane_to_sds = 0;
		num_ports = 2;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: RXAUI lane mode: %d\n", __func__, lane_mode);
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_XFI: /* 10GR_4X1 */
	case CVMX_QLM_MODE_10G_KR:
		is_bgx = 1;
		lmac_type = 3;
		lane_to_sds = 1;
		num_ports = 4;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: XFI/10G_KR lane mode: %d\n", __func__, lane_mode);
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_XLAUI: /* 40GR4_1X4 */
	case CVMX_QLM_MODE_40G_KR4:
		is_bgx = 5;
		lmac_type = 4;
		lane_to_sds = 0xe4;
		num_ports = 1;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: XLAUI/40G_KR4 lane mode: %d\n", __func__, lane_mode);
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_DISABLED:
		/* Power down the QLM */
		phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
		phy_ctl.s.phy_pd = 1;
		phy_ctl.s.phy_reset = 1;
		csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
		/* Disable all modes */
		csr_wr_node(node, CVMX_GSERX_CFG(qlm), 0);
		/* Do nothing */
		return 0;
	default:
		break;
	}
	if (alt_pll) {
		debug("%s: alternate PLL settings used for node %d, qlm %d, lane mode %d, reference clock %d\n",
		      __func__, node, qlm, lane_mode, ref_clk_sel);
		if (__set_qlm_ref_clk_cn78xx(node, qlm, lane_mode, ref_clk_sel)) {
			printf("%s: Error: reference clock %d is not supported for node %d, qlm %d\n",
			       __func__, ref_clk_sel, node, qlm);
			return -1;
		}
	}

	/* Power up PHY, but keep it in reset */
	phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_pd = 0;
	phy_ctl.s.phy_reset = 1;
	csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Errata GSER-20788: GSER(0..13)_CFG[BGX_QUAD]=1 is broken. Force the
	 * BGX_QUAD bit to be clear for CN78XX pass 1.x
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
		is_bgx &= 3;

	/* Set GSER for the interface mode */
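	/* is_bgx is a small bit encoding used below: bit 0 enables BGX on
	 * the QLM, bit 1 marks a dual-lane (RXAUI-style) interface and
	 * bit 2 a quad-lane (XAUI/XLAUI-style) interface, matching the
	 * values set in the mode switch above (1 = SGMII/XFI, 3 = RXAUI,
	 * 5 = XAUI/XLAUI). (Comment added for clarity; not in the
	 * original source.)
	 */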
	cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
	cfg.s.ila = is_ilk;
	cfg.s.bgx = is_bgx & 1;
	cfg.s.bgx_quad = (is_bgx >> 2) & 1;
	cfg.s.bgx_dual = (is_bgx >> 1) & 1;
	cfg.s.pcie = is_pcie;
	csr_wr_node(node, CVMX_GSERX_CFG(qlm), cfg.u64);

	/* Lane mode */
	lmode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));
	lmode.s.lmode = lane_mode;
	csr_wr_node(node, CVMX_GSERX_LANE_MODE(qlm), lmode.u64);

	/* BGX0-1 can connect to QLM0-1 or QLM2-3. Program the select bit if
	 * we're one of these QLMs and we're using BGX.
	 */
	if (qlm < 4 && is_bgx) {
		int bgx = qlm & 1;
		int use_upper = (qlm >> 1) & 1;
		cvmx_bgxx_cmr_global_config_t global_cfg;

		global_cfg.u64 = csr_rd_node(node, CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx));
		global_cfg.s.pmux_sds_sel = use_upper;
		csr_wr_node(node, CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx), global_cfg.u64);
	}

	/* Bring phy out of reset */
	phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
	csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));

	/*
	 * Wait 250 ns until the management interface is ready to accept
	 * read/write commands.
	 */
	udelay(1);

	if (is_bgx) {
		int bgx = (qlm < 2) ? qlm : qlm - 2;
		cvmx_bgxx_cmrx_config_t cmr_config;
		int index;
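		/* lane_to_sds below acts as a per-LMAC 2-bit lane map
		 * (0xe4 = lanes 3,2,1,0 in order). For single-lane modes
		 * each LMAC maps to its own lane (index); for RXAUI, LMAC0
		 * uses lanes 1:0 (0x4) and LMAC1 lanes 3:2 (0xe). (Comment
		 * added for clarity; not in the original source.)
		 */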
		for (index = 0; index < num_ports; index++) {
			cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, bgx));
			cmr_config.s.enable = 0;
			cmr_config.s.data_pkt_tx_en = 0;
			cmr_config.s.data_pkt_rx_en = 0;
			cmr_config.s.lmac_type = lmac_type;
			cmr_config.s.lane_to_sds = ((lane_to_sds == 1) ?
						    index : ((lane_to_sds == 0) ?
							     (index ? 0xe : 4) :
							     lane_to_sds));
			csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);
		}
		csr_wr_node(node, CVMX_BGXX_CMR_TX_LMACS(bgx), num_ports);
		csr_wr_node(node, CVMX_BGXX_CMR_RX_LMACS(bgx), num_ports);

		/* Enable/disable training for 10G_KR/40G_KR4/XFI/XLAUI modes */
		for (index = 0; index < num_ports; index++) {
			cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;

			spu_pmd_control.u64 =
				csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
			if (mode == CVMX_QLM_MODE_10G_KR || mode == CVMX_QLM_MODE_40G_KR4)
				spu_pmd_control.s.train_en = 1;
			else if (mode == CVMX_QLM_MODE_XFI || mode == CVMX_QLM_MODE_XLAUI)
				spu_pmd_control.s.train_en = 0;
			csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
				    spu_pmd_control.u64);
		}
	}

	/* Configure the gser pll */
	if (!is_pcie)
		__qlm_setup_pll_cn78xx(node, qlm);

	/* Wait for reset to complete and the PLL to lock */
	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_PLL_STAT(qlm),
				       cvmx_gserx_pll_stat_t,
				       pll_lock, ==, 1, 10000)) {
		printf("%d:QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n",
		       node, qlm);
		return -1;
	}

	/* Perform PCIe errata workaround */
	if (is_pcie)
		__cvmx_qlm_pcie_errata_cn78xx(node, qlm);
	else
		__qlm_init_errata_20844(node, qlm);

	/* Wait for reset to complete and the PLL to lock */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe.
	 */
	if (!is_pcie && CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_QLM_STAT(qlm),
						   cvmx_gserx_qlm_stat_t, rst_rdy,
						   ==, 1, 10000)) {
		printf("%d:QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n",
		       node, qlm);
		return -1;
	}

	/* Errata GSER-26150: 10G PHY PLL Temperature Failure */
	/* This workaround must be completed after the final deassertion of
	 * GSERx_PHY_CTL[PHY_RESET].
	 * Apply the workaround to 10.3125Gbps and 8Gbps only.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
	    (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
		__qlm_errata_gser_26150(node, qlm, is_pcie);

	/* Errata GSER-26636: 10G-KR/40G-KR - Inverted Tx Coefficient Direction
	 * Change. Applied to all 10G standards (required for KR) but also
	 * applied to other standards in case software training is used.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && baud_mhz == 103125)
		__qlm_kr_inc_dec_gser26636(node, qlm);

	/* Errata GSER-25992: RX EQ Default Settings Update (CTLE Bias) */
	/* This workaround will only be applied to Pass 1.x */
	/* It will also only be applied if the SERDES data-rate is 10G */
	/* or if PCIe Gen3 (gen3=2 is PCIe Gen3) */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
	    (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
		cvmx_qlm_gser_errata_25992(node, qlm);

	/* Errata GSER-27140: Updating the RX EQ settings due to temperature
	 * drift sensitivities
	 */
	/* This workaround will also only be applied if the SERDES data-rate is 10G */
	if (baud_mhz == 103125)
		__qlm_rx_eq_temp_gser27140(node, qlm);

	/* Reduce the voltage amplitude coming from the Marvell PHY and also
	 * change the DFE threshold settings for the RXAUI interface.
	 */
	if (is_bgx && mode == CVMX_QLM_MODE_RXAUI) {
		int l;

		for (l = 0; l < 4; l++) {
			cvmx_gserx_lanex_rx_cfg_4_t cfg4;
			cvmx_gserx_lanex_tx_cfg_0_t cfg0;

			/* Change the Q/QB error sampler 0 threshold from 0xD to 0xF */
			cfg4.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_4(l, qlm));
			cfg4.s.cfg_rx_errdet_ctrl = 0xcf6f;
			csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_4(l, qlm), cfg4.u64);
			/* Reduce the voltage swing to roughly 460mV */
			cfg0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(l, qlm));
			cfg0.s.cfg_tx_swing = 0x12;
			csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(l, qlm), cfg0.u64);
		}
	}

	return 0;
}
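/*
 * Illustrative example (not part of the original source; argument values
 * are hypothetical): bring up QLM0 on node 0 as a x4 Gen3 PCIe root
 * complex using common reference clock input 0 at 100 MHz. The baud
 * value only feeds the errata checks in PCIe mode:
 *
 *   octeon_configure_qlm_cn78xx(0, 0, 8000, CVMX_QLM_MODE_PCIE, 1, 2, 0, 1);
 */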
static int __is_qlm_valid_bgx_cn73xx(int qlm)
{
	if (qlm == 2 || qlm == 3 || qlm == 5 || qlm == 6)
		return 0;
	return 1;
}
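/*
 * Note (added for clarity; not in the original source): despite its
 * name, the helper above returns 0 for the QLMs that can run BGX on
 * cn73xx (2, 3, 5 and 6) and 1 otherwise, which is why the caller below
 * bails out when it returns non-zero.
 */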
/**
 * Configure QLM/DLM speed and mode for cn73xx.
 *
 * @param qlm	   The QLM to configure
 * @param baud_mhz The speed the QLM needs to be configured in MHz.
 * @param mode	   The QLM to be configured as SGMII/XAUI/PCIe.
 * @param rc	   Only used for PCIe, rc = 1 for root complex mode, 0 for EP mode.
 * @param gen3	   Only used for PCIe
 *			gen3 = 2 GEN3 mode
 *			gen3 = 1 GEN2 mode
 *			gen3 = 0 GEN1 mode
 *
 * @param ref_clk_sel The reference-clock selection to use to configure QLM
 *			0 = REF_100MHZ
 *			1 = REF_125MHZ
 *			2 = REF_156MHZ
 *			3 = REF_161MHZ
 *
 * @param ref_clk_input The reference-clock input to use to configure QLM
 *			0 = QLM/DLM reference clock input
 *			1 = common reference clock input 0
 *			2 = common reference clock input 1
 *
 * Return: Return 0 on success or -1.
 */
static int octeon_configure_qlm_cn73xx(int qlm, int baud_mhz, int mode, int rc, int gen3,
				       int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_lane_mode_t lmode;
	cvmx_gserx_cfg_t cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	int is_pcie = 0;
	int is_bgx = 0;
	int lane_mode = 0;
	short lmac_type[4] = { 0 };
	short sds_lane[4] = { 0 };
	bool alt_pll = false;
	int enable_training = 0;
	int additional_lmacs = 0;

	debug("%s(qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d)\n",
	      __func__, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);

	/* Don't configure QLM4 if it is not in SATA mode */
	if (qlm == 4) {
		if (mode == CVMX_QLM_MODE_SATA_2X1)
			return __setup_sata(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
		printf("Invalid mode for QLM4\n");
		return 0;
	}

	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));

	/* Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
	 * during speed change. Change SLI_WINDOW_CTL[time] to 525us.
	 */
	__set_sli_window_ctl_errata_31375(0);
	/* If the PEM is in EP mode, no need to do anything */
	if (cfg.s.pcie && rc == 0 &&
	    (mode == CVMX_QLM_MODE_PCIE || mode == CVMX_QLM_MODE_PCIE_1X8 ||
	     mode == CVMX_QLM_MODE_PCIE_1X2)) {
		debug("%s: qlm %d is in PCIe endpoint mode, returning\n", __func__, qlm);
		return 0;
	}

	/* Set the reference clock to use */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) { /* External ref clock */
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}
	csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Reset the QLM after changing the reference clock */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	phy_ctl.s.phy_pd = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1000);

	/* Check if QLM is a valid BGX interface */
	if (mode != CVMX_QLM_MODE_PCIE && mode != CVMX_QLM_MODE_PCIE_1X2 &&
	    mode != CVMX_QLM_MODE_PCIE_1X8) {
		if (__is_qlm_valid_bgx_cn73xx(qlm))
			return -1;
	}
	switch (mode) {
	case CVMX_QLM_MODE_PCIE:
	case CVMX_QLM_MODE_PCIE_1X2:
	case CVMX_QLM_MODE_PCIE_1X8: {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;
		cvmx_pemx_qlm_t pemx_qlm;
		cvmx_rst_soft_prstx_t rst_prst;
		int port = 0;

		is_pcie = 1;

		if (qlm < 5 && mode == CVMX_QLM_MODE_PCIE_1X2) {
			printf("Invalid PCIe mode(%d) for QLM%d\n", mode, qlm);
			return -1;
		}

		if (ref_clk_sel == 0) {
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 0;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK100;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK100;
			else
				lane_mode = R_8G_REFCLK100;
		} else if (ref_clk_sel == 1) {
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 1;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK125;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK125;
			else
				lane_mode = R_8G_REFCLK125;
		} else {
			printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
			return -1;
		}

		switch (qlm) {
		case 0: /* Either x4 or x8 based on PEM0 */
			rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(0));
			rst_prst.s.soft_prst = rc;
			csr_wr(CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
			__setup_pem_reset(0, 0, !rc);

			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
			pemx_cfg.cn78xx.hostmd = rc;
			pemx_cfg.cn78xx.md = gen3;
			csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);
			/* x8 mode waits for QLM1 setup before turning on the PEM */
			if (mode == CVMX_QLM_MODE_PCIE) {
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		case 1: /* Either PEM0 x8 or PEM1 x4 */
			if (mode == CVMX_QLM_MODE_PCIE) {
				rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(1));
				rst_prst.s.soft_prst = rc;
				csr_wr(CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
				__setup_pem_reset(0, 1, !rc);

				pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
				pemx_cfg.cn78xx.lanes8 = 0;
				pemx_cfg.cn78xx.hostmd = rc;
				pemx_cfg.cn78xx.md = gen3;
				csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
			} else { /* x8 mode */
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		case 2: /* Either PEM2 x4 or PEM2 x8 or BGX0 */
		{
			pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(2));
			pemx_qlm.cn73xx.pemdlmsel = 0;
			csr_wr(CVMX_PEMX_QLM(2), pemx_qlm.u64);

			rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(2));
			rst_prst.s.soft_prst = rc;
			csr_wr(CVMX_RST_SOFT_PRSTX(2), rst_prst.u64);
			__setup_pem_reset(0, 2, !rc);

			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
			pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
			pemx_cfg.cn78xx.hostmd = rc;
			pemx_cfg.cn78xx.md = gen3;
			csr_wr(CVMX_PEMX_CFG(2), pemx_cfg.u64);
			/* x8 mode waits for QLM3 setup before turning on the PEM */
			if (mode == CVMX_QLM_MODE_PCIE) {
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
			}
			break;
		}
		case 3: /* Either PEM2 x8 or PEM3 x4 or BGX1 */
			/* PEM2/PEM3 are configured to use QLM2/3 */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
			if (pemx_cfg.cn78xx.lanes8) {
				/* Last 4 lanes of PEM2 */
				/* PEMX_CFG already setup */
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
			}
			/* Check if PEM3 uses QLM3 and is in x4 lane mode */
			if (mode == CVMX_QLM_MODE_PCIE) {
				pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(3));
				pemx_qlm.cn73xx.pemdlmsel = 0;
				csr_wr(CVMX_PEMX_QLM(3), pemx_qlm.u64);

				rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(3));
				rst_prst.s.soft_prst = rc;
				csr_wr(CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
				__setup_pem_reset(0, 3, !rc);

				pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(3));
				pemx_cfg.cn78xx.lanes8 = 0;
				pemx_cfg.cn78xx.hostmd = rc;
				pemx_cfg.cn78xx.md = gen3;
				csr_wr(CVMX_PEMX_CFG(3), pemx_cfg.u64);
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(3));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(3), pemx_on.u64);
			}
			break;
		case 5: /* PEM2/PEM3 x2 or BGX2 */
		case 6:
			port = (qlm == 5) ? 2 : 3;
			if (mode == CVMX_QLM_MODE_PCIE_1X2) {
				/* PEM2/PEM3 are configured to use DLM5/6 */
				pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(port));
				pemx_qlm.cn73xx.pemdlmsel = 1;
				csr_wr(CVMX_PEMX_QLM(port), pemx_qlm.u64);
				/* 2 lanes of PEM3 */
				rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(port));
				rst_prst.s.soft_prst = rc;
				csr_wr(CVMX_RST_SOFT_PRSTX(port), rst_prst.u64);
				__setup_pem_reset(0, port, !rc);

				pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(port));
				pemx_cfg.cn78xx.lanes8 = 0;
				pemx_cfg.cn78xx.hostmd = rc;
				pemx_cfg.cn78xx.md = gen3;
				csr_wr(CVMX_PEMX_CFG(port), pemx_cfg.u64);
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(port));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(port), pemx_on.u64);
			}
			break;
		default:
			break;
		}
		break;
	}
	case CVMX_QLM_MODE_SGMII:
		is_bgx = 1;
		lmac_type[0] = 0;
		lmac_type[1] = 0;
		lmac_type[2] = 0;
		lmac_type[3] = 0;
		sds_lane[0] = 0;
		sds_lane[1] = 1;
		sds_lane[2] = 2;
		sds_lane[3] = 3;
		break;
	case CVMX_QLM_MODE_SGMII_2X1:
		if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = 0;
			lmac_type[1] = 0;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		} else if (qlm == 6) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 0;
			lmac_type[3] = 0;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_XAUI:
		is_bgx = 5;
		lmac_type[0] = 1;
		lmac_type[1] = -1;
		lmac_type[2] = -1;
		lmac_type[3] = -1;
		sds_lane[0] = 0xe4;
		break;
	case CVMX_QLM_MODE_RXAUI:
		is_bgx = 3;
		lmac_type[0] = 2;
		lmac_type[1] = 2;
		lmac_type[2] = -1;
		lmac_type[3] = -1;
		sds_lane[0] = 0x4;
		sds_lane[1] = 0xe;
		break;
	case CVMX_QLM_MODE_RXAUI_1X2:
		if (qlm == 5) {
			is_bgx = 3;
			lmac_type[0] = 2;
			lmac_type[1] = -1;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0x4;
		}
		if (qlm == 6) {
			is_bgx = 3;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 2;
			lmac_type[3] = -1;
			sds_lane[2] = 0xe;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_10G_KR:
		enable_training = 1;
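		/* fall through */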
	case CVMX_QLM_MODE_XFI: /* 10GR_4X1 */
		is_bgx = 1;
		lmac_type[0] = 3;
		lmac_type[1] = 3;
		lmac_type[2] = 3;
		lmac_type[3] = 3;
		sds_lane[0] = 0;
		sds_lane[1] = 1;
		sds_lane[2] = 2;
		sds_lane[3] = 3;
		break;
	case CVMX_QLM_MODE_10G_KR_1X2:
		enable_training = 1;
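		/* fall through */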
	case CVMX_QLM_MODE_XFI_1X2:
		if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = 3;
			lmac_type[1] = 3;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		} else if (qlm == 6) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 3;
			lmac_type[3] = 3;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_40G_KR4:
		enable_training = 1;
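		/* fall through */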
	case CVMX_QLM_MODE_XLAUI: /* 40GR4_1X4 */
		is_bgx = 5;
		lmac_type[0] = 4;
		lmac_type[1] = -1;
		lmac_type[2] = -1;
		lmac_type[3] = -1;
		sds_lane[0] = 0xe4;
		break;
	case CVMX_QLM_MODE_RGMII_SGMII:
		is_bgx = 1;
		lmac_type[0] = 5;
		lmac_type[1] = 0;
		lmac_type[2] = 0;
		lmac_type[3] = 0;
		sds_lane[0] = 0;
		sds_lane[1] = 1;
		sds_lane[2] = 2;
		sds_lane[3] = 3;
		break;
	case CVMX_QLM_MODE_RGMII_SGMII_1X1:
		if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = 5;
			lmac_type[1] = 0;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		}
		break;
	case CVMX_QLM_MODE_RGMII_SGMII_2X1:
		if (qlm == 6) {
			is_bgx = 1;
			lmac_type[0] = 5;
			lmac_type[1] = -1;
			lmac_type[2] = 0;
			lmac_type[3] = 0;
			sds_lane[0] = 0;
			sds_lane[2] = 0;
			sds_lane[3] = 1;
		}
		break;
	case CVMX_QLM_MODE_RGMII_10G_KR:
		enable_training = 1;
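		/* fall through */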
	case CVMX_QLM_MODE_RGMII_XFI:
		is_bgx = 1;
		lmac_type[0] = 5;
		lmac_type[1] = 3;
		lmac_type[2] = 3;
		lmac_type[3] = 3;
		sds_lane[0] = 0;
		sds_lane[1] = 1;
		sds_lane[2] = 2;
		sds_lane[3] = 3;
		break;
	case CVMX_QLM_MODE_RGMII_10G_KR_1X1:
		enable_training = 1;
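		/* fall through */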
	case CVMX_QLM_MODE_RGMII_XFI_1X1:
		if (qlm == 5) {
			is_bgx = 3;
			lmac_type[0] = 5;
			lmac_type[1] = 3;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		}
		break;
	case CVMX_QLM_MODE_RGMII_40G_KR4:
		enable_training = 1;
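		/* fall through */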
	case CVMX_QLM_MODE_RGMII_XLAUI:
		is_bgx = 5;
		lmac_type[0] = 5;
		lmac_type[1] = 4;
		lmac_type[2] = -1;
		lmac_type[3] = -1;
		sds_lane[0] = 0x0;
		sds_lane[1] = 0xe4;
		break;
	case CVMX_QLM_MODE_RGMII_RXAUI:
		is_bgx = 3;
		lmac_type[0] = 5;
		lmac_type[1] = 2;
		lmac_type[2] = 2;
		lmac_type[3] = -1;
		sds_lane[0] = 0x0;
		sds_lane[1] = 0x4;
		sds_lane[2] = 0xe;
		break;
	case CVMX_QLM_MODE_RGMII_XAUI:
		is_bgx = 5;
		lmac_type[0] = 5;
		lmac_type[1] = 1;
		lmac_type[2] = -1;
		lmac_type[3] = -1;
		sds_lane[0] = 0;
		sds_lane[1] = 0xe4;
		break;
	default:
		break;
	}
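	/* At this point lmac_type[]/sds_lane[] describe each LMAC of the
	 * BGX block: 0 = SGMII, 1 = XAUI, 2 = RXAUI, 3 = XFI/10G-KR,
	 * 4 = XLAUI/40G-KR4, 5 = RGMII, -1 = unused. (Comment added for
	 * clarity; not in the original source.)
	 */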
	if (is_pcie == 0)
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);

	debug("%s: %d lane mode: %d, alternate PLL: %s\n", __func__, mode, lane_mode,
	      alt_pll ? "true" : "false");
	if (lane_mode == -1)
		return -1;

	if (alt_pll) {
		debug("%s: alternate PLL settings used for qlm %d, lane mode %d, reference clock %d\n",
		      __func__, qlm, lane_mode, ref_clk_sel);
		if (__set_qlm_ref_clk_cn78xx(0, qlm, lane_mode, ref_clk_sel)) {
			printf("%s: Error: reference clock %d is not supported for qlm %d, lane mode: 0x%x\n",
			       __func__, ref_clk_sel, qlm, lane_mode);
			return -1;
		}
	}

	/* Power up PHY, but keep it in reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_pd = 0;
	phy_ctl.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Set GSER for the interface mode */
	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
	cfg.s.bgx = is_bgx & 1;
	cfg.s.bgx_quad = (is_bgx >> 2) & 1;
	cfg.s.bgx_dual = (is_bgx >> 1) & 1;
	cfg.s.pcie = is_pcie;
	csr_wr(CVMX_GSERX_CFG(qlm), cfg.u64);

	/* Lane mode */
	lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
	lmode.s.lmode = lane_mode;
	csr_wr(CVMX_GSERX_LANE_MODE(qlm), lmode.u64);

	/* Program lmac_type to figure out the type of BGX interface configured */
	if (is_bgx) {
		int bgx = (qlm < 4) ? qlm - 2 : 2;
		cvmx_bgxx_cmrx_config_t cmr_config;
		cvmx_bgxx_cmr_rx_lmacs_t rx_lmacs;
		cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
		int index, total_lmacs = 0;

		for (index = 0; index < 4; index++) {
			cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
			cmr_config.s.enable = 0;
			cmr_config.s.data_pkt_rx_en = 0;
			cmr_config.s.data_pkt_tx_en = 0;
			if (lmac_type[index] != -1) {
				cmr_config.s.lmac_type = lmac_type[index];
				cmr_config.s.lane_to_sds = sds_lane[index];
				total_lmacs++;
				/* RXAUI takes up 2 lmacs */
				if (lmac_type[index] == 2)
					total_lmacs += 1;
			}
			csr_wr(CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);
			/* Errata (TBD) RGMII doesn't turn on the clock if
			 * it's by itself. Force it on.
			 */
			if (lmac_type[index] == 5) {
				cvmx_bgxx_cmr_global_config_t global_config;

				global_config.u64 = csr_rd(CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx));
				global_config.s.bgx_clk_enable = 1;
				csr_wr(CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx), global_config.u64);
			}
			/* Enable training for 10G_KR/40G_KR4 modes */
			if (enable_training == 1 &&
			    (lmac_type[index] == 3 || lmac_type[index] == 4)) {
				spu_pmd_control.u64 =
					csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
				spu_pmd_control.s.train_en = 1;
				csr_wr(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
				       spu_pmd_control.u64);
			}
		}
		/* Update the total number of lmacs */
		rx_lmacs.u64 = csr_rd(CVMX_BGXX_CMR_RX_LMACS(bgx));
		rx_lmacs.s.lmacs = total_lmacs + additional_lmacs;
		csr_wr(CVMX_BGXX_CMR_RX_LMACS(bgx), rx_lmacs.u64);
		csr_wr(CVMX_BGXX_CMR_TX_LMACS(bgx), rx_lmacs.u64);
	}
	/* Bring phy out of reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/*
	 * Wait 1us until the management interface is ready to accept
	 * read/write commands.
	 */
	udelay(1);

	/* Wait for reset to complete and the PLL to lock */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe.
	 */
	if (!is_pcie && CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm),
					      cvmx_gserx_qlm_stat_t,
					      rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Configure the gser pll */
	if (!is_pcie)
		__qlm_setup_pll_cn78xx(0, qlm);

	/* Wait for reset to complete and the PLL to lock */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
				  pll_lock, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
		return -1;
	}

	/* Errata GSER-26150: 10G PHY PLL Temperature Failure */
	/* This workaround must be completed after the final deassertion of
	 * GSERx_PHY_CTL[PHY_RESET].
	 * Apply the workaround to 10.3125Gbps and 8Gbps only.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_0) &&
	    (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
		__qlm_errata_gser_26150(0, qlm, is_pcie);

	/* Errata GSER-26636: 10G-KR/40G-KR - Inverted Tx Coefficient Direction
	 * Change. Applied to all 10G standards (required for KR) but also
	 * applied to other standards in case software training is used.
	 */
	if (baud_mhz == 103125)
		__qlm_kr_inc_dec_gser26636(0, qlm);

	/* Errata GSER-25992: RX EQ Default Settings Update (CTLE Bias) */
	/* This workaround will only be applied to Pass 1.x */
	/* It will also only be applied if the SERDES data-rate is 10G */
	/* or if PCIe Gen3 (gen3=2 is PCIe Gen3) */
	if (baud_mhz == 103125 || (is_pcie && gen3 == 2))
		cvmx_qlm_gser_errata_25992(0, qlm);

	/* Errata GSER-27140: Updating the RX EQ settings due to temperature
	 * drift sensitivities
	 */
	/* This workaround will also only be applied if the SERDES data-rate is 10G */
	if (baud_mhz == 103125)
		__qlm_rx_eq_temp_gser27140(0, qlm);

	/* Reduce the voltage amplitude coming from the Marvell PHY and also
	 * change the DFE threshold settings for the RXAUI interface.
	 */
	if (is_bgx) {
		int l;

		for (l = 0; l < 4; l++) {
			cvmx_gserx_lanex_rx_cfg_4_t cfg4;
			cvmx_gserx_lanex_tx_cfg_0_t cfg0;

			if (lmac_type[l] == 2) {
				/* Change the Q/QB error sampler 0 threshold from 0xD to 0xF */
				cfg4.u64 = csr_rd(CVMX_GSERX_LANEX_RX_CFG_4(l, qlm));
				cfg4.s.cfg_rx_errdet_ctrl = 0xcf6f;
				csr_wr(CVMX_GSERX_LANEX_RX_CFG_4(l, qlm), cfg4.u64);
				/* Reduce the voltage swing to roughly 460mV */
				cfg0.u64 = csr_rd(CVMX_GSERX_LANEX_TX_CFG_0(l, qlm));
				cfg0.s.cfg_tx_swing = 0x12;
				csr_wr(CVMX_GSERX_LANEX_TX_CFG_0(l, qlm), cfg0.u64);
			}
		}
	}

	return 0;
}
static int __rmac_pll_config(int baud_mhz, int qlm, int mode)
{
	cvmx_gserx_pll_px_mode_0_t pmode0;
	cvmx_gserx_pll_px_mode_1_t pmode1;
	cvmx_gserx_lane_px_mode_0_t lmode0;
	cvmx_gserx_lane_px_mode_1_t lmode1;
	cvmx_gserx_lane_mode_t lmode;
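	/* The constants below are raw register images for the GSER PLL and
	 * lane mode registers, one set per supported CPRI/SDL baud rate.
	 * (Comment added for clarity; not in the original source.)
	 */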
	switch (baud_mhz) {
	case 98304:
		pmode0.u64 = 0x1a0a;
		pmode1.u64 = 0x3228;
		lmode0.u64 = 0x600f;
		lmode1.u64 = 0xa80f;
		break;
	case 49152:
		if (mode == CVMX_QLM_MODE_SDL) {
			pmode0.u64 = 0x3605;
			pmode1.u64 = 0x0814;
			lmode0.u64 = 0x000f;
			lmode1.u64 = 0x6814;
		} else {
			pmode0.u64 = 0x1a0a;
			pmode1.u64 = 0x3228;
			lmode0.u64 = 0x650f;
			lmode1.u64 = 0xe80f;
		}
		break;
	case 24576:
		pmode0.u64 = 0x1a0a;
		pmode1.u64 = 0x3228;
		lmode0.u64 = 0x6a0f;
		lmode1.u64 = 0xe80f;
		break;
	case 12288:
		pmode0.u64 = 0x1a0a;
		pmode1.u64 = 0x3228;
		lmode0.u64 = 0x6f0f;
		lmode1.u64 = 0xe80f;
		break;
	case 6144:
		pmode0.u64 = 0x160a;
		pmode1.u64 = 0x1019;
		lmode0.u64 = 0x000f;
		lmode1.u64 = 0x2814;
		break;
	case 3072:
		pmode0.u64 = 0x160a;
		pmode1.u64 = 0x1019;
		lmode0.u64 = 0x050f;
		lmode1.u64 = 0x6814;
		break;
	default:
		printf("Invalid speed for CPRI/SDL configuration\n");
		return -1;
	}

	lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
	csr_wr(CVMX_GSERX_PLL_PX_MODE_0(lmode.s.lmode, qlm), pmode0.u64);
	csr_wr(CVMX_GSERX_PLL_PX_MODE_1(lmode.s.lmode, qlm), pmode1.u64);
	csr_wr(CVMX_GSERX_LANE_PX_MODE_0(lmode.s.lmode, qlm), lmode0.u64);
	csr_wr(CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm), lmode1.u64);
	return 0;
}
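/*
 * Note (added for clarity; not in the original source):
 * __rmac_pll_config() is only called from the cnf75xx configuration
 * path below for the CPRI and SDL (RMAC) modes, after GSER_CFG[RMAC]
 * has been set and before waiting for PLL lock.
 */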
/**
 * Configure QLM/DLM speed and mode for cnf75xx.
 *
 * @param qlm	   The QLM to configure
 * @param baud_mhz The speed the QLM needs to be configured in MHz.
 * @param mode	   The QLM to be configured as SGMII/XAUI/PCIe.
 * @param rc	   Only used for PCIe, rc = 1 for root complex mode, 0 for EP mode.
 * @param gen3	   Only used for PCIe
 *			gen3 = 2 GEN3 mode
 *			gen3 = 1 GEN2 mode
 *			gen3 = 0 GEN1 mode
 *
 * @param ref_clk_sel The reference-clock selection to use to configure QLM
 *			0 = REF_100MHZ
 *			1 = REF_125MHZ
 *			2 = REF_156MHZ
 *			3 = REF_122MHZ
 * @param ref_clk_input The reference-clock input to use to configure QLM
 *
 * Return: Return 0 on success or -1.
 */
static int octeon_configure_qlm_cnf75xx(int qlm, int baud_mhz, int mode, int rc, int gen3,
					int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_lane_mode_t lmode;
	cvmx_gserx_cfg_t cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	int is_pcie = 0;
	int is_bgx = 0;
	int is_srio = 0;
	int is_rmac = 0;
	int is_rmac_pipe = 0;
	int lane_mode = 0;
	short lmac_type[4] = { 0 };
	short sds_lane[4] = { 0 };
	bool alt_pll = false;
	int enable_training = 0;
	int additional_lmacs = 0;
	int port = (qlm == 3) ? 1 : 0;
	cvmx_sriox_status_reg_t status_reg;

	debug("%s(qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d)\n",
	      __func__, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);

	if (qlm > 8) {
		printf("Invalid qlm%d passed\n", qlm);
		return -1;
	}

	/* Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
	 * during speed change. Change SLI_WINDOW_CTL[time] to 525us.
	 */
	__set_sli_window_ctl_errata_31375(0);

	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
	/* If the PEM is in EP mode, no need to do anything */
	if (cfg.s.pcie && rc == 0) {
		debug("%s: qlm %d is in PCIe endpoint mode, returning\n", __func__, qlm);
		return 0;
	}

	if (cfg.s.srio && rc == 0) {
		debug("%s: qlm %d is in SRIO endpoint mode, returning\n", __func__, qlm);
		return 0;
	}

	/* Set the reference clock to use */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) { /* External ref clock */
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}
	csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Reset the QLM after changing the reference clock */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	phy_ctl.s.phy_pd = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1000);

	switch (mode) {
	case CVMX_QLM_MODE_PCIE:
	case CVMX_QLM_MODE_PCIE_1X2:
	case CVMX_QLM_MODE_PCIE_2X1: {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;
		cvmx_rst_soft_prstx_t rst_prst;

		is_pcie = 1;

		if (qlm > 1) {
			printf("Invalid PCIe mode for QLM%d\n", qlm);
			return -1;
		}

		if (ref_clk_sel == 0) {
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 0;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK100;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK100;
			else
				lane_mode = R_8G_REFCLK100;
		} else if (ref_clk_sel == 1) {
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 1;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK125;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK125;
			else
				lane_mode = R_8G_REFCLK125;
		} else {
			printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
			return -1;
		}

		switch (qlm) {
		case 0: /* Either x4 or x2 based on PEM0 */
			rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(0));
			rst_prst.s.soft_prst = rc;
			csr_wr(CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
			__setup_pem_reset(0, 0, !rc);

			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			pemx_cfg.cnf75xx.hostmd = rc;
			pemx_cfg.cnf75xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE);
			pemx_cfg.cnf75xx.md = gen3;
			csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);
			/* x4 mode waits for QLM1 setup before turning on the PEM */
			if (mode == CVMX_QLM_MODE_PCIE_1X2 || mode == CVMX_QLM_MODE_PCIE_2X1) {
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		case 1: /* Either PEM0 x4 or PEM1 x2 */
			if (mode == CVMX_QLM_MODE_PCIE_1X2 || mode == CVMX_QLM_MODE_PCIE_2X1) {
				rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(1));
				rst_prst.s.soft_prst = rc;
				csr_wr(CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
				__setup_pem_reset(0, 1, !rc);

				pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
				pemx_cfg.cnf75xx.hostmd = rc;
				pemx_cfg.cnf75xx.md = gen3;
				csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
			} else {
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		default:
			break;
		}
		break;
	}
	case CVMX_QLM_MODE_SRIO_1X4:
	case CVMX_QLM_MODE_SRIO_2X2:
	case CVMX_QLM_MODE_SRIO_4X1: {
		int spd = 0xf;

		if (cvmx_fuse_read(1601)) {
			debug("SRIO is not supported on cnf73xx model\n");
			return -1;
		}
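		/* Map of (baud rate, reference clock) to the SRIO SPD code
		 * selected below (0xf = unsupported/disabled):
		 *
		 *            100 MHz  125 MHz  156.25 MHz
		 *   1250     0x3      0xa      0x4
		 *   2500     0x2      0x9      0x7
		 *   3125     -        0x8      0xe
		 *   5000     0x0      0x6      0xb
		 *
		 * (Comment added for clarity; not in the original source.)
		 */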
		switch (baud_mhz) {
		case 1250:
			switch (ref_clk_sel) {
			case 0: /* 100 MHz ref clock */
				spd = 0x3;
				break;
			case 1: /* 125 MHz ref clock */
				spd = 0xa;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0x4;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		case 2500:
			switch (ref_clk_sel) {
			case 0: /* 100 MHz ref clock */
				spd = 0x2;
				break;
			case 1: /* 125 MHz ref clock */
				spd = 0x9;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0x7;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		case 3125:
			switch (ref_clk_sel) {
			case 1: /* 125 MHz ref clock */
				spd = 0x8;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0xe;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		case 5000:
			switch (ref_clk_sel) {
			case 0: /* 100 MHz ref clock */
				spd = 0x0;
				break;
			case 1: /* 125 MHz ref clock */
				spd = 0x6;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0xb;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		default:
			spd = 0xf;
			break;
		}

		if (spd == 0xf) {
			printf("ERROR: Invalid SRIO speed (%d) configured for QLM%d\n", baud_mhz,
			       qlm);
			return -1;
		}

		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(port));
		status_reg.s.spd = spd;
		csr_wr(CVMX_SRIOX_STATUS_REG(port), status_reg.u64);
		is_srio = 1;
		break;
	}
	case CVMX_QLM_MODE_SGMII_2X1:
		if (qlm == 4) {
			is_bgx = 1;
			lmac_type[0] = 0;
			lmac_type[1] = 0;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		} else if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 0;
			lmac_type[3] = 0;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_10G_KR_1X2:
		enable_training = 1;
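		/* fall through */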
	case CVMX_QLM_MODE_XFI_1X2:
		if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 3;
			lmac_type[3] = 3;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_CPRI: /* CPRI / JESD204B */
		is_rmac = 1;
		break;
	case CVMX_QLM_MODE_SDL: /* Serdes Lite (SDL) */
		is_rmac = 1;
		is_rmac_pipe = 1;
		lane_mode = 1;
		break;
	default:
		break;
	}
  5127. if (is_rmac_pipe == 0 && is_pcie == 0) {
  5128. lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz,
  5129. &alt_pll);
  5130. }
  5131. debug("%s: %d lane mode: %d, alternate PLL: %s\n", __func__, mode, lane_mode,
  5132. alt_pll ? "true" : "false");
  5133. if (lane_mode == -1)
  5134. return -1;
  5135. if (alt_pll) {
  5136. debug("%s: alternate PLL settings used for qlm %d, lane mode %d, reference clock %d\n",
  5137. __func__, qlm, lane_mode, ref_clk_sel);
  5138. if (__set_qlm_ref_clk_cn78xx(0, qlm, lane_mode, ref_clk_sel)) {
  5139. printf("%s: Error: reference clock %d is not supported for qlm %d\n",
  5140. __func__, ref_clk_sel, qlm);
  5141. return -1;
  5142. }
  5143. }
  5144. /* Power up PHY, but keep it in reset */
  5145. phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
  5146. phy_ctl.s.phy_pd = 0;
  5147. phy_ctl.s.phy_reset = 1;
  5148. csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
  5149. /* Set GSER for the interface mode */
  5150. cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
  5151. cfg.s.bgx = is_bgx & 1;
  5152. cfg.s.bgx_quad = (is_bgx >> 2) & 1;
  5153. cfg.s.bgx_dual = (is_bgx >> 1) & 1;
  5154. cfg.s.pcie = is_pcie;
  5155. cfg.s.srio = is_srio;
  5156. cfg.s.rmac = is_rmac;
  5157. cfg.s.rmac_pipe = is_rmac_pipe;
  5158. csr_wr(CVMX_GSERX_CFG(qlm), cfg.u64);
  5159. /* Lane mode */
  5160. lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
  5161. lmode.s.lmode = lane_mode;
  5162. csr_wr(CVMX_GSERX_LANE_MODE(qlm), lmode.u64);
  5163. /* Because of the Errata where quad mode does not work, program
  5164. * lmac_type to figure out the type of BGX interface configured
  5165. */
  5166. if (is_bgx) {
  5167. int bgx = 0;
  5168. cvmx_bgxx_cmrx_config_t cmr_config;
  5169. cvmx_bgxx_cmr_rx_lmacs_t rx_lmacs;
  5170. cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
  5171. int index, total_lmacs = 0;
  5172. for (index = 0; index < 4; index++) {
  5173. cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
  5174. cmr_config.s.enable = 0;
  5175. cmr_config.s.data_pkt_rx_en = 0;
  5176. cmr_config.s.data_pkt_tx_en = 0;
  5177. if (lmac_type[index] != -1) {
  5178. cmr_config.s.lmac_type = lmac_type[index];
  5179. cmr_config.s.lane_to_sds = sds_lane[index];
  5180. total_lmacs++;
  5181. }
  5182. csr_wr(CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);
  5183. /* Enable training for 10G_KR/40G_KR4 modes */
  5184. if (enable_training == 1 &&
  5185. (lmac_type[index] == 3 || lmac_type[index] == 4)) {
  5186. spu_pmd_control.u64 =
  5187. csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
  5188. spu_pmd_control.s.train_en = 1;
  5189. csr_wr(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
  5190. spu_pmd_control.u64);
  5191. }
  5192. }
  5193. /* Update the total number of lmacs */
  5194. rx_lmacs.u64 = csr_rd(CVMX_BGXX_CMR_RX_LMACS(bgx));
  5195. rx_lmacs.s.lmacs = total_lmacs + additional_lmacs;
  5196. csr_wr(CVMX_BGXX_CMR_RX_LMACS(bgx), rx_lmacs.u64);
  5197. csr_wr(CVMX_BGXX_CMR_TX_LMACS(bgx), rx_lmacs.u64);
  5198. }
	/* Bring PHY out of reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/*
	 * Wait 1us until the management interface is ready to accept
	 * read/write commands.
	 */
	udelay(1);
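	/*
	 * For SRIO, only flag the port as SRIO-capable and return early; the
	 * rst_rdy/PLL-lock polling below does not apply to SRIO links and is
	 * presumably completed by the SRIO bring-up path.
	 */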
	if (is_srio) {
		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(port));
		status_reg.s.srio = 1;
		csr_wr(CVMX_SRIOX_STATUS_REG(port), status_reg.u64);
		return 0;
	}

	/* Wait for the reset to complete */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe.
	 */
	if (!is_pcie && CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
					      rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}
	/* Configure the GSER PLL */
	if (is_rmac)
		__rmac_pll_config(baud_mhz, qlm, mode);
	else if (!(is_pcie || is_srio))
		__qlm_setup_pll_cn78xx(0, qlm);
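	/*
	 * PCIe PLL setup is skipped here (presumably handled by the PEM
	 * bring-up), and the is_srio test is defensive: SRIO has already
	 * returned above.
	 */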
	/* Wait for reset to complete and the PLL to lock */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
				  pll_lock, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
		return -1;
	}

	/* Errata GSER-27140: Update the RX EQ settings due to temperature
	 * drift sensitivities. This workaround is only applied when the
	 * SERDES data rate is 10.3125 Gbaud.
	 */
	if (baud_mhz == 103125)
		__qlm_rx_eq_temp_gser27140(0, qlm);

	return 0;
}
/**
 * Configure QLM/DLM speed and mode.
 *
 * @param qlm		The QLM or DLM to configure
 * @param speed		The speed the QLM needs to be configured at, in MHz
 * @param mode		The mode the QLM is to be configured in
 *			(SGMII/XAUI/PCIe/...)
 * @param rc		Only used for PCIe; rc = 1 for root complex mode, 0
 *			for EP mode
 * @param pcie_mode	Only used when the QLM/DLM is in PCIe mode
 * @param ref_clk_sel	Reference clock to use for 70XX where:
 *			0: 100 MHz
 *			1: 125 MHz
 *			2: 156.25 MHz
 *			3: 122 MHz (used by RMAC)
 * @param ref_clk_input	Selects which reference clock input to use. For
 *			cn70xx:
 *			0: DLMC_REF_CLK0
 *			1: DLMC_REF_CLK1
 *			2: DLM0_REF_CLK
 *			cn61xx: (not used)
 *			cn78xx/cn76xx/cn73xx:
 *			0: Internal clock (QLM[0-7]_REF_CLK)
 *			1: QLMC_REF_CLK0
 *			2: QLMC_REF_CLK1
 *
 * Return: 0 on success or -1 on failure.
 */
int octeon_configure_qlm(int qlm, int speed, int mode, int rc, int pcie_mode, int ref_clk_sel,
			 int ref_clk_input)
{
	int node = 0;	// ToDo: currently only node 0 is supported

	debug("%s(%d, %d, %d, %d, %d, %d, %d)\n", __func__, qlm, speed, mode, rc, pcie_mode,
	      ref_clk_sel, ref_clk_input);

	if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
		return octeon_configure_qlm_cn61xx(qlm, speed, mode, rc, pcie_mode);
	else if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return octeon_configure_qlm_cn70xx(qlm, speed, mode, rc, pcie_mode, ref_clk_sel,
						   ref_clk_input);
	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return octeon_configure_qlm_cn78xx(node, qlm, speed, mode, rc, pcie_mode,
						   ref_clk_sel, ref_clk_input);
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return octeon_configure_qlm_cn73xx(qlm, speed, mode, rc, pcie_mode, ref_clk_sel,
						   ref_clk_input);
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return octeon_configure_qlm_cnf75xx(qlm, speed, mode, rc, pcie_mode, ref_clk_sel,
						    ref_clk_input);
	else
		return -1;
}
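/*
 * Example usage (illustrative only, not from the original source): to bring
 * up QLM 2 as a 10.3125 Gbaud XFI network interface clocked from the
 * internal reference on a cn73xx-class part, something like the following
 * could be used. The speed encoding 103125 matches the 10.3125 Gbaud check
 * above; CVMX_QLM_MODE_XFI and ref_clk_sel = 2 (156.25 MHz) are assumptions
 * that depend on the board's reference clock wiring:
 *
 *	if (octeon_configure_qlm(2, 103125, CVMX_QLM_MODE_XFI, 0, 0, 2, 0))
 *		printf("Failed to configure QLM2 for XFI\n");
 */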
void octeon_init_qlm(int node)
{
	int qlm;
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_cfg_t cfg;
	int baud_mhz;
	int pem;

	if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
		return;
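	/*
	 * Apply the per-QLM errata workarounds and rate-specific tuning, but
	 * only for QLMs that are already out of reset. The two hex arguments
	 * to octeon_qlm_tune_v3() are per-rate TX equalization settings
	 * (presumably swing and pre-emphasis); the trailing -1s appear to
	 * leave the remaining tuning parameters untouched.
	 */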
	for (qlm = 0; qlm < 8; qlm++) {
		phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
		if (phy_ctl.s.phy_reset == 0) {
			cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
			if (cfg.s.pcie)
				__cvmx_qlm_pcie_errata_cn78xx(node, qlm);
			else
				__qlm_init_errata_20844(node, qlm);

			baud_mhz = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
			if (baud_mhz == 6250 || baud_mhz == 6316)
				octeon_qlm_tune_v3(node, qlm, baud_mhz, 0xa, 0xa0, -1, -1);
			else if (baud_mhz == 103125)
				octeon_qlm_tune_v3(node, qlm, baud_mhz, 0xd, 0xd0, -1, -1);
		}
	}

	/* Set up how each PEM drives the PERST lines */
	for (pem = 0; pem < 4; pem++) {
		cvmx_rst_ctlx_t rst_ctl;

		rst_ctl.u64 = csr_rd_node(node, CVMX_RST_CTLX(pem));
		__setup_pem_reset(node, pem, !rst_ctl.s.host_mode);
	}
}