/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>
#include <linux/mdio.h>

DECLARE_GLOBAL_DATA_PTR;

#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)
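/*
 * U-Boot runs single-threaded on one CPU, so the per-CPU helpers above
 * collapse to no-ops: per_cpu_ptr(p, cpu) simply returns p and the
 * for_each_present_cpu() loop runs exactly once. Illustrative
 * (hypothetical) use, mirroring how the Linux driver indexes its
 * per-CPU statistics:
 *
 *	struct mvpp2_pcpu_stats *stats =
 *		per_cpu_ptr(port->stats, smp_processor_id());
 */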
#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS	1

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP		(2 + ETH_HLEN + 4 + 32)
#define MTU		1500
#define RX_BUFFER_SIZE	(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
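/*
 * Worked example, assuming ARCH_DMA_MINALIGN is 64 (it is platform
 * dependent): MTU(1500) + WRAP(2 + 14 + 4 + 32 = 52) = 1552, which
 * ALIGN() rounds up to a 1600-byte RX buffer.
 */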
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)
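/*
 * The parser TCAM/SRAM is accessed indirectly: software writes an entry
 * index into the IDX register, then moves the entry one 32-bit word at
 * a time through the DATA registers. A minimal read sketch, assuming
 * the driver's usual mvpp2_read()/mvpp2_write() register accessors
 * (defined further down in this file):
 *
 *	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, tid);
 *	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
 *		word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
 */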
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12
#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4
#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb
#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
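/*
 * A single register both enables and disables per-port interrupts: the
 * low 16 bits set enable bits, the high 16 bits clear them. Sketch,
 * assuming the driver's mvpp2_write() accessor defined further down:
 *
 *	mvpp2_write(priv, MVPP2_ISR_ENABLE_REG(port),
 *		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
 */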
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG		0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK		0xff

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_DIS_PADING_MASK		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK		BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_EN_PCS_AN			BIT(2)
#define MVPP2_GMAC_AN_BYPASS_EN			BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_EN_FC_AN			BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
						 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_PORT_EN			BIT(0)
#define MVPP22_XLG_MAC_RESETN			BIT(1)
#define MVPP22_XLG_RX_FC_EN			BIT(7)
#define MVPP22_XLG_MIBCNT_DIS			BIT(13)

/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_MAX_RX_SIZE_OFFS		0
#define MVPP22_XLG_MAX_RX_SIZE_MASK		0x1fff

/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)

/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)

/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_FORWARD_802_3X_FC_EN		BIT(5)
#define MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define MVPP22_XLG_MODE_DMA_1G			BIT(12)
#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define MVPP22_XPCS_PCSRESET			BIT(0)
#define MVPP22_XPCS_PCSMODE_OFFS		3
#define MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define MVPP22_XPCS_LANEACTIVE_OFFS		5
#define MVPP22_XPCS_LANEACTIVE_MASK		(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */
#define PCS40G_COMMON_CONTROL			0x14
#define FORWARD_ERROR_CORRECTION_MASK		BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define TX_SD_CLK_RESET_MASK			BIT(0)
#define RX_SD_CLK_RESET_MASK			BIT(1)
#define MAC_CLK_RESET_MASK			BIT(2)
#define CLK_DIVISION_RATIO_OFFS			4
#define CLK_DIVISION_RATIO_MASK			(0x7 << CLK_DIVISION_RATIO_OFFS)
#define CLK_DIV_PHASE_SET_MASK			BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define NETC_GOP_SOFT_RESET_OFFS		6
#define NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define NETC_BUS_WIDTH_SELECT_OFFS		1
#define NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define NETC_GIG_RX_DATA_SAMPLE_OFFS		29
#define NETC_GIG_RX_DATA_SAMPLE_MASK		(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define NETC_CLK_DIV_PHASE_OFFS			31
#define NETC_CLK_DIV_PHASE_MASK			(0x1 << NETC_CLK_DIV_PHASE_OFFS)

/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define NETC_PORTS_ACTIVE_OFFSET(p)		(0 + (p))
#define NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define NETC_PORT_GIG_RF_RESET_OFFS(p)		(28 + (p))
#define NETC_PORT_GIG_RF_RESET_MASK(p)		(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define NETC_GBE_PORT0_SGMII_MODE_OFFS		0
#define NETC_GBE_PORT0_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_SGMII_MODE_OFFS		1
#define NETC_GBE_PORT1_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_MII_MODE_OFFS		2
#define NETC_GBE_PORT1_MII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define MVPP22_SMI_POLLING_EN			BIT(10)
#define MVPP22_SMI_PHY_ADDR_REG(port)		(MVPP22_SMI + 0x04 + \
						 (0x4 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
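/*
 * The ring wraps by index, e.g. with a 16-entry ring (last_desc == 15):
 * MVPP2_QUEUE_NEXT_DESC(q, 14) == 15 and MVPP2_QUEUE_NEXT_DESC(q, 15) == 0.
 */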
/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS				0x007000
#define MVPP22_XPCS				0x007400
#define MVPP22_PORT_BASE			0x007e00
#define MVPP22_PORT_OFFSET			0x001000
#define MVPP22_RFU1				0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM			4

/* TX FIFO minimum fill thresholds, per interface mode */
#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII = BIT(0),
	MV_NETC_GE_MAC3_SGMII = BIT(1),
	MV_NETC_GE_MAC3_RGMII = BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled with zeroes automatically on
 * the RX side. Because these two bytes sit in front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips them on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
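/*
 * Offset arithmetic behind the comment above: MVPP2_MH_SIZE(2) +
 * Ethernet header(14) = 16 bytes, so the IP header that follows starts
 * on a 4-byte boundary without any extra software padding.
 */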
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ		1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		1
#define CONFIG_MV_ETH_RXQ		8	/* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE		0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX FIFO size register */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK		0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB		0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
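/*
 * Worked example for mtu = 1500: 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH) +
 * 4 (FCS) = 1524, which MVPP2_RX_PKT_SIZE() aligns up to 1536 on the
 * 32-byte cache-line size; MVPP2_RX_BUF_SIZE() then adds NET_SKB_PAD.
 */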
#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS		8
#define MVPP2_PRS_PORT_MASK		0xff
#define MVPP2_PRS_LU_MASK		0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
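/*
 * The TCAM packs each data byte next to its enable byte: every 32-bit
 * word holds two data bytes followed by their two enable bytes, hence
 * the swizzle above. Examples:
 *   MVPP2_PRS_TCAM_DATA_BYTE(0) == 0,  MVPP2_PRS_TCAM_DATA_BYTE_EN(0) == 2
 *   MVPP2_PRS_TCAM_DATA_BYTE(2) == 4,  MVPP2_PRS_TCAM_DATA_BYTE_EN(2) == 6
 */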
#define MVPP2_PRS_TCAM_AI_BYTE		16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE		20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD		5

/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16 * 1024 - \
					 MVPP2_BM_POOL_PTR_ALIGN / 4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
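/*
 * With NET_SKB_PAD == 32 and MVPP2_SKB_SHINFO_SIZE == 0, this works out
 * to MVPP2_RX_MAX_PKT_SIZE(512) == 512 - 32 - 0 == 480 bytes of packet
 * data per 512-byte short-pool buffer.
 */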
  778. enum mvpp2_bm_type {
  779. MVPP2_BM_FREE,
  780. MVPP2_BM_SWF_LONG,
  781. MVPP2_BM_SWF_SHORT
  782. };
/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;
	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phyaddr;
	struct udevice *mdio_dev;
	struct mii_dev *bus;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors, which is
 * therefore defined by the hardware design
 */
#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
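
/*
 * The pp21/pp22 members are not meant to be accessed directly: all
 * descriptor field accesses go through the mvpp2_txdesc_*() and
 * mvpp2_rxdesc_*() helpers below, which pick the right layout at runtime
 * based on priv->hw_version.
 */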

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB (never less
 * than 1MB). The driver uses fewer buffer descriptors than that, so
 * 1MB of descriptor space is reserved.
 */
#define BD_SPACE	(1 << 20)

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
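
/*
 * Note: on PPv2.2 the buffer DMA address and the cookie are 40-bit-wide
 * fields packed into the low bits of buf_dma_addr_ptp/buf_cookie_misc,
 * which is why the accessors below mask with GENMASK_ULL(40, 0) instead
 * of using the field directly.
 */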
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
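
/*
 * Physical TXQs are numbered after the TCONT range, MVPP2_MAX_TXQ queues
 * per port. Assuming the usual values of MVPP2_MAX_TCONT = 16 and
 * MVPP2_MAX_TXQ = 8 defined earlier in this file, port 1 / logical txq 2
 * would map to physical TXQ (16 + 1) * 8 + 2 = 138.
 */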

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
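
/*
 * Note: invalidation only sets the INV bit through an indirect write; the
 * rest of the TCAM entry is left as-is and is fully rewritten by the next
 * mvpp2_prs_hw_write() to the same index, which clears the INV bit again.
 */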

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
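
/*
 * Note the active-low convention of the TCAM port-enable byte used above:
 * a cleared bit means the port takes part in the lookup. For example,
 * mvpp2_prs_tcam_port_map_set(pe, BIT(0)) stores
 * ~BIT(0) & MVPP2_PRS_PORT_MASK in the enable byte, so the entry matches
 * packets coming from port 0 only.
 */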

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
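
/*
 * Note: the ethertype is matched in network byte order - most significant
 * byte at 'offset', least significant byte at 'offset + 1' - so the TCAM
 * compares directly against the on-wire bytes.
 */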

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
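
/*
 * Note: the AI field is not byte-aligned in SRAM, so the value above is
 * stitched together from the matching parts of two adjacent SRAM bytes.
 */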

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
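
/*
 * Note: both the UDF offset and the op-select field may straddle a byte
 * boundary: the part within the first byte is handled by
 * mvpp2_prs_sram_bits_set/clear(), while the spill-over into the following
 * byte is masked and OR-ed in explicitly above.
 */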

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create a new one */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */
	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create a new one */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create a new one */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Placeholders only - no ports. Note that the multicast entry index
	 * is the third argument of mvpp2_prs_mac_multi_set(), not the second.
	 */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);
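
	/*
	 * Note: the "IPv4 with options" entry above intentionally reuses the
	 * previous "IPv4 without options" entry instead of starting from a
	 * fresh memset: only the IHL match byte and the result-info bits
	 * differ, so just those fields are cleared and rewritten.
	 */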

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser default initialization */
static int mvpp2_prs_default_init(struct udevice *dev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}

	kfree(pe);

	return NULL;
}

/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
			     MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}

static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
{
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	memcpy(port->dev_addr, da, ETH_ALEN);

	return 0;
}

/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists - create a new one */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table (the whole data array, not just
	 * MVPP2_CLS_FLOWS_TBL_DATA_WORDS bytes of it)
	 */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);
		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}

static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
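
/*
 * Note: port->first_rxq can be wider than the LOW register field, so the
 * queue number is split above: the low bits go into
 * MVPP2_CLS_OVERSIZE_RXQ_LOW_REG and the remaining high bits into
 * MVPP2_CLS_SWFWD_P2HQ_REG.
 */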

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct udevice *dev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	if (priv->hw_version == MVPP22)
		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
			    (upper_32_bits(bm_pool->dma_addr) &
			     MVPP22_BM_POOL_BASE_HIGH_MASK));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		/* Pull the buffer back from the buffer manager by
		 * reading its allocation register
		 */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	}

	bm_pool->buf_num = 0;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct udevice *dev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		dev_err(dev, "cannot free all buffers in pool %d\n",
			bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	return 0;
}
static int mvpp2_bm_pools_init(struct udevice *dev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}
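/*
 * One-time BM setup: mask and clear the per-pool interrupts, then
 * allocate the pool bookkeeping structures and create every pool.
 */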
static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;

	return 0;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
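/*
 * The BM "cookie" is a software-defined word: the pool id lives in the
 * byte at MVPP2_BM_COOKIE_POOL_OFFS and, when the cookie is built from
 * an Rx descriptor further below, the issuing CPU sits in the byte at
 * MVPP2_BM_COOKIE_CPU_OFFS.
 */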
/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
/* Release buffer to BM */
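/*
 * Note the ordering: on PPv2.2 the high-order address bits must be
 * staged in MVPP22_BM_ADDR_HIGH_RLS_REG first, because the write to
 * MVPP2_BM_PHY_RLS_REG at the end is what triggers the actual release.
 */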
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     unsigned long buf_phys_addr)
{
	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		mvpp2_bm_pool_put(port, bm_pool->id,
				  (dma_addr_t)buffer_loc.rx_buffer[i],
				  (unsigned long)buffer_loc.rx_buffer[i]);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(NULL, port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
				   new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	return 0;
}
/* Port configuration routines */

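/*
 * Select in-band autoneg (SGMII) or RGMII signalling in GMAC_CTRL_2
 * according to the PHY interface mode; the PCS enable bit is cleared
 * for every mode except SGMII.
 */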
static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through: RGMII also runs without the PCS */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
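/* Advertise 802.3x flow control in the GMAC autonegotiation register */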
static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
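/*
 * Release the port from reset: clear the reset bit and busy-wait until
 * the GMAC reports the reset as de-asserted.
 */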
static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}
/*
 * gop_gpcs_mode_cfg
 *
 * Configure the port to work with the gigabit PCS, or without it.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}
static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable the 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}
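/*
 * GMAC setup for SGMII running at 2.5 Gbps: the MAC is put into the
 * 1000Base-X (fiber) port type with autoneg bypassed and speed/duplex
 * forced, as programmed into MVPP2_GMAC_AUTONEG_CONFIG below.
 */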
static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0x9268 */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_CONFIG_MII_SPEED |
		MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC port type to SGMII (i.e. not 1000Base-X) */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0xb8e8 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
/* Set the internal mux's to the required MAC in the GOP */
static int gop_gmac_mode_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* Set TX FIFO thresholds */
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		if (port->phy_speed == 2500)
			gop_gmac_sgmii2_5_cfg(port);
		else
			gop_gmac_sgmii_cfg(port);
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_rgmii_cfg(port);
		break;
	default:
		return -1;
	}

	/* Jumbo frame support - 0x1400*2= 0x2800 bytes */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* PeriodicXonEn disable */
	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);

	return 0;
}
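/*
 * On MAC0 the XLG and the GMAC share one GoP slot; program the XLG mux
 * so that the 1G (GMAC) datapath is selected.
 */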
static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* relevant only for MAC0 (XLG0 and GMAC0) */
	if (port->gop_id > 0)
		return;

	/* configure 1Gig MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
}
static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
	else
		val |= MVPP2_GMAC_SGMII_MODE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}
/* Set the internal mux's to the required PCS in the PI */
static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes)
{
	u32 val;
	int lane;

	switch (num_of_lanes) {
	case 1:
		lane = 0;
		break;
	case 2:
		lane = 1;
		break;
	case 4:
		lane = 2;
		break;
	default:
		return -1;
	}

	/* configure XG MAC mode */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	val &= ~MVPP22_XPCS_PCSMODE_MASK;
	val &= ~MVPP22_XPCS_LANEACTIVE_MASK;
	val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}
static int gop_mpcs_mode(struct mvpp2_port *port)
{
	u32 val;

	/* configure PCS40G COMMON CONTROL */
	val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
	val &= ~FORWARD_ERROR_CORRECTION_MASK;
	writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL);

	/* configure PCS CLOCK RESET */
	val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET);
	val &= ~CLK_DIVISION_RATIO_MASK;
	val |= 1 << CLK_DIVISION_RATIO_OFFS;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	val &= ~CLK_DIV_PHASE_SET_MASK;
	val |= MAC_CLK_RESET_MASK;
	val |= RX_SD_CLK_RESET_MASK;
	val |= TX_SD_CLK_RESET_MASK;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	return 0;
}
/* Set the internal mux's to the required MAC in the GOP */
static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
{
	u32 val;

	/* configure 10G MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_RX_FC_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_MODE_DMA_1G;
	val |= MVPP22_XLG_FORWARD_PFC_EN;
	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);

	/* unmask link change interrupt */
	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
	val |= 1;	/* unmask summary bit */
	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);

	return 0;
}
/* Set PCS to reset or exit from reset */
static int gop_xpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	if (reset)
		val &= ~MVPP22_XPCS_PCSRESET;
	else
		val |= MVPP22_XPCS_PCSRESET;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

/* Set the MAC to reset or exit from reset */
static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (reset)
		val &= ~MVPP22_XLG_MAC_RESETN;
	else
		val |= MVPP22_XLG_MAC_RESETN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	return 0;
}
/*
 * gop_port_init
 *
 * Init physical port. Configures the port mode and all its elements
 * accordingly.
 * Does not verify that the selected mode/port number is valid at the
 * core level.
 */
static int gop_port_init(struct mvpp2_port *port)
{
	int mac_num = port->gop_id;
	int num_of_act_lanes;

	if (mac_num >= MVPP22_GOP_MAC_NUM) {
		netdev_err(NULL, "%s: illegal port number %d", __func__,
			   mac_num);
		return -1;
	}

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_reset(port, 1);

		/* configure PCS */
		gop_gpcs_mode_cfg(port, 0);
		gop_bypass_clk_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* pcs unreset */
		gop_gpcs_reset(port, 0);

		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SGMII:
		/* configure PCS */
		gop_gpcs_mode_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* select proper Mac mode */
		gop_xlg_2_gig_mac_cfg(port);

		/* pcs unreset */
		gop_gpcs_reset(port, 0);
		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SFI:
		num_of_act_lanes = 2;
		mac_num = 0;
		/* configure PCS */
		gop_xpcs_mode(port, num_of_act_lanes);
		gop_mpcs_mode(port);
		/* configure MAC */
		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);

		/* pcs unreset */
		gop_xpcs_reset(port, 0);

		/* mac unreset */
		gop_xlg_mac_reset(port, 0);
		break;

	default:
		netdev_err(NULL, "%s: Requested port mode (%d) not supported\n",
			   __func__, port->phy_interface);
		return -1;
	}

	return 0;
}
static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (enable) {
		/* Enable port and MIB counters update */
		val |= MVPP22_XLG_PORT_EN;
		val &= ~MVPP22_XLG_MIBCNT_DIS;
	} else {
		/* Disable port */
		val &= ~MVPP22_XLG_PORT_EN;
	}
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
}

static void gop_port_enable(struct mvpp2_port *port, int enable)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		if (enable)
			mvpp2_port_enable(port);
		else
			mvpp2_port_disable(port);
		break;

	case PHY_INTERFACE_MODE_SFI:
		gop_xlg_mac_port_enable(port, enable);
		break;

	default:
		netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__,
			   port->phy_interface);
		return;
	}
}
/* RFU1 functions */
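/*
 * RFU1 is a separate register window (priv->rfu1_base) holding the
 * NetComplex (GoP) glue configuration used by the gop_netc_* helpers
 * below.
 */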
static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->rfu1_base + offset);
}

static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->rfu1_base + offset);
}

static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
{
	u32 val = 0;

	if (gop_id == 2) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC2_SGMII;
	}

	if (gop_id == 3) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC3_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC3_RGMII;
	}

	return val;
}
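/*
 * The helpers below all follow the same read-modify-write pattern:
 * fetch an RFU1 register, clear the field, shift and mask the new
 * value into place, and write the register back.
 */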
static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));

	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
	val &= NETC_PORTS_ACTIVE_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;

	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
	val &= NETC_GBE_PORT1_MII_MODE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
	reg &= ~NETC_GOP_SOFT_RESET_MASK;

	val <<= NETC_GOP_SOFT_RESET_OFFS;
	val &= NETC_GOP_SOFT_RESET_MASK;

	reg |= val;

	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
}

static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_CLK_DIV_PHASE_MASK;

	val <<= NETC_CLK_DIV_PHASE_OFFS;
	val &= NETC_CLK_DIV_PHASE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));

	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
					   u32 val)
{
	u32 reg, mask, offset;

	if (gop_id == 2) {
		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
	} else {
		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
	}
	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~mask;

	val <<= offset;
	val &= mask;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;

	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
	val &= NETC_BUS_WIDTH_SELECT_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;

	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}
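/*
 * NetComplex bring-up is split in two phases: the first phase selects
 * the bus width and MII/SGMII modes while the ports are still in
 * reset; the second phase releases the per-port HB reset (and, in
 * gop_netc_init(), the GoP clock logic and soft reset).
 */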
static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select RGMII mode */
		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select SGMII mode */
		if (gop_id >= 1) {
			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
						       MV_NETC_GBE_SGMII);
		}

		/* Configure the sample stages */
		gop_netc_sample_stages_timing(priv, 0);
		/* Configure the ComPhy Selector */
		/* gop_netc_com_phy_selector_config(netComplex); */
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}
static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
{
	u32 c = priv->netc_config;

	if (c & MV_NETC_GE_MAC2_SGMII)
		gop_netc_mac_to_sgmii(priv, 2, phase);
	else
		gop_netc_mac_to_xgmii(priv, 2, phase);

	if (c & MV_NETC_GE_MAC3_SGMII) {
		gop_netc_mac_to_sgmii(priv, 3, phase);
	} else {
		gop_netc_mac_to_xgmii(priv, 3, phase);

		if (c & MV_NETC_GE_MAC3_RGMII)
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
		else
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
	}

	/* Activate gop ports 0, 2, 3 */
	gop_netc_active_port(priv, 0, 1);
	gop_netc_active_port(priv, 2, 1);
	gop_netc_active_port(priv, 3, 1);

	if (phase == MV_NETC_SECOND_PHASE) {
		/* Enable the GOP internal clock logic */
		gop_netc_gop_clock_logic_set(priv, 1);
		/* De-assert GOP unit reset */
		gop_netc_gop_reset(priv, 1);
	}

	return 0;
}
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}
/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int cpu = smp_processor_id();
	int pool;

	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}
/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mvpp2_txq_inc_get(txq_pcpu);
}
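/*
 * The interrupt cause is a per-queue bitmap; fls() picks the
 * highest-numbered queue that has work pending.
 */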
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct udevice *dev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = buffer_loc.aggr_tx_descs;
	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			  MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = buffer_loc.rx_descs;
	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
	if (!rxq->descs)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);

		mvpp2_pool_refill(port, bm,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = buffer_loc.tx_descs;
	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
		    MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
	}

	return 0;
}
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	mvpp2_txq_sent_counter_clear(port);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	mvpp2_txq_sent_counter_clear(port);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* Adjust link */
static void mvpp2_link_event(struct mvpp2_port *port)
{
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
	}
}
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}
/* Return the just-processed buffer to the BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, dma_addr_t dma_addr)
{
	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
	return 0;
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_gmac_max_rx_size_set(port);
	default:
		break;
	}

	mvpp2_txp_max_tx_size_set(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_enable(port);
	else
		gop_port_enable(port, 1);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mvpp2_egress_disable(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);
}
static void mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	if (!port->init || port->link == 0) {
		phy_dev = dm_mdio_phy_connect(port->mdio_dev, port->phyaddr,
					      dev, port->phy_interface);

		/*
		 * If the phy doesn't match with any existing u-boot drivers,
		 * the phy framework will connect it to a generic one whose
		 * uid == 0xffffffff. In this case act as if the phy were not
		 * declared in the dts. Otherwise, in the case of the 3310
		 * (for which no driver exists) the link will not be correctly
		 * detected. Removing the phy entry from the dts in the 3310
		 * case is not an option because it is required for the
		 * phy_fw_down procedure.
		 */
		if (phy_dev &&
		    phy_dev->drv->uid == 0xffffffff) {/* Generic phy */
			netdev_warn(port->dev,
				    "Marking phy as invalid, link will not be checked\n");
			/* set phy_addr to invalid value */
			port->phyaddr = PHY_MAX_ADDR;
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);

			return;
		}

		port->phy_dev = phy_dev;
		if (!phy_dev) {
			netdev_err(port->dev, "cannot connect to phy\n");
			return;
		}

		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->advertising = phy_dev->supported;

		port->phy_dev = phy_dev;
		port->link    = 0;
		port->duplex  = 0;
		port->speed   = 0;

		phy_config(phy_dev);
		phy_startup(phy_dev);
		if (!phy_dev->link)
			printf("%s: No link\n", phy_dev->dev->name);
		else
			port->init = 1;
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}
}

static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
{
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      port->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		return err;
	}

	if (port->phyaddr < PHY_MAX_ADDR) {
		mvpp2_phy_connect(dev, port);
		mvpp2_link_event(port);
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	mvpp2_start_dev(port);

	return 0;
}

/* No Device ops here in U-Boot */

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;

	/* On PPv2.2 the GoP / interface configuration has already been done */
	if (priv->hw_version == MVPP21)
		mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}

/* Initialize port HW */
static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
					 GFP_KERNEL);
		if (!txq->pcpu)
			return -ENOMEM;

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs)
		return -ENOMEM;

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq)
			return -ENOMEM;
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		return err;

	return 0;
}
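
/*
 * Parse the PHY related properties of a port node: the optional "phy"
 * phandle (and the MDIO bus it sits on), "phy-mode", "port-id" and the
 * optional reset / SFP tx-disable GPIOs.
 */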
static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
{
	int port_node = dev_of_offset(dev);
	const char *phy_mode_str;
	int phy_node;
	u32 id;
	int phyaddr = 0;
	int phy_mode = -1;
	int ret;

	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");

	if (phy_node > 0) {
		int parent;

		phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
		if (phyaddr < 0) {
			dev_err(dev, "could not find phy address\n");
			return -1;
		}
		parent = fdt_parent_offset(gd->fdt_blob, phy_node);
		ret = uclass_get_device_by_of_offset(UCLASS_MDIO, parent,
						     &port->mdio_dev);
		if (ret)
			return ret;
	} else {
		/* phy_addr is set to invalid value */
		phyaddr = PHY_MAX_ADDR;
	}

	phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
	if (phy_mode_str)
		phy_mode = phy_get_interface_by_name(phy_mode_str);
	if (phy_mode == -1) {
		dev_err(dev, "incorrect phy mode\n");
		return -EINVAL;
	}

	id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
	if (id == -1) {
		dev_err(dev, "missing port-id value\n");
		return -EINVAL;
	}

#if CONFIG_IS_ENABLED(DM_GPIO)
	gpio_request_by_name(dev, "phy-reset-gpios", 0,
			     &port->phy_reset_gpio, GPIOD_IS_OUT);
	gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0,
			     &port->phy_tx_disable_gpio, GPIOD_IS_OUT);
#endif

	/*
	 * ToDo:
	 * Not sure if this DT property "phy-speed" will get accepted, so
	 * this might change later
	 */
	/* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */
	port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node,
					 "phy-speed", 1000);

	port->id = id;
	if (port->priv->hw_version == MVPP21)
		port->first_rxq = port->id * rxq_number;
	else
		port->first_rxq = port->id * port->priv->max_port_rxqs;
	port->phy_interface = phy_mode;
	port->phyaddr = phyaddr;

	return 0;
}

#if CONFIG_IS_ENABLED(DM_GPIO)
/* Port GPIO initialization */
static void mvpp2_gpio_init(struct mvpp2_port *port)
{
	if (dm_gpio_is_valid(&port->phy_reset_gpio)) {
		dm_gpio_set_value(&port->phy_reset_gpio, 1);
		mdelay(10);
		dm_gpio_set_value(&port->phy_reset_gpio, 0);
	}

	if (dm_gpio_is_valid(&port->phy_tx_disable_gpio))
		dm_gpio_set_value(&port->phy_tx_disable_gpio, 0);
}
#endif

/* Ports initialization */
static int mvpp2_port_probe(struct udevice *dev,
			    struct mvpp2_port *port,
			    int port_node,
			    struct mvpp2 *priv)
{
	int err;

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;

	err = mvpp2_port_init(dev, port);
	if (err < 0) {
		dev_err(dev, "failed to init port %d\n", port->id);
		return err;
	}
	mvpp2_port_power_up(port);

#if CONFIG_IS_ENABLED(DM_GPIO)
	mvpp2_gpio_init(port);
#endif

	priv->port_list[port->id] = port;
	priv->num_ports++;
	return 0;
}
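
/*
 * The decoding windows below map PP2 DMA accesses to the DRAM chip
 * selects. Each of the six windows takes a base and a size (both with
 * 64KiB granularity, hence the 0xffff0000 masks) plus the MBUS
 * target/attribute of one chip select; windows 0-3 additionally have a
 * remap register. Only the windows actually programmed are turned on
 * in MVPP2_BASE_ADDR_ENABLE.
 */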
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
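
/*
 * On PPv2.2 the RX FIFO is sized per port according to the maximum
 * speed the port can run at: port 0 (up to 10Gb/s) gets the largest
 * share, port 1 (up to 2.5Gb/s) a medium one and the remaining 1Gb/s
 * ports the smallest. PPv2.1 uses one fixed size for all ports.
 */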
/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (priv->hw_version == MVPP22) {
			if (port == 0) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
			} else if (port == 1) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
			} else {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
			}
		} else {
			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
		}
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

/* Initialize Tx FIFOs */
static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
{
	int port, val;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		/* Port 0 supports 10KB TX FIFO */
		if (port == 0) {
			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
				MVPP22_TX_FIFO_SIZE_MASK;
		} else {
			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
				MVPP22_TX_FIFO_SIZE_MASK;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
	}
}
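
/*
 * Set up the AXI attributes of the PP2 DMA masters. All reads (BM,
 * descriptors, TX data) use the same cacheable/outer-domain read
 * attribute and all writes the matching write attribute; the "normal"
 * codes are left non-cacheable in the system domain, while the snoop
 * codes stay cacheable so those transactions can snoop the CPU caches.
 */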
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints (U-Boot uses only one rxq) */
	if ((rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		mvpp2_axi_init(priv);
	} else {
		/* MBUS windows configuration */
		dram_target_info = mvebu_mbus_dram_info();
		if (dram_target_info)
			mvpp2_conf_mbus_windows(dram_target_info, priv);
	}

	if (priv->hw_version == MVPP21) {
		/* Disable HW PHY polling */
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		/* Enable HW PHY polling */
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val |= MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Tx Fifo Init */
	if (priv->hw_version == MVPP22)
		mvpp2_tx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(dev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
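
/*
 * Receive one frame. U-Boot's eth_ops.recv contract: return the frame
 * length with *packetp pointing at the data, or 0 when nothing usable
 * was received. The returned buffer lives in the uncached BM area and
 * is put back into the pool right away; that is safe here because the
 * frame is consumed before the next recv call.
 */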
static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_rx_desc *rx_desc;
	struct mvpp2_bm_pool *bm_pool;
	dma_addr_t dma_addr;
	u32 bm, rx_status;
	int pool, rx_bytes, err;
	int rx_received;
	struct mvpp2_rx_queue *rxq;
	u8 *data;

	if (port->phyaddr < PHY_MAX_ADDR)
		if (!port->phy_dev->link)
			return 0;

	/* Process RX packets */
	rxq = port->rxqs[0];

	/* Get number of received packets */
	rx_received = mvpp2_rxq_received(port, rxq->id);

	/* Return if no packets are received */
	if (!rx_received)
		return 0;

	rx_desc = mvpp2_rxq_next_desc_get(rxq);
	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
	rx_bytes -= MVPP2_MH_SIZE;
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This request process is controlled
	 * by the hardware, and the information about the buffer is
	 * comprised by the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		netdev_err(port->dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* Hand the packet to the caller, skipping the 2-byte Marvell
	 * header and the 32-byte packet offset in the buffer
	 */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation needed here, since the rx_buffers are
	 * located in an uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}
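
/*
 * Transmit one frame. The frame is queued on this CPU's aggregated
 * TXQ, from which the hardware moves it to the port's physical TXQ.
 * Since U-Boot transmits synchronously, we busy-wait twice: first
 * until the descriptor has left the aggregated queue, then until the
 * sent-descriptor counter reports the frame as transmitted.
 */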
static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	if (port->phyaddr < PHY_MAX_ADDR)
		if (!port->phy_dev->link)
			return 0;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
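	/*
	 * The descriptor takes the buffer pointer as an aligned base
	 * address plus a byte offset. Assuming MVPP2_TX_DESC_ALIGN is a
	 * power-of-two-minus-one mask (e.g. 0x3f for a 64-byte
	 * granularity), a packet at 0x10000042 is split into offset 0x02
	 * and base address 0x10000040.
	 */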
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);

	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	return 0;
}

static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_port_power_up(port);
	default:
		break;
	}

	mvpp2_open(dev, port);

	return 0;
}

static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}

static int mvpp2_write_hwaddr(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	return mvpp2_prs_update_mac_da(port, port->dev_addr);
}
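
/*
 * Program the PHY address the GoP SMI block polls for this port; the
 * HW link polling enabled in mvpp2_init() relies on this value.
 */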
static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port)
{
	writel(port->phyaddr, port->priv->iface_base +
	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));

	return 0;
}
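
/*
 * Layout of the one-off uncached buffer area carved out below, in
 * order: the aggregated TX descriptor ring, the per-port TX and RX
 * descriptor rings, the BM pool arrays (two words per buffer: the
 * buffer address and its cookie) and finally the RX buffers
 * themselves.
 */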
static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate the buffer area for descs and rx_buffers. This is
	 * done only once for all interfaces, since only one interface
	 * can be active at a time. Make this area DMA-safe by disabling
	 * the D-cache.
	 */

	/* Align buffer area for descs and rx_buffers to 1MiB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space)
		return -ENOMEM;

	mmu_set_region_dcache_behaviour((unsigned long)bd_space,
					BD_SPACE, DCACHE_OFF);

	buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
	size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.tx_descs =
		(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.rx_descs =
		(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		buffer_loc.bm_pool[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		if (priv->hw_version == MVPP21)
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
		else
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
	}

	for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
		buffer_loc.rx_buffer[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		size += RX_BUFFER_SIZE;
	}

	/* Clear the complete area so that all descriptors are cleared */
	memset(bd_space, 0, size);

	/* Save base addresses for later use */
	priv->base = (void *)devfdt_get_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		priv->iface_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		/* Store common base addresses for all ports */
		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	return 0;
}

static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	int err;

	/* Only call the probe function for the parent once */
	if (!priv->probe_done) {
		err = mvpp2_base_probe(dev->parent);
		if (err)
			return err;
	}

	port->priv = priv;

	err = phy_info_parse(dev, port);
	if (err)
		return err;

	/*
	 * We need the port specific io base addresses at this stage, since
	 * gop_port_init() accesses these registers
	 */
	if (priv->hw_version == MVPP21) {
		int priv_common_regs_num = 2;

		port->base = (void __iomem *)devfdt_get_addr_index(
			dev->parent, priv_common_regs_num + port->id);
		if (IS_ERR(port->base))
			return PTR_ERR(port->base);
	} else {
		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "gop-port-id", -1);
		if (port->gop_id == -1) {
			dev_err(dev, "missing gop-port-id value\n");
			return -EINVAL;
		}

		port->base = priv->iface_base + MVPP22_PORT_BASE +
			port->gop_id * MVPP22_PORT_OFFSET;

		/* Set phy address of the port */
		if (port->phyaddr < PHY_MAX_ADDR)
			mvpp22_smi_phy_addr_cfg(port);

		/* GoP Init */
		gop_port_init(port);
	}

	if (!priv->probe_done) {
		/* Initialize network controller */
		err = mvpp2_init(dev, priv);
		if (err < 0) {
			dev_err(dev, "failed to initialize controller\n");
			return err;
		}
		priv->num_ports = 0;
		priv->probe_done = 1;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}

/*
 * Empty the BM pools and stop their activity before the OS is started.
 * The pools are destroyed only once the last port has been removed.
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	priv->num_ports--;

	if (priv->num_ports)
		return 0;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}

static const struct eth_ops mvpp2_ops = {
	.start		= mvpp2_start,
	.send		= mvpp2_send,
	.recv		= mvpp2_recv,
	.stop		= mvpp2_stop,
	.write_hwaddr	= mvpp2_write_hwaddr
};

static struct driver mvpp2_driver = {
	.name	= "mvpp2",
	.id	= UCLASS_ETH,
	.probe	= mvpp2_probe,
	.remove	= mvpp2_remove,
	.ops	= &mvpp2_ops,
	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags	= DM_FLAG_ACTIVE_DMA,
};

/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
 */
static int mvpp2_base_bind(struct udevice *parent)
{
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(parent);
	struct uclass_driver *drv;
	struct udevice *dev;
	struct eth_pdata *plat;
	char *name;
	int subnode;
	u32 id;
	int base_id_add;

	/* Lookup eth driver */
	drv = lists_uclass_lookup(UCLASS_ETH);
	if (!drv) {
		puts("Cannot find eth driver\n");
		return -ENOENT;
	}

	base_id_add = base_id;

	fdt_for_each_subnode(subnode, blob, node) {
		/* Increment base_id for all subnodes, also the disabled ones */
		base_id++;

		/* Skip disabled ports */
		if (!fdtdec_get_is_enabled(blob, subnode))
			continue;

		plat = calloc(1, sizeof(*plat));
		if (!plat)
			return -ENOMEM;

		id = fdtdec_get_int(blob, subnode, "port-id", -1);
		id += base_id_add;

		name = calloc(1, 16);
		if (!name) {
			free(plat);
			return -ENOMEM;
		}
		sprintf(name, "mvpp2-%d", id);

		/* Create child device UCLASS_ETH and bind it */
		device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
		dev_set_of_offset(dev, subnode);
	}

	return 0;
}

static const struct udevice_id mvpp2_ids[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = MVPP22,
	},
	{ }
};

U_BOOT_DRIVER(mvpp2_base) = {
	.name	= "mvpp2_base",
	.id	= UCLASS_MISC,
	.of_match = mvpp2_ids,
	.bind	= mvpp2_base_bind,
	.priv_auto_alloc_size = sizeof(struct mvpp2),
};