// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
#include <net/tls.h>

static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
	if (in_compat_syscall()) {
		struct compat_sock_fprog f32;

		if (len != sizeof(f32))
			return -EINVAL;
		if (copy_from_sockptr(&f32, src, sizeof(f32)))
			return -EFAULT;
		memset(dst, 0, sizeof(*dst));
		dst->len = f32.len;
		dst->filter = compat_ptr(f32.filter);
	} else {
		if (len != sizeof(*dst))
			return -EINVAL;
		if (copy_from_sockptr(dst, src, sizeof(*dst)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
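
/* Usage sketch (illustrative, not part of this file): to my understanding,
 * the SO_ATTACH_FILTER setsockopt() path in net/core/sock.c uses this helper
 * roughly as follows, so individual callers no longer need their own
 * compat_sock_fprog handling:
 *
 *	struct sock_fprog fprog;
 *	int err;
 *
 *	err = copy_bpf_fprog_from_user(&fprog, optval, optlen);
 *	if (!err)
 *		err = sk_attach_filter(&fprog, sk);
 */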

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned by
 * the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
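
/* Note (illustrative, not part of this file): the common sk_filter() entry
 * point in <linux/filter.h> is, as far as I know, a thin wrapper that calls
 * sk_filter_trim_cap() with a trim cap of 1, i.e. the attached program may
 * trim the packet but never to zero length:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 */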

BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		break;
	case SKF_AD_VLAN_TAG_PRESENT:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;

	return true;
}
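
/* Example (illustrative, not part of this file): a classic BPF filter that
 * reads the packet type through the SKF_AD_PKTTYPE ancillary offset and
 * accepts only packets addressed to this host. On attach, the LD|ABS at
 * SKF_AD_OFF + SKF_AD_PKTTYPE is rewritten by convert_bpf_extensions()
 * into the convert_skb_access(SKF_AD_PKTTYPE, ...) sequence above:
 *
 *	struct sock_filter host_only[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 SKF_AD_OFF + SKF_AD_PKTTYPE),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, PACKET_HOST, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 */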
  401. static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
  402. {
  403. const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
  404. int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
  405. bool endian = BPF_SIZE(fp->code) == BPF_H ||
  406. BPF_SIZE(fp->code) == BPF_W;
  407. bool indirect = BPF_MODE(fp->code) == BPF_IND;
  408. const int ip_align = NET_IP_ALIGN;
  409. struct bpf_insn *insn = *insnp;
  410. int offset = fp->k;
  411. if (!indirect &&
  412. ((unaligned_ok && offset >= 0) ||
  413. (!unaligned_ok && offset >= 0 &&
  414. offset + ip_align >= 0 &&
  415. offset + ip_align % size == 0))) {
  416. bool ldx_off_ok = offset <= S16_MAX;
  417. *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
  418. if (offset)
  419. *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
  420. *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
  421. size, 2 + endian + (!ldx_off_ok * 2));
  422. if (ldx_off_ok) {
  423. *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
  424. BPF_REG_D, offset);
  425. } else {
  426. *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
  427. *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
  428. *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
  429. BPF_REG_TMP, 0);
  430. }
  431. if (endian)
  432. *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
  433. *insn++ = BPF_JMP_A(8);
  434. }
  435. *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
  436. *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
  437. *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
  438. if (!indirect) {
  439. *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
  440. } else {
  441. *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
  442. if (fp->k)
  443. *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
  444. }
  445. switch (BPF_SIZE(fp->code)) {
  446. case BPF_B:
  447. *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
  448. break;
  449. case BPF_H:
  450. *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
  451. break;
  452. case BPF_W:
  453. *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
  454. break;
  455. default:
  456. return false;
  457. }
  458. *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
  459. *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
  460. *insn = BPF_EXIT_INSN();
  461. *insnp = insn;
  462. return true;
  463. }
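/* Illustrative sketch (not part of the original file): for a classic
 * "ldh [12]" (BPF_LD | BPF_H | BPF_ABS, k = 12), the code above roughly
 * emits
 *
 *	tmp = H - 12;			// H caches skb headlen
 *	if (tmp < 2 (signed))		// not enough linear data
 *		goto slow_path;
 *	A = ntohs(*(u16 *)(D + 12));	// D caches skb->data
 *	goto out;
 * slow_path:
 *	A = bpf_skb_load_helper_16(CTX, D, H, 12);
 *	if (A < 0 (signed)) { A = 0; return; }
 * out:
 *
 * i.e. a negative helper return terminates the program with a verdict of
 * 0, matching classic BPF semantics for failed packet loads.
 */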
/**
 * bpf_convert_filter - convert filter program
 * @prog: the user passed filter program
 * @len: the length of the user passed filter program
 * @new_prog: allocated 'struct bpf_prog' or NULL
 * @new_len: pointer to store length of converted program
 * @seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 *
 * Conversion workflow:
 *
 * 1) First call for calculating the new program length:
 *    bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) Second call to remap in two internal passes: the first pass finds the
 *    new jump offsets, the second pass emits the remapped instructions:
 *    bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
  483. static int bpf_convert_filter(struct sock_filter *prog, int len,
  484. struct bpf_prog *new_prog, int *new_len,
  485. bool *seen_ld_abs)
  486. {
  487. int new_flen = 0, pass = 0, target, i, stack_off;
  488. struct bpf_insn *new_insn, *first_insn = NULL;
  489. struct sock_filter *fp;
  490. int *addrs = NULL;
  491. u8 bpf_src;
  492. BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
  493. BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
  494. if (len <= 0 || len > BPF_MAXINSNS)
  495. return -EINVAL;
  496. if (new_prog) {
  497. first_insn = new_prog->insnsi;
  498. addrs = kcalloc(len, sizeof(*addrs),
  499. GFP_KERNEL | __GFP_NOWARN);
  500. if (!addrs)
  501. return -ENOMEM;
  502. }
  503. do_pass:
  504. new_insn = first_insn;
  505. fp = prog;
  506. /* Classic BPF related prologue emission. */
  507. if (new_prog) {
  508. /* Classic BPF expects A and X to be reset first. These need
  509. * to be guaranteed to be the first two instructions.
  510. */
  511. *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
  512. *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In the eBPF case it's done by the compiler, here we need
		 * to do this ourselves. Initial CTX is present in
		 * BPF_REG_ARG1.
		 */
  517. *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
  518. if (*seen_ld_abs) {
  519. /* For packet access in classic BPF, cache skb->data
  520. * in callee-saved BPF R8 and skb->len - skb->data_len
  521. * (headlen) in BPF R9. Since classic BPF is read-only
  522. * on CTX, we only need to cache it once.
  523. */
  524. *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
  525. BPF_REG_D, BPF_REG_CTX,
  526. offsetof(struct sk_buff, data));
  527. *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
  528. offsetof(struct sk_buff, len));
  529. *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
  530. offsetof(struct sk_buff, data_len));
  531. *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
  532. }
  533. } else {
  534. new_insn += 3;
  535. }
  536. for (i = 0; i < len; fp++, i++) {
  537. struct bpf_insn tmp_insns[32] = { };
  538. struct bpf_insn *insn = tmp_insns;
  539. if (addrs)
  540. addrs[i] = new_insn - first_insn;
  541. switch (fp->code) {
  542. /* All arithmetic insns and skb loads map as-is. */
  543. case BPF_ALU | BPF_ADD | BPF_X:
  544. case BPF_ALU | BPF_ADD | BPF_K:
  545. case BPF_ALU | BPF_SUB | BPF_X:
  546. case BPF_ALU | BPF_SUB | BPF_K:
  547. case BPF_ALU | BPF_AND | BPF_X:
  548. case BPF_ALU | BPF_AND | BPF_K:
  549. case BPF_ALU | BPF_OR | BPF_X:
  550. case BPF_ALU | BPF_OR | BPF_K:
  551. case BPF_ALU | BPF_LSH | BPF_X:
  552. case BPF_ALU | BPF_LSH | BPF_K:
  553. case BPF_ALU | BPF_RSH | BPF_X:
  554. case BPF_ALU | BPF_RSH | BPF_K:
  555. case BPF_ALU | BPF_XOR | BPF_X:
  556. case BPF_ALU | BPF_XOR | BPF_K:
  557. case BPF_ALU | BPF_MUL | BPF_X:
  558. case BPF_ALU | BPF_MUL | BPF_K:
  559. case BPF_ALU | BPF_DIV | BPF_X:
  560. case BPF_ALU | BPF_DIV | BPF_K:
  561. case BPF_ALU | BPF_MOD | BPF_X:
  562. case BPF_ALU | BPF_MOD | BPF_K:
  563. case BPF_ALU | BPF_NEG:
  564. case BPF_LD | BPF_ABS | BPF_W:
  565. case BPF_LD | BPF_ABS | BPF_H:
  566. case BPF_LD | BPF_ABS | BPF_B:
  567. case BPF_LD | BPF_IND | BPF_W:
  568. case BPF_LD | BPF_IND | BPF_H:
  569. case BPF_LD | BPF_IND | BPF_B:
  570. /* Check for overloaded BPF extension and
  571. * directly convert it if found, otherwise
  572. * just move on with mapping.
  573. */
  574. if (BPF_CLASS(fp->code) == BPF_LD &&
  575. BPF_MODE(fp->code) == BPF_ABS &&
  576. convert_bpf_extensions(fp, &insn))
  577. break;
  578. if (BPF_CLASS(fp->code) == BPF_LD &&
  579. convert_bpf_ld_abs(fp, &insn)) {
  580. *seen_ld_abs = true;
  581. break;
  582. }
  583. if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
  584. fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
  585. *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs this has always meant a
				 * return of 0.
				 */
  589. *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
  590. *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
  591. *insn++ = BPF_EXIT_INSN();
  592. }
  593. *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
  594. break;
  595. /* Jump transformation cannot use BPF block macros
  596. * everywhere as offset calculation and target updates
  597. * require a bit more work than the rest, i.e. jump
  598. * opcodes map as-is, but offsets need adjustment.
  599. */
  600. #define BPF_EMIT_JMP \
  601. do { \
  602. const s32 off_min = S16_MIN, off_max = S16_MAX; \
  603. s32 off; \
  604. \
  605. if (target >= len || target < 0) \
  606. goto err; \
  607. off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
  608. /* Adjust pc relative offset for 2nd or 3rd insn. */ \
  609. off -= insn - tmp_insns; \
  610. /* Reject anything not fitting into insn->off. */ \
  611. if (off < off_min || off > off_max) \
  612. goto err; \
  613. insn->off = off; \
  614. } while (0)
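		/* Worked example (illustrative): a cBPF insn at index i that
		 * jumps to cBPF index target = i + jt + 1 gets its eBPF
		 * offset from addrs[], which records where each cBPF insn
		 * was expanded to, so the raw distance is
		 * addrs[target] - addrs[i] - 1. If the jump is the 2nd or
		 * 3rd insn emitted into tmp_insns[] for this cBPF insn,
		 * (insn - tmp_insns) slots are subtracted again, since eBPF
		 * offsets are relative to the jump instruction itself.
		 */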
  615. case BPF_JMP | BPF_JA:
  616. target = i + fp->k + 1;
  617. insn->code = fp->code;
  618. BPF_EMIT_JMP;
  619. break;
  620. case BPF_JMP | BPF_JEQ | BPF_K:
  621. case BPF_JMP | BPF_JEQ | BPF_X:
  622. case BPF_JMP | BPF_JSET | BPF_K:
  623. case BPF_JMP | BPF_JSET | BPF_X:
  624. case BPF_JMP | BPF_JGT | BPF_K:
  625. case BPF_JMP | BPF_JGT | BPF_X:
  626. case BPF_JMP | BPF_JGE | BPF_K:
  627. case BPF_JMP | BPF_JGE | BPF_X:
  628. if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
  629. /* BPF immediates are signed, zero extend
  630. * immediate into tmp register and use it
  631. * in compare insn.
  632. */
  633. *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
  634. insn->dst_reg = BPF_REG_A;
  635. insn->src_reg = BPF_REG_TMP;
  636. bpf_src = BPF_X;
  637. } else {
  638. insn->dst_reg = BPF_REG_A;
  639. insn->imm = fp->k;
  640. bpf_src = BPF_SRC(fp->code);
  641. insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
  642. }
  643. /* Common case where 'jump_false' is next insn. */
  644. if (fp->jf == 0) {
  645. insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
  646. target = i + fp->jt + 1;
  647. BPF_EMIT_JMP;
  648. break;
  649. }
  650. /* Convert some jumps when 'jump_true' is next insn. */
  651. if (fp->jt == 0) {
  652. switch (BPF_OP(fp->code)) {
  653. case BPF_JEQ:
  654. insn->code = BPF_JMP | BPF_JNE | bpf_src;
  655. break;
  656. case BPF_JGT:
  657. insn->code = BPF_JMP | BPF_JLE | bpf_src;
  658. break;
  659. case BPF_JGE:
  660. insn->code = BPF_JMP | BPF_JLT | bpf_src;
  661. break;
  662. default:
  663. goto jmp_rest;
  664. }
  665. target = i + fp->jf + 1;
  666. BPF_EMIT_JMP;
  667. break;
  668. }
  669. jmp_rest:
  670. /* Other jumps are mapped into two insns: Jxx and JA. */
  671. target = i + fp->jt + 1;
  672. insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
  673. BPF_EMIT_JMP;
  674. insn++;
  675. insn->code = BPF_JMP | BPF_JA;
  676. target = i + fp->jf + 1;
  677. BPF_EMIT_JMP;
  678. break;
		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
  680. case BPF_LDX | BPF_MSH | BPF_B: {
  681. struct sock_filter tmp = {
  682. .code = BPF_LD | BPF_ABS | BPF_B,
  683. .k = fp->k,
  684. };
  685. *seen_ld_abs = true;
  686. /* X = A */
  687. *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
  688. /* A = BPF_R0 = *(u8 *) (skb->data + K) */
  689. convert_bpf_ld_abs(&tmp, &insn);
  690. insn++;
  691. /* A &= 0xf */
  692. *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
  693. /* A <<= 2 */
  694. *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
  695. /* tmp = X */
  696. *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
  697. /* X = A */
  698. *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
  699. /* A = tmp */
  700. *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
  701. break;
  702. }
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
  706. case BPF_RET | BPF_A:
  707. case BPF_RET | BPF_K:
  708. if (BPF_RVAL(fp->code) == BPF_K)
  709. *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
  710. 0, fp->k);
  711. *insn = BPF_EXIT_INSN();
  712. break;
  713. /* Store to stack. */
  714. case BPF_ST:
  715. case BPF_STX:
  716. stack_off = fp->k * 4 + 4;
  717. *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
  718. BPF_ST ? BPF_REG_A : BPF_REG_X,
  719. -stack_off);
  720. /* check_load_and_stores() verifies that classic BPF can
  721. * load from stack only after write, so tracking
  722. * stack_depth for ST|STX insns is enough
  723. */
  724. if (new_prog && new_prog->aux->stack_depth < stack_off)
  725. new_prog->aux->stack_depth = stack_off;
  726. break;
  727. /* Load from stack. */
  728. case BPF_LD | BPF_MEM:
  729. case BPF_LDX | BPF_MEM:
  730. stack_off = fp->k * 4 + 4;
  731. *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
  732. BPF_REG_A : BPF_REG_X, BPF_REG_FP,
  733. -stack_off);
  734. break;
  735. /* A = K or X = K */
  736. case BPF_LD | BPF_IMM:
  737. case BPF_LDX | BPF_IMM:
  738. *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
  739. BPF_REG_A : BPF_REG_X, fp->k);
  740. break;
  741. /* X = A */
  742. case BPF_MISC | BPF_TAX:
  743. *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
  744. break;
  745. /* A = X */
  746. case BPF_MISC | BPF_TXA:
  747. *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
  748. break;
  749. /* A = skb->len or X = skb->len */
  750. case BPF_LD | BPF_W | BPF_LEN:
  751. case BPF_LDX | BPF_W | BPF_LEN:
  752. *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
  753. BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
  754. offsetof(struct sk_buff, len));
  755. break;
  756. /* Access seccomp_data fields. */
  757. case BPF_LDX | BPF_ABS | BPF_W:
  758. /* A = *(u32 *) (ctx + K) */
  759. *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
  760. break;
  761. /* Unknown instruction. */
  762. default:
  763. goto err;
  764. }
  765. insn++;
  766. if (new_prog)
  767. memcpy(new_insn, tmp_insns,
  768. sizeof(*insn) * (insn - tmp_insns));
  769. new_insn += insn - tmp_insns;
  770. }
  771. if (!new_prog) {
  772. /* Only calculating new length. */
  773. *new_len = new_insn - first_insn;
  774. if (*seen_ld_abs)
  775. *new_len += 4; /* Prologue bits. */
  776. return 0;
  777. }
  778. pass++;
  779. if (new_flen != new_insn - first_insn) {
  780. new_flen = new_insn - first_insn;
  781. if (pass > 2)
  782. goto err;
  783. goto do_pass;
  784. }
  785. kfree(addrs);
  786. BUG_ON(*new_len != new_flen);
  787. return 0;
  788. err:
  789. kfree(addrs);
  790. return -EINVAL;
  791. }
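/* Usage sketch (illustrative; bpf_migrate_filter() below is the actual
 * in-tree caller):
 *
 *	bool seen_ld_abs = false;
 *	int new_len, err;
 *
 *	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
 *				 &seen_ld_abs);
 *	if (!err) {
 *		// allocate new_prog with room for new_len insns, then:
 *		err = bpf_convert_filter(old_prog, old_len, new_prog,
 *					 &new_len, &seen_ld_abs);
 *	}
 */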
/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell before it has been written, and we check all branches to be
 * sure a malicious user doesn't try to abuse us.
 */
  799. static int check_load_and_stores(const struct sock_filter *filter, int flen)
  800. {
  801. u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
  802. int pc, ret = 0;
  803. BUILD_BUG_ON(BPF_MEMWORDS > 16);
  804. masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
  805. if (!masks)
  806. return -ENOMEM;
  807. memset(masks, 0xff, flen * sizeof(*masks));
  808. for (pc = 0; pc < flen; pc++) {
  809. memvalid &= masks[pc];
  810. switch (filter[pc].code) {
  811. case BPF_ST:
  812. case BPF_STX:
  813. memvalid |= (1 << filter[pc].k);
  814. break;
  815. case BPF_LD | BPF_MEM:
  816. case BPF_LDX | BPF_MEM:
  817. if (!(memvalid & (1 << filter[pc].k))) {
  818. ret = -EINVAL;
  819. goto error;
  820. }
  821. break;
  822. case BPF_JMP | BPF_JA:
  823. /* A jump must set masks on target */
  824. masks[pc + 1 + filter[pc].k] &= memvalid;
  825. memvalid = ~0;
  826. break;
  827. case BPF_JMP | BPF_JEQ | BPF_K:
  828. case BPF_JMP | BPF_JEQ | BPF_X:
  829. case BPF_JMP | BPF_JGE | BPF_K:
  830. case BPF_JMP | BPF_JGE | BPF_X:
  831. case BPF_JMP | BPF_JGT | BPF_K:
  832. case BPF_JMP | BPF_JGT | BPF_X:
  833. case BPF_JMP | BPF_JSET | BPF_K:
  834. case BPF_JMP | BPF_JSET | BPF_X:
  835. /* A jump must set masks on targets */
  836. masks[pc + 1 + filter[pc].jt] &= memvalid;
  837. masks[pc + 1 + filter[pc].jf] &= memvalid;
  838. memvalid = ~0;
  839. break;
  840. }
  841. }
  842. error:
  843. kfree(masks);
  844. return ret;
  845. }
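/* Example (illustrative): check_load_and_stores() rejects a filter like
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 3),	// A = M[3], never written
 *	BPF_STMT(BPF_RET | BPF_K, 0),
 *
 * since cell M[3] is read before any BPF_ST/BPF_STX wrote it, while the
 * same load is accepted once a preceding BPF_STMT(BPF_ST, 3) fills it.
 */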
  846. static bool chk_code_allowed(u16 code_to_probe)
  847. {
  848. static const bool codes[] = {
  849. /* 32 bit ALU operations */
  850. [BPF_ALU | BPF_ADD | BPF_K] = true,
  851. [BPF_ALU | BPF_ADD | BPF_X] = true,
  852. [BPF_ALU | BPF_SUB | BPF_K] = true,
  853. [BPF_ALU | BPF_SUB | BPF_X] = true,
  854. [BPF_ALU | BPF_MUL | BPF_K] = true,
  855. [BPF_ALU | BPF_MUL | BPF_X] = true,
  856. [BPF_ALU | BPF_DIV | BPF_K] = true,
  857. [BPF_ALU | BPF_DIV | BPF_X] = true,
  858. [BPF_ALU | BPF_MOD | BPF_K] = true,
  859. [BPF_ALU | BPF_MOD | BPF_X] = true,
  860. [BPF_ALU | BPF_AND | BPF_K] = true,
  861. [BPF_ALU | BPF_AND | BPF_X] = true,
  862. [BPF_ALU | BPF_OR | BPF_K] = true,
  863. [BPF_ALU | BPF_OR | BPF_X] = true,
  864. [BPF_ALU | BPF_XOR | BPF_K] = true,
  865. [BPF_ALU | BPF_XOR | BPF_X] = true,
  866. [BPF_ALU | BPF_LSH | BPF_K] = true,
  867. [BPF_ALU | BPF_LSH | BPF_X] = true,
  868. [BPF_ALU | BPF_RSH | BPF_K] = true,
  869. [BPF_ALU | BPF_RSH | BPF_X] = true,
  870. [BPF_ALU | BPF_NEG] = true,
  871. /* Load instructions */
  872. [BPF_LD | BPF_W | BPF_ABS] = true,
  873. [BPF_LD | BPF_H | BPF_ABS] = true,
  874. [BPF_LD | BPF_B | BPF_ABS] = true,
  875. [BPF_LD | BPF_W | BPF_LEN] = true,
  876. [BPF_LD | BPF_W | BPF_IND] = true,
  877. [BPF_LD | BPF_H | BPF_IND] = true,
  878. [BPF_LD | BPF_B | BPF_IND] = true,
  879. [BPF_LD | BPF_IMM] = true,
  880. [BPF_LD | BPF_MEM] = true,
  881. [BPF_LDX | BPF_W | BPF_LEN] = true,
  882. [BPF_LDX | BPF_B | BPF_MSH] = true,
  883. [BPF_LDX | BPF_IMM] = true,
  884. [BPF_LDX | BPF_MEM] = true,
  885. /* Store instructions */
  886. [BPF_ST] = true,
  887. [BPF_STX] = true,
  888. /* Misc instructions */
  889. [BPF_MISC | BPF_TAX] = true,
  890. [BPF_MISC | BPF_TXA] = true,
  891. /* Return instructions */
  892. [BPF_RET | BPF_K] = true,
  893. [BPF_RET | BPF_A] = true,
  894. /* Jump instructions */
  895. [BPF_JMP | BPF_JA] = true,
  896. [BPF_JMP | BPF_JEQ | BPF_K] = true,
  897. [BPF_JMP | BPF_JEQ | BPF_X] = true,
  898. [BPF_JMP | BPF_JGE | BPF_K] = true,
  899. [BPF_JMP | BPF_JGE | BPF_X] = true,
  900. [BPF_JMP | BPF_JGT | BPF_K] = true,
  901. [BPF_JMP | BPF_JGT | BPF_X] = true,
  902. [BPF_JMP | BPF_JSET | BPF_K] = true,
  903. [BPF_JMP | BPF_JSET | BPF_X] = true,
  904. };
  905. if (code_to_probe >= ARRAY_SIZE(codes))
  906. return false;
  907. return codes[code_to_probe];
  908. }
  909. static bool bpf_check_basics_ok(const struct sock_filter *filter,
  910. unsigned int flen)
  911. {
  912. if (filter == NULL)
  913. return false;
  914. if (flen == 0 || flen > BPF_MAXINSNS)
  915. return false;
  916. return true;
  917. }
/**
 * bpf_check_classic - verify socket filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
  932. static int bpf_check_classic(const struct sock_filter *filter,
  933. unsigned int flen)
  934. {
  935. bool anc_found;
  936. int pc;
  937. /* Check the filter code now */
  938. for (pc = 0; pc < flen; pc++) {
  939. const struct sock_filter *ftest = &filter[pc];
  940. /* May we actually operate on this code? */
  941. if (!chk_code_allowed(ftest->code))
  942. return -EINVAL;
  943. /* Some instructions need special checks */
  944. switch (ftest->code) {
  945. case BPF_ALU | BPF_DIV | BPF_K:
  946. case BPF_ALU | BPF_MOD | BPF_K:
  947. /* Check for division by zero */
  948. if (ftest->k == 0)
  949. return -EINVAL;
  950. break;
  951. case BPF_ALU | BPF_LSH | BPF_K:
  952. case BPF_ALU | BPF_RSH | BPF_K:
  953. if (ftest->k >= 32)
  954. return -EINVAL;
  955. break;
  956. case BPF_LD | BPF_MEM:
  957. case BPF_LDX | BPF_MEM:
  958. case BPF_ST:
  959. case BPF_STX:
  960. /* Check for invalid memory addresses */
  961. if (ftest->k >= BPF_MEMWORDS)
  962. return -EINVAL;
  963. break;
  964. case BPF_JMP | BPF_JA:
  965. /* Note, the large ftest->k might cause loops.
  966. * Compare this with conditional jumps below,
  967. * where offsets are limited. --ANK (981016)
  968. */
  969. if (ftest->k >= (unsigned int)(flen - pc - 1))
  970. return -EINVAL;
  971. break;
  972. case BPF_JMP | BPF_JEQ | BPF_K:
  973. case BPF_JMP | BPF_JEQ | BPF_X:
  974. case BPF_JMP | BPF_JGE | BPF_K:
  975. case BPF_JMP | BPF_JGE | BPF_X:
  976. case BPF_JMP | BPF_JGT | BPF_K:
  977. case BPF_JMP | BPF_JGT | BPF_X:
  978. case BPF_JMP | BPF_JSET | BPF_K:
  979. case BPF_JMP | BPF_JSET | BPF_X:
  980. /* Both conditionals must be safe */
  981. if (pc + ftest->jt + 1 >= flen ||
  982. pc + ftest->jf + 1 >= flen)
  983. return -EINVAL;
  984. break;
  985. case BPF_LD | BPF_W | BPF_ABS:
  986. case BPF_LD | BPF_H | BPF_ABS:
  987. case BPF_LD | BPF_B | BPF_ABS:
  988. anc_found = false;
  989. if (bpf_anc_helper(ftest) & BPF_ANC)
  990. anc_found = true;
  991. /* Ancillary operation unknown or unsupported */
  992. if (anc_found == false && ftest->k >= SKF_AD_OFF)
  993. return -EINVAL;
  994. }
  995. }
  996. /* Last instruction must be a RET code */
  997. switch (filter[flen - 1].code) {
  998. case BPF_RET | BPF_K:
  999. case BPF_RET | BPF_A:
  1000. return check_load_and_stores(filter, flen);
  1001. }
  1002. return -EINVAL;
  1003. }
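/* Example (illustrative): the canonical "accept everything" classic filter
 *
 *	struct sock_filter accept_all[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *
 * passes bpf_check_classic(): its single opcode is allowed, no jump or
 * scratch memory checks apply, and the last instruction is a RET.
 */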
  1004. static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
  1005. const struct sock_fprog *fprog)
  1006. {
  1007. unsigned int fsize = bpf_classic_proglen(fprog);
  1008. struct sock_fprog_kern *fkprog;
  1009. fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
  1010. if (!fp->orig_prog)
  1011. return -ENOMEM;
  1012. fkprog = fp->orig_prog;
  1013. fkprog->len = fprog->len;
  1014. fkprog->filter = kmemdup(fp->insns, fsize,
  1015. GFP_KERNEL | __GFP_NOWARN);
  1016. if (!fkprog->filter) {
  1017. kfree(fp->orig_prog);
  1018. return -ENOMEM;
  1019. }
  1020. return 0;
  1021. }
  1022. static void bpf_release_orig_filter(struct bpf_prog *fp)
  1023. {
  1024. struct sock_fprog_kern *fprog = fp->orig_prog;
  1025. if (fprog) {
  1026. kfree(fprog->filter);
  1027. kfree(fprog);
  1028. }
  1029. }
  1030. static void __bpf_prog_release(struct bpf_prog *prog)
  1031. {
  1032. if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
  1033. bpf_prog_put(prog);
  1034. } else {
  1035. bpf_release_orig_filter(prog);
  1036. bpf_prog_free(prog);
  1037. }
  1038. }
  1039. static void __sk_filter_release(struct sk_filter *fp)
  1040. {
  1041. __bpf_prog_release(fp->prog);
  1042. kfree(fp);
  1043. }
  1044. /**
  1045. * sk_filter_release_rcu - Release a socket filter by rcu_head
  1046. * @rcu: rcu_head that contains the sk_filter to free
  1047. */
  1048. static void sk_filter_release_rcu(struct rcu_head *rcu)
  1049. {
  1050. struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
  1051. __sk_filter_release(fp);
  1052. }
  1053. /**
  1054. * sk_filter_release - release a socket filter
  1055. * @fp: filter to remove
  1056. *
  1057. * Remove a filter from a socket and release its resources.
  1058. */
  1059. static void sk_filter_release(struct sk_filter *fp)
  1060. {
  1061. if (refcount_dec_and_test(&fp->refcnt))
  1062. call_rcu(&fp->rcu, sk_filter_release_rcu);
  1063. }
  1064. void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
  1065. {
  1066. u32 filter_size = bpf_prog_size(fp->prog->len);
  1067. atomic_sub(filter_size, &sk->sk_omem_alloc);
  1068. sk_filter_release(fp);
  1069. }
/* Try to charge the socket memory if there is space available.
 * Return true on success.
 */
  1073. static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
  1074. {
  1075. u32 filter_size = bpf_prog_size(fp->prog->len);
  1076. /* same check as in sock_kmalloc() */
  1077. if (filter_size <= sysctl_optmem_max &&
  1078. atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
  1079. atomic_add(filter_size, &sk->sk_omem_alloc);
  1080. return true;
  1081. }
  1082. return false;
  1083. }
  1084. bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
  1085. {
  1086. if (!refcount_inc_not_zero(&fp->refcnt))
  1087. return false;
  1088. if (!__sk_filter_charge(sk, fp)) {
  1089. sk_filter_release(fp);
  1090. return false;
  1091. }
  1092. return true;
  1093. }
  1094. static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
  1095. {
  1096. struct sock_filter *old_prog;
  1097. struct bpf_prog *old_fp;
  1098. int err, new_len, old_len = fp->len;
  1099. bool seen_ld_abs = false;
	/* We are free to overwrite insns et al right here as they
	 * won't be used internally anymore once the migration to the
	 * internal BPF instruction representation has happened.
	 */
  1105. BUILD_BUG_ON(sizeof(struct sock_filter) !=
  1106. sizeof(struct bpf_insn));
  1107. /* Conversion cannot happen on overlapping memory areas,
  1108. * so we need to keep the user BPF around until the 2nd
  1109. * pass. At this time, the user BPF is stored in fp->insns.
  1110. */
  1111. old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
  1112. GFP_KERNEL | __GFP_NOWARN);
  1113. if (!old_prog) {
  1114. err = -ENOMEM;
  1115. goto out_err;
  1116. }
  1117. /* 1st pass: calculate the new program length. */
  1118. err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
  1119. &seen_ld_abs);
  1120. if (err)
  1121. goto out_err_free;
  1122. /* Expand fp for appending the new filter representation. */
  1123. old_fp = fp;
  1124. fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
  1125. if (!fp) {
  1126. /* The old_fp is still around in case we couldn't
  1127. * allocate new memory, so uncharge on that one.
  1128. */
  1129. fp = old_fp;
  1130. err = -ENOMEM;
  1131. goto out_err_free;
  1132. }
  1133. fp->len = new_len;
  1134. /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
  1135. err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
  1136. &seen_ld_abs);
  1137. if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note that
		 * at this time old_fp has already been released by
		 * krealloc().
		 */
  1143. goto out_err_free;
  1144. fp = bpf_prog_select_runtime(fp, &err);
  1145. if (err)
  1146. goto out_err_free;
  1147. kfree(old_prog);
  1148. return fp;
  1149. out_err_free:
  1150. kfree(old_prog);
  1151. out_err:
  1152. __bpf_prog_release(fp);
  1153. return ERR_PTR(err);
  1154. }
  1155. static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
  1156. bpf_aux_classic_check_t trans)
  1157. {
  1158. int err;
  1159. fp->bpf_func = NULL;
  1160. fp->jited = 0;
  1161. err = bpf_check_classic(fp->insns, fp->len);
  1162. if (err) {
  1163. __bpf_prog_release(fp);
  1164. return ERR_PTR(err);
  1165. }
	/* There might be additional checks and transformations
	 * needed on classic filters, e.g. in case of seccomp.
	 */
  1169. if (trans) {
  1170. err = trans(fp->insns, fp->len);
  1171. if (err) {
  1172. __bpf_prog_release(fp);
  1173. return ERR_PTR(err);
  1174. }
  1175. }
  1176. /* Probe if we can JIT compile the filter and if so, do
  1177. * the compilation of the filter.
  1178. */
  1179. bpf_jit_compile(fp);
  1180. /* JIT compiler couldn't process this filter, so do the
  1181. * internal BPF translation for the optimized interpreter.
  1182. */
  1183. if (!fp->jited)
  1184. fp = bpf_migrate_filter(fp);
  1185. return fp;
  1186. }
/**
 * bpf_prog_create - create an unattached filter
 * @pfp: the unattached filter that is created
 * @fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter,
 * a negative errno code is returned. On success the return is zero.
 */
  1197. int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
  1198. {
  1199. unsigned int fsize = bpf_classic_proglen(fprog);
  1200. struct bpf_prog *fp;
	/* Make sure the new filter is present and its length is sane. */
  1202. if (!bpf_check_basics_ok(fprog->filter, fprog->len))
  1203. return -EINVAL;
  1204. fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
  1205. if (!fp)
  1206. return -ENOMEM;
  1207. memcpy(fp->insns, fprog->filter, fsize);
  1208. fp->len = fprog->len;
  1209. /* Since unattached filters are not copied back to user
  1210. * space through sk_get_filter(), we do not need to hold
  1211. * a copy here, and can spare us the work.
  1212. */
  1213. fp->orig_prog = NULL;
  1214. /* bpf_prepare_filter() already takes care of freeing
  1215. * memory in case something goes wrong.
  1216. */
  1217. fp = bpf_prepare_filter(fp, NULL);
  1218. if (IS_ERR(fp))
  1219. return PTR_ERR(fp);
  1220. *pfp = fp;
  1221. return 0;
  1222. }
  1223. EXPORT_SYMBOL_GPL(bpf_prog_create);
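/* Usage sketch (illustrative; in-tree callers follow the same pattern):
 * build a kernel-side classic program and create an unattached filter
 * from it:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	// accept all
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 *
 * The program is later released with bpf_prog_destroy(prog).
 */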
/**
 * bpf_prog_create_from_user - create an unattached filter from user buffer
 * @pfp: the unattached filter that is created
 * @fprog: the filter program
 * @trans: post-classic verifier transformation handler
 * @save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from a user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
  1235. int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
  1236. bpf_aux_classic_check_t trans, bool save_orig)
  1237. {
  1238. unsigned int fsize = bpf_classic_proglen(fprog);
  1239. struct bpf_prog *fp;
  1240. int err;
	/* Make sure the new filter is present and its length is sane. */
  1242. if (!bpf_check_basics_ok(fprog->filter, fprog->len))
  1243. return -EINVAL;
  1244. fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
  1245. if (!fp)
  1246. return -ENOMEM;
  1247. if (copy_from_user(fp->insns, fprog->filter, fsize)) {
  1248. __bpf_prog_free(fp);
  1249. return -EFAULT;
  1250. }
  1251. fp->len = fprog->len;
  1252. fp->orig_prog = NULL;
  1253. if (save_orig) {
  1254. err = bpf_prog_store_orig_filter(fp, fprog);
  1255. if (err) {
  1256. __bpf_prog_free(fp);
  1257. return -ENOMEM;
  1258. }
  1259. }
  1260. /* bpf_prepare_filter() already takes care of freeing
  1261. * memory in case something goes wrong.
  1262. */
  1263. fp = bpf_prepare_filter(fp, trans);
  1264. if (IS_ERR(fp))
  1265. return PTR_ERR(fp);
  1266. *pfp = fp;
  1267. return 0;
  1268. }
  1269. EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
  1270. void bpf_prog_destroy(struct bpf_prog *fp)
  1271. {
  1272. __bpf_prog_release(fp);
  1273. }
  1274. EXPORT_SYMBOL_GPL(bpf_prog_destroy);
  1275. static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
  1276. {
  1277. struct sk_filter *fp, *old_fp;
  1278. fp = kmalloc(sizeof(*fp), GFP_KERNEL);
  1279. if (!fp)
  1280. return -ENOMEM;
  1281. fp->prog = prog;
  1282. if (!__sk_filter_charge(sk, fp)) {
  1283. kfree(fp);
  1284. return -ENOMEM;
  1285. }
  1286. refcount_set(&fp->refcnt, 1);
  1287. old_fp = rcu_dereference_protected(sk->sk_filter,
  1288. lockdep_sock_is_held(sk));
  1289. rcu_assign_pointer(sk->sk_filter, fp);
  1290. if (old_fp)
  1291. sk_filter_uncharge(sk, old_fp);
  1292. return 0;
  1293. }
  1294. static
  1295. struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
  1296. {
  1297. unsigned int fsize = bpf_classic_proglen(fprog);
  1298. struct bpf_prog *prog;
  1299. int err;
  1300. if (sock_flag(sk, SOCK_FILTER_LOCKED))
  1301. return ERR_PTR(-EPERM);
	/* Make sure the new filter is present and its length is sane. */
  1303. if (!bpf_check_basics_ok(fprog->filter, fprog->len))
  1304. return ERR_PTR(-EINVAL);
  1305. prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
  1306. if (!prog)
  1307. return ERR_PTR(-ENOMEM);
  1308. if (copy_from_user(prog->insns, fprog->filter, fsize)) {
  1309. __bpf_prog_free(prog);
  1310. return ERR_PTR(-EFAULT);
  1311. }
  1312. prog->len = fprog->len;
  1313. err = bpf_prog_store_orig_filter(prog, fprog);
  1314. if (err) {
  1315. __bpf_prog_free(prog);
  1316. return ERR_PTR(-ENOMEM);
  1317. }
  1318. /* bpf_prepare_filter() already takes care of freeing
  1319. * memory in case something goes wrong.
  1320. */
  1321. return bpf_prepare_filter(prog, NULL);
  1322. }
/**
 * sk_attach_filter - attach a socket filter
 * @fprog: the filter program
 * @sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter, a negative
 * errno code is returned. On success the return is zero.
 */
  1333. int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
  1334. {
  1335. struct bpf_prog *prog = __get_filter(fprog, sk);
  1336. int err;
  1337. if (IS_ERR(prog))
  1338. return PTR_ERR(prog);
  1339. err = __sk_attach_prog(prog, sk);
  1340. if (err < 0) {
  1341. __bpf_prog_release(prog);
  1342. return err;
  1343. }
  1344. return 0;
  1345. }
  1346. EXPORT_SYMBOL_GPL(sk_attach_filter);
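/* Illustrative user space counterpart (sketch, not part of this file): a
 * classic filter typically reaches sk_attach_filter() via setsockopt():
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	// accept all
 *	};
 *	struct sock_fprog bpf = {
 *		.len	= 1,
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 */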
  1347. int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
  1348. {
  1349. struct bpf_prog *prog = __get_filter(fprog, sk);
  1350. int err;
  1351. if (IS_ERR(prog))
  1352. return PTR_ERR(prog);
  1353. if (bpf_prog_size(prog->len) > sysctl_optmem_max)
  1354. err = -ENOMEM;
  1355. else
  1356. err = reuseport_attach_prog(sk, prog);
  1357. if (err)
  1358. __bpf_prog_release(prog);
  1359. return err;
  1360. }
  1361. static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
  1362. {
  1363. if (sock_flag(sk, SOCK_FILTER_LOCKED))
  1364. return ERR_PTR(-EPERM);
  1365. return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
  1366. }
  1367. int sk_attach_bpf(u32 ufd, struct sock *sk)
  1368. {
  1369. struct bpf_prog *prog = __get_bpf(ufd, sk);
  1370. int err;
  1371. if (IS_ERR(prog))
  1372. return PTR_ERR(prog);
  1373. err = __sk_attach_prog(prog, sk);
  1374. if (err < 0) {
  1375. bpf_prog_put(prog);
  1376. return err;
  1377. }
  1378. return 0;
  1379. }
  1380. int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
  1381. {
  1382. struct bpf_prog *prog;
  1383. int err;
  1384. if (sock_flag(sk, SOCK_FILTER_LOCKED))
  1385. return -EPERM;
  1386. prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
  1387. if (PTR_ERR(prog) == -EINVAL)
  1388. prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
  1389. if (IS_ERR(prog))
  1390. return PTR_ERR(prog);
  1391. if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non-BPF_PROG_TYPE_SOCKET_FILTER bpf progs
		 * (e.g. sockmap), it depends on the limits imposed by
		 * bpf_prog_load(). Hence, sysctl_optmem_max is not checked.
		 */
  1397. if ((sk->sk_type != SOCK_STREAM &&
  1398. sk->sk_type != SOCK_DGRAM) ||
  1399. (sk->sk_protocol != IPPROTO_UDP &&
  1400. sk->sk_protocol != IPPROTO_TCP) ||
  1401. (sk->sk_family != AF_INET &&
  1402. sk->sk_family != AF_INET6)) {
  1403. err = -ENOTSUPP;
  1404. goto err_prog_put;
  1405. }
  1406. } else {
  1407. /* BPF_PROG_TYPE_SOCKET_FILTER */
  1408. if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
  1409. err = -ENOMEM;
  1410. goto err_prog_put;
  1411. }
  1412. }
  1413. err = reuseport_attach_prog(sk, prog);
  1414. err_prog_put:
  1415. if (err)
  1416. bpf_prog_put(prog);
  1417. return err;
  1418. }
  1419. void sk_reuseport_prog_free(struct bpf_prog *prog)
  1420. {
  1421. if (!prog)
  1422. return;
  1423. if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
  1424. bpf_prog_put(prog);
  1425. else
  1426. bpf_prog_destroy(prog);
  1427. }
  1428. struct bpf_scratchpad {
  1429. union {
  1430. __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
  1431. u8 buff[MAX_BPF_STACK];
  1432. };
  1433. };
  1434. static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
  1435. static inline int __bpf_try_make_writable(struct sk_buff *skb,
  1436. unsigned int write_len)
  1437. {
  1438. return skb_ensure_writable(skb, write_len);
  1439. }
  1440. static inline int bpf_try_make_writable(struct sk_buff *skb,
  1441. unsigned int write_len)
  1442. {
  1443. int err = __bpf_try_make_writable(skb, write_len);
  1444. bpf_compute_data_pointers(skb);
  1445. return err;
  1446. }
  1447. static int bpf_try_make_head_writable(struct sk_buff *skb)
  1448. {
  1449. return bpf_try_make_writable(skb, skb_headlen(skb));
  1450. }
  1451. static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
  1452. {
  1453. if (skb_at_tc_ingress(skb))
  1454. skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
  1455. }
  1456. static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
  1457. {
  1458. if (skb_at_tc_ingress(skb))
  1459. skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
  1460. }
  1461. BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
  1462. const void *, from, u32, len, u64, flags)
  1463. {
  1464. void *ptr;
  1465. if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
  1466. return -EINVAL;
  1467. if (unlikely(offset > 0xffff))
  1468. return -EFAULT;
  1469. if (unlikely(bpf_try_make_writable(skb, offset + len)))
  1470. return -EFAULT;
  1471. ptr = skb->data + offset;
  1472. if (flags & BPF_F_RECOMPUTE_CSUM)
  1473. __skb_postpull_rcsum(skb, ptr, len, offset);
  1474. memcpy(ptr, from, len);
  1475. if (flags & BPF_F_RECOMPUTE_CSUM)
  1476. __skb_postpush_rcsum(skb, ptr, len, offset);
  1477. if (flags & BPF_F_INVALIDATE_HASH)
  1478. skb_clear_hash(skb);
  1479. return 0;
  1480. }
  1481. static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
  1482. .func = bpf_skb_store_bytes,
  1483. .gpl_only = false,
  1484. .ret_type = RET_INTEGER,
  1485. .arg1_type = ARG_PTR_TO_CTX,
  1486. .arg2_type = ARG_ANYTHING,
  1487. .arg3_type = ARG_PTR_TO_MEM,
  1488. .arg4_type = ARG_CONST_SIZE,
  1489. .arg5_type = ARG_ANYTHING,
  1490. };
  1491. BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
  1492. void *, to, u32, len)
  1493. {
  1494. void *ptr;
  1495. if (unlikely(offset > 0xffff))
  1496. goto err_clear;
  1497. ptr = skb_header_pointer(skb, offset, len, to);
  1498. if (unlikely(!ptr))
  1499. goto err_clear;
  1500. if (ptr != to)
  1501. memcpy(to, ptr, len);
  1502. return 0;
  1503. err_clear:
  1504. memset(to, 0, len);
  1505. return -EFAULT;
  1506. }
  1507. static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
  1508. .func = bpf_skb_load_bytes,
  1509. .gpl_only = false,
  1510. .ret_type = RET_INTEGER,
  1511. .arg1_type = ARG_PTR_TO_CTX,
  1512. .arg2_type = ARG_ANYTHING,
  1513. .arg3_type = ARG_PTR_TO_UNINIT_MEM,
  1514. .arg4_type = ARG_CONST_SIZE,
  1515. };
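/* Illustrative BPF program side usage (sketch): copy the Ethernet header
 * out of the skb regardless of whether it sits in the linear area:
 *
 *	struct ethhdr eth;
 *
 *	if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
 *		return TC_ACT_SHOT;	// assumes a tc/cls_act context
 */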
  1516. BPF_CALL_4(bpf_flow_dissector_load_bytes,
  1517. const struct bpf_flow_dissector *, ctx, u32, offset,
  1518. void *, to, u32, len)
  1519. {
  1520. void *ptr;
  1521. if (unlikely(offset > 0xffff))
  1522. goto err_clear;
  1523. if (unlikely(!ctx->skb))
  1524. goto err_clear;
  1525. ptr = skb_header_pointer(ctx->skb, offset, len, to);
  1526. if (unlikely(!ptr))
  1527. goto err_clear;
  1528. if (ptr != to)
  1529. memcpy(to, ptr, len);
  1530. return 0;
  1531. err_clear:
  1532. memset(to, 0, len);
  1533. return -EFAULT;
  1534. }
  1535. static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
  1536. .func = bpf_flow_dissector_load_bytes,
  1537. .gpl_only = false,
  1538. .ret_type = RET_INTEGER,
  1539. .arg1_type = ARG_PTR_TO_CTX,
  1540. .arg2_type = ARG_ANYTHING,
  1541. .arg3_type = ARG_PTR_TO_UNINIT_MEM,
  1542. .arg4_type = ARG_CONST_SIZE,
  1543. };
  1544. BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
  1545. u32, offset, void *, to, u32, len, u32, start_header)
  1546. {
  1547. u8 *end = skb_tail_pointer(skb);
  1548. u8 *start, *ptr;
  1549. if (unlikely(offset > 0xffff))
  1550. goto err_clear;
  1551. switch (start_header) {
  1552. case BPF_HDR_START_MAC:
  1553. if (unlikely(!skb_mac_header_was_set(skb)))
  1554. goto err_clear;
  1555. start = skb_mac_header(skb);
  1556. break;
  1557. case BPF_HDR_START_NET:
  1558. start = skb_network_header(skb);
  1559. break;
  1560. default:
  1561. goto err_clear;
  1562. }
  1563. ptr = start + offset;
  1564. if (likely(ptr + len <= end)) {
  1565. memcpy(to, ptr, len);
  1566. return 0;
  1567. }
  1568. err_clear:
  1569. memset(to, 0, len);
  1570. return -EFAULT;
  1571. }
  1572. static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
  1573. .func = bpf_skb_load_bytes_relative,
  1574. .gpl_only = false,
  1575. .ret_type = RET_INTEGER,
  1576. .arg1_type = ARG_PTR_TO_CTX,
  1577. .arg2_type = ARG_ANYTHING,
  1578. .arg3_type = ARG_PTR_TO_UNINIT_MEM,
  1579. .arg4_type = ARG_CONST_SIZE,
  1580. .arg5_type = ARG_ANYTHING,
  1581. };
  1582. BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
  1583. {
	/* The idea is the following: should the needed direct read/write
	 * test fail at runtime, we can pull in more data and redo the
	 * test, since implicitly we invalidate previous checks here.
	 *
	 * Or, since we know how much data we need to make readable or
	 * writable, this can be done once at the beginning of the program
	 * for the direct access case. By this we overcome the limitation
	 * of only the current headroom being accessible.
	 */
  1593. return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
  1594. }
  1595. static const struct bpf_func_proto bpf_skb_pull_data_proto = {
  1596. .func = bpf_skb_pull_data,
  1597. .gpl_only = false,
  1598. .ret_type = RET_INTEGER,
  1599. .arg1_type = ARG_PTR_TO_CTX,
  1600. .arg2_type = ARG_ANYTHING,
  1601. };
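/* Illustrative BPF program side pattern (sketch): pull what is about to be
 * accessed directly, then re-derive the data pointers, since the helper
 * invalidates them:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + 60 > data_end) {
 *		if (bpf_skb_pull_data(skb, 60))
 *			return TC_ACT_SHOT;
 *		data = (void *)(long)skb->data;
 *		data_end = (void *)(long)skb->data_end;
 *		if (data + 60 > data_end)
 *			return TC_ACT_SHOT;
 *	}
 */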
  1602. BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
  1603. {
  1604. return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
  1605. }
  1606. static const struct bpf_func_proto bpf_sk_fullsock_proto = {
  1607. .func = bpf_sk_fullsock,
  1608. .gpl_only = false,
  1609. .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
  1610. .arg1_type = ARG_PTR_TO_SOCK_COMMON,
  1611. };
  1612. static inline int sk_skb_try_make_writable(struct sk_buff *skb,
  1613. unsigned int write_len)
  1614. {
  1615. int err = __bpf_try_make_writable(skb, write_len);
  1616. bpf_compute_data_end_sk_skb(skb);
  1617. return err;
  1618. }
  1619. BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
  1620. {
	/* The idea is the following: should the needed direct read/write
	 * test fail at runtime, we can pull in more data and redo the
	 * test, since implicitly we invalidate previous checks here.
	 *
	 * Or, since we know how much data we need to make readable or
	 * writable, this can be done once at the beginning of the program
	 * for the direct access case. By this we overcome the limitation
	 * of only the current headroom being accessible.
	 */
  1630. return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
  1631. }
  1632. static const struct bpf_func_proto sk_skb_pull_data_proto = {
  1633. .func = sk_skb_pull_data,
  1634. .gpl_only = false,
  1635. .ret_type = RET_INTEGER,
  1636. .arg1_type = ARG_PTR_TO_CTX,
  1637. .arg2_type = ARG_ANYTHING,
  1638. };
  1639. BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
  1640. u64, from, u64, to, u64, flags)
  1641. {
  1642. __sum16 *ptr;
  1643. if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
  1644. return -EINVAL;
  1645. if (unlikely(offset > 0xffff || offset & 1))
  1646. return -EFAULT;
  1647. if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
  1648. return -EFAULT;
  1649. ptr = (__sum16 *)(skb->data + offset);
  1650. switch (flags & BPF_F_HDR_FIELD_MASK) {
  1651. case 0:
  1652. if (unlikely(from != 0))
  1653. return -EINVAL;
  1654. csum_replace_by_diff(ptr, to);
  1655. break;
  1656. case 2:
  1657. csum_replace2(ptr, from, to);
  1658. break;
  1659. case 4:
  1660. csum_replace4(ptr, from, to);
  1661. break;
  1662. default:
  1663. return -EINVAL;
  1664. }
  1665. return 0;
  1666. }
  1667. static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
  1668. .func = bpf_l3_csum_replace,
  1669. .gpl_only = false,
  1670. .ret_type = RET_INTEGER,
  1671. .arg1_type = ARG_PTR_TO_CTX,
  1672. .arg2_type = ARG_ANYTHING,
  1673. .arg3_type = ARG_ANYTHING,
  1674. .arg4_type = ARG_ANYTHING,
  1675. .arg5_type = ARG_ANYTHING,
  1676. };
  1677. BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
  1678. u64, from, u64, to, u64, flags)
  1679. {
  1680. bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
  1681. bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
  1682. bool do_mforce = flags & BPF_F_MARK_ENFORCE;
  1683. __sum16 *ptr;
  1684. if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
  1685. BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
  1686. return -EINVAL;
  1687. if (unlikely(offset > 0xffff || offset & 1))
  1688. return -EFAULT;
  1689. if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
  1690. return -EFAULT;
  1691. ptr = (__sum16 *)(skb->data + offset);
  1692. if (is_mmzero && !do_mforce && !*ptr)
  1693. return 0;
  1694. switch (flags & BPF_F_HDR_FIELD_MASK) {
  1695. case 0:
  1696. if (unlikely(from != 0))
  1697. return -EINVAL;
  1698. inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
  1699. break;
  1700. case 2:
  1701. inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
  1702. break;
  1703. case 4:
  1704. inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
  1705. break;
  1706. default:
  1707. return -EINVAL;
  1708. }
  1709. if (is_mmzero && !*ptr)
  1710. *ptr = CSUM_MANGLED_0;
  1711. return 0;
  1712. }
  1713. static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
  1714. .func = bpf_l4_csum_replace,
  1715. .gpl_only = false,
  1716. .ret_type = RET_INTEGER,
  1717. .arg1_type = ARG_PTR_TO_CTX,
  1718. .arg2_type = ARG_ANYTHING,
  1719. .arg3_type = ARG_ANYTHING,
  1720. .arg4_type = ARG_ANYTHING,
  1721. .arg5_type = ARG_ANYTHING,
  1722. };
  1723. BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
  1724. __be32 *, to, u32, to_size, __wsum, seed)
  1725. {
  1726. struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
  1727. u32 diff_size = from_size + to_size;
  1728. int i, j = 0;
  1729. /* This is quite flexible, some examples:
  1730. *
  1731. * from_size == 0, to_size > 0, seed := csum --> pushing data
  1732. * from_size > 0, to_size == 0, seed := csum --> pulling data
  1733. * from_size > 0, to_size > 0, seed := 0 --> diffing data
  1734. *
  1735. * Even for diffing, from_size and to_size don't need to be equal.
  1736. */
  1737. if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
  1738. diff_size > sizeof(sp->diff)))
  1739. return -EINVAL;
  1740. for (i = 0; i < from_size / sizeof(__be32); i++, j++)
  1741. sp->diff[j] = ~from[i];
  1742. for (i = 0; i < to_size / sizeof(__be32); i++, j++)
  1743. sp->diff[j] = to[i];
  1744. return csum_partial(sp->diff, diff_size, seed);
  1745. }
  1746. static const struct bpf_func_proto bpf_csum_diff_proto = {
  1747. .func = bpf_csum_diff,
  1748. .gpl_only = false,
  1749. .pkt_access = true,
  1750. .ret_type = RET_INTEGER,
  1751. .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
  1752. .arg2_type = ARG_CONST_SIZE_OR_ZERO,
  1753. .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
  1754. .arg4_type = ARG_CONST_SIZE_OR_ZERO,
  1755. .arg5_type = ARG_ANYTHING,
  1756. };
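/* Illustrative BPF program side combination (sketch; the offsets and the
 * old/new values are assumed to have been computed by the program): fold
 * an IPv4 daddr rewrite into the L4 checksum via a csum diff:
 *
 *	__be32 old_ip, new_ip;
 *	__s64 diff;
 *
 *	diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);
 *	bpf_skb_store_bytes(skb, ip_daddr_off, &new_ip, 4, 0);
 *	bpf_l4_csum_replace(skb, l4_csum_off, 0, diff, BPF_F_PSEUDO_HDR);
 *
 * Passing from == 0 with a zero field size in flags selects the
 * "replace by diff" case of bpf_l4_csum_replace() above.
 */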
  1757. BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
  1758. {
  1759. /* The interface is to be used in combination with bpf_csum_diff()
  1760. * for direct packet writes. csum rotation for alignment as well
  1761. * as emulating csum_sub() can be done from the eBPF program.
  1762. */
  1763. if (skb->ip_summed == CHECKSUM_COMPLETE)
  1764. return (skb->csum = csum_add(skb->csum, csum));
  1765. return -ENOTSUPP;
  1766. }
  1767. static const struct bpf_func_proto bpf_csum_update_proto = {
  1768. .func = bpf_csum_update,
  1769. .gpl_only = false,
  1770. .ret_type = RET_INTEGER,
  1771. .arg1_type = ARG_PTR_TO_CTX,
  1772. .arg2_type = ARG_ANYTHING,
  1773. };
  1774. BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level)
  1775. {
  1776. /* The interface is to be used in combination with bpf_skb_adjust_room()
  1777. * for encap/decap of packet headers when BPF_F_ADJ_ROOM_NO_CSUM_RESET
  1778. * is passed as flags, for example.
  1779. */
  1780. switch (level) {
  1781. case BPF_CSUM_LEVEL_INC:
  1782. __skb_incr_checksum_unnecessary(skb);
  1783. break;
  1784. case BPF_CSUM_LEVEL_DEC:
  1785. __skb_decr_checksum_unnecessary(skb);
  1786. break;
  1787. case BPF_CSUM_LEVEL_RESET:
  1788. __skb_reset_checksum_unnecessary(skb);
  1789. break;
  1790. case BPF_CSUM_LEVEL_QUERY:
  1791. return skb->ip_summed == CHECKSUM_UNNECESSARY ?
  1792. skb->csum_level : -EACCES;
  1793. default:
  1794. return -EINVAL;
  1795. }
  1796. return 0;
  1797. }
  1798. static const struct bpf_func_proto bpf_csum_level_proto = {
  1799. .func = bpf_csum_level,
  1800. .gpl_only = false,
  1801. .ret_type = RET_INTEGER,
  1802. .arg1_type = ARG_PTR_TO_CTX,
  1803. .arg2_type = ARG_ANYTHING,
  1804. };
  1805. static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
  1806. {
  1807. return dev_forward_skb(dev, skb);
  1808. }
  1809. static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
  1810. struct sk_buff *skb)
  1811. {
  1812. int ret = ____dev_forward_skb(dev, skb);
  1813. if (likely(!ret)) {
  1814. skb->dev = dev;
  1815. ret = netif_rx(skb);
  1816. }
  1817. return ret;
  1818. }
  1819. static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
  1820. {
  1821. int ret;
  1822. if (dev_xmit_recursion()) {
  1823. net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
  1824. kfree_skb(skb);
  1825. return -ENETDOWN;
  1826. }
  1827. skb->dev = dev;
  1828. skb->tstamp = 0;
  1829. dev_xmit_recursion_inc();
  1830. ret = dev_queue_xmit(skb);
  1831. dev_xmit_recursion_dec();
  1832. return ret;
  1833. }
  1834. static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
  1835. u32 flags)
  1836. {
  1837. unsigned int mlen = skb_network_offset(skb);
  1838. if (mlen) {
  1839. __skb_pull(skb, mlen);
		/* At ingress, the mac header has already been pulled once.
		 * At egress, skb_postpull_rcsum has to be done in case the
		 * skb originated from ingress (i.e. a forwarded skb)
		 * to ensure that rcsum starts at net header.
		 */
  1845. if (!skb_at_tc_ingress(skb))
  1846. skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
  1847. }
  1848. skb_pop_mac_header(skb);
  1849. skb_reset_mac_len(skb);
  1850. return flags & BPF_F_INGRESS ?
  1851. __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
  1852. }
  1853. static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
  1854. u32 flags)
  1855. {
  1856. /* Verify that a link layer header is carried */
  1857. if (unlikely(skb->mac_header >= skb->network_header)) {
  1858. kfree_skb(skb);
  1859. return -ERANGE;
  1860. }
  1861. bpf_push_mac_rcsum(skb);
  1862. return flags & BPF_F_INGRESS ?
  1863. __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
  1864. }
  1865. static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
  1866. u32 flags)
  1867. {
  1868. if (dev_is_mac_header_xmit(dev))
  1869. return __bpf_redirect_common(skb, dev, flags);
  1870. else
  1871. return __bpf_redirect_no_mac(skb, dev, flags);
  1872. }
  1873. #if IS_ENABLED(CONFIG_IPV6)
  1874. static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
  1875. struct net_device *dev, struct bpf_nh_params *nh)
  1876. {
  1877. u32 hh_len = LL_RESERVED_SPACE(dev);
  1878. const struct in6_addr *nexthop;
  1879. struct dst_entry *dst = NULL;
  1880. struct neighbour *neigh;
  1881. if (dev_xmit_recursion()) {
  1882. net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
  1883. goto out_drop;
  1884. }
  1885. skb->dev = dev;
  1886. skb->tstamp = 0;
  1887. if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
  1888. struct sk_buff *skb2;
  1889. skb2 = skb_realloc_headroom(skb, hh_len);
  1890. if (unlikely(!skb2)) {
  1891. kfree_skb(skb);
  1892. return -ENOMEM;
  1893. }
  1894. if (skb->sk)
  1895. skb_set_owner_w(skb2, skb->sk);
  1896. consume_skb(skb);
  1897. skb = skb2;
  1898. }
  1899. rcu_read_lock_bh();
  1900. if (!nh) {
  1901. dst = skb_dst(skb);
  1902. nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
  1903. &ipv6_hdr(skb)->daddr);
  1904. } else {
  1905. nexthop = &nh->ipv6_nh;
  1906. }
  1907. neigh = ip_neigh_gw6(dev, nexthop);
  1908. if (likely(!IS_ERR(neigh))) {
  1909. int ret;
  1910. sock_confirm_neigh(skb, neigh);
  1911. dev_xmit_recursion_inc();
  1912. ret = neigh_output(neigh, skb, false);
  1913. dev_xmit_recursion_dec();
  1914. rcu_read_unlock_bh();
  1915. return ret;
  1916. }
  1917. rcu_read_unlock_bh();
  1918. if (dst)
  1919. IP6_INC_STATS(dev_net(dst->dev),
  1920. ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
  1921. out_drop:
  1922. kfree_skb(skb);
  1923. return -ENETDOWN;
  1924. }
  1925. static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
  1926. struct bpf_nh_params *nh)
  1927. {
  1928. const struct ipv6hdr *ip6h = ipv6_hdr(skb);
  1929. struct net *net = dev_net(dev);
  1930. int err, ret = NET_XMIT_DROP;
  1931. if (!nh) {
  1932. struct dst_entry *dst;
  1933. struct flowi6 fl6 = {
  1934. .flowi6_flags = FLOWI_FLAG_ANYSRC,
  1935. .flowi6_mark = skb->mark,
  1936. .flowlabel = ip6_flowinfo(ip6h),
  1937. .flowi6_oif = dev->ifindex,
  1938. .flowi6_proto = ip6h->nexthdr,
  1939. .daddr = ip6h->daddr,
  1940. .saddr = ip6h->saddr,
  1941. };
  1942. dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
  1943. if (IS_ERR(dst))
  1944. goto out_drop;
  1945. skb_dst_set(skb, dst);
  1946. } else if (nh->nh_family != AF_INET6) {
  1947. goto out_drop;
  1948. }
  1949. err = bpf_out_neigh_v6(net, skb, dev, nh);
  1950. if (unlikely(net_xmit_eval(err)))
  1951. dev->stats.tx_errors++;
  1952. else
  1953. ret = NET_XMIT_SUCCESS;
  1954. goto out_xmit;
  1955. out_drop:
  1956. dev->stats.tx_errors++;
  1957. kfree_skb(skb);
  1958. out_xmit:
  1959. return ret;
  1960. }
  1961. #else
  1962. static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
  1963. struct bpf_nh_params *nh)
  1964. {
  1965. kfree_skb(skb);
  1966. return NET_XMIT_DROP;
  1967. }
  1968. #endif /* CONFIG_IPV6 */
  1969. #if IS_ENABLED(CONFIG_INET)
  1970. static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
  1971. struct net_device *dev, struct bpf_nh_params *nh)
  1972. {
  1973. u32 hh_len = LL_RESERVED_SPACE(dev);
  1974. struct neighbour *neigh;
  1975. bool is_v6gw = false;
  1976. if (dev_xmit_recursion()) {
  1977. net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
  1978. goto out_drop;
  1979. }
  1980. skb->dev = dev;
  1981. skb->tstamp = 0;
  1982. if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
  1983. struct sk_buff *skb2;
  1984. skb2 = skb_realloc_headroom(skb, hh_len);
  1985. if (unlikely(!skb2)) {
  1986. kfree_skb(skb);
  1987. return -ENOMEM;
  1988. }
  1989. if (skb->sk)
  1990. skb_set_owner_w(skb2, skb->sk);
  1991. consume_skb(skb);
  1992. skb = skb2;
  1993. }
  1994. rcu_read_lock_bh();
  1995. if (!nh) {
  1996. struct dst_entry *dst = skb_dst(skb);
  1997. struct rtable *rt = container_of(dst, struct rtable, dst);
  1998. neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
  1999. } else if (nh->nh_family == AF_INET6) {
  2000. neigh = ip_neigh_gw6(dev, &nh->ipv6_nh);
  2001. is_v6gw = true;
  2002. } else if (nh->nh_family == AF_INET) {
  2003. neigh = ip_neigh_gw4(dev, nh->ipv4_nh);
  2004. } else {
  2005. rcu_read_unlock_bh();
  2006. goto out_drop;
  2007. }
  2008. if (likely(!IS_ERR(neigh))) {
  2009. int ret;
  2010. sock_confirm_neigh(skb, neigh);
  2011. dev_xmit_recursion_inc();
  2012. ret = neigh_output(neigh, skb, is_v6gw);
  2013. dev_xmit_recursion_dec();
  2014. rcu_read_unlock_bh();
  2015. return ret;
  2016. }
  2017. rcu_read_unlock_bh();
  2018. out_drop:
  2019. kfree_skb(skb);
  2020. return -ENETDOWN;
  2021. }
  2022. static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
  2023. struct bpf_nh_params *nh)
  2024. {
  2025. const struct iphdr *ip4h = ip_hdr(skb);
  2026. struct net *net = dev_net(dev);
  2027. int err, ret = NET_XMIT_DROP;
  2028. if (!nh) {
  2029. struct flowi4 fl4 = {
  2030. .flowi4_flags = FLOWI_FLAG_ANYSRC,
  2031. .flowi4_mark = skb->mark,
  2032. .flowi4_tos = RT_TOS(ip4h->tos),
  2033. .flowi4_oif = dev->ifindex,
  2034. .flowi4_proto = ip4h->protocol,
  2035. .daddr = ip4h->daddr,
  2036. .saddr = ip4h->saddr,
  2037. };
  2038. struct rtable *rt;
  2039. rt = ip_route_output_flow(net, &fl4, NULL);
  2040. if (IS_ERR(rt))
  2041. goto out_drop;
  2042. if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
  2043. ip_rt_put(rt);
  2044. goto out_drop;
  2045. }
  2046. skb_dst_set(skb, &rt->dst);
  2047. }
  2048. err = bpf_out_neigh_v4(net, skb, dev, nh);
  2049. if (unlikely(net_xmit_eval(err)))
  2050. dev->stats.tx_errors++;
  2051. else
  2052. ret = NET_XMIT_SUCCESS;
  2053. goto out_xmit;
  2054. out_drop:
  2055. dev->stats.tx_errors++;
  2056. kfree_skb(skb);
  2057. out_xmit:
  2058. return ret;
  2059. }
  2060. #else
  2061. static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
  2062. struct bpf_nh_params *nh)
  2063. {
  2064. kfree_skb(skb);
  2065. return NET_XMIT_DROP;
  2066. }
  2067. #endif /* CONFIG_INET */
  2068. static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev,
  2069. struct bpf_nh_params *nh)
  2070. {
  2071. struct ethhdr *ethh = eth_hdr(skb);
  2072. if (unlikely(skb->mac_header >= skb->network_header))
  2073. goto out;
  2074. bpf_push_mac_rcsum(skb);
  2075. if (is_multicast_ether_addr(ethh->h_dest))
  2076. goto out;
  2077. skb_pull(skb, sizeof(*ethh));
  2078. skb_unset_mac_header(skb);
  2079. skb_reset_network_header(skb);
  2080. if (skb->protocol == htons(ETH_P_IP))
  2081. return __bpf_redirect_neigh_v4(skb, dev, nh);
  2082. else if (skb->protocol == htons(ETH_P_IPV6))
  2083. return __bpf_redirect_neigh_v6(skb, dev, nh);
  2084. out:
  2085. kfree_skb(skb);
  2086. return -ENOTSUPP;
  2087. }
  2088. /* Internal, non-exposed redirect flags. */
  2089. enum {
  2090. BPF_F_NEIGH = (1ULL << 1),
  2091. BPF_F_PEER = (1ULL << 2),
  2092. BPF_F_NEXTHOP = (1ULL << 3),
  2093. #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
  2094. };
  2095. BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
  2096. {
  2097. struct net_device *dev;
  2098. struct sk_buff *clone;
  2099. int ret;
  2100. if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
  2101. return -EINVAL;
  2102. dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
  2103. if (unlikely(!dev))
  2104. return -EINVAL;
  2105. clone = skb_clone(skb, GFP_ATOMIC);
  2106. if (unlikely(!clone))
  2107. return -ENOMEM;
2108. /* For direct writes, we need to keep the invariant that the skbs
2109. * we're dealing with are uncloned. Should uncloning fail here,
2110. * we need to free the just-generated clone so that the skb is
2111. * uncloned once again.
2112. */
  2113. ret = bpf_try_make_head_writable(skb);
  2114. if (unlikely(ret)) {
  2115. kfree_skb(clone);
  2116. return -ENOMEM;
  2117. }
  2118. return __bpf_redirect(clone, dev, flags);
  2119. }
  2120. static const struct bpf_func_proto bpf_clone_redirect_proto = {
  2121. .func = bpf_clone_redirect,
  2122. .gpl_only = false,
  2123. .ret_type = RET_INTEGER,
  2124. .arg1_type = ARG_PTR_TO_CTX,
  2125. .arg2_type = ARG_ANYTHING,
  2126. .arg3_type = ARG_ANYTHING,
  2127. };
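/* Illustrative usage sketch (not part of the kernel sources): from a tc
 * BPF program, bpf_clone_redirect() mirrors the current packet to another
 * device while the original continues on its path. "mirror_ifindex" below
 * is a placeholder; BPF_F_INGRESS selects the target's ingress path.
 *
 *     bpf_clone_redirect(skb, mirror_ifindex, 0);             // egress of target
 *     bpf_clone_redirect(skb, mirror_ifindex, BPF_F_INGRESS); // ingress of target
 *     return TC_ACT_OK;
 */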
  2128. DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
  2129. EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
  2130. int skb_do_redirect(struct sk_buff *skb)
  2131. {
  2132. struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
  2133. struct net *net = dev_net(skb->dev);
  2134. struct net_device *dev;
  2135. u32 flags = ri->flags;
  2136. dev = dev_get_by_index_rcu(net, ri->tgt_index);
  2137. ri->tgt_index = 0;
  2138. ri->flags = 0;
  2139. if (unlikely(!dev))
  2140. goto out_drop;
  2141. if (flags & BPF_F_PEER) {
  2142. const struct net_device_ops *ops = dev->netdev_ops;
  2143. if (unlikely(!ops->ndo_get_peer_dev ||
  2144. !skb_at_tc_ingress(skb)))
  2145. goto out_drop;
  2146. dev = ops->ndo_get_peer_dev(dev);
  2147. if (unlikely(!dev ||
  2148. !is_skb_forwardable(dev, skb) ||
  2149. net_eq(net, dev_net(dev))))
  2150. goto out_drop;
  2151. skb->dev = dev;
  2152. return -EAGAIN;
  2153. }
  2154. return flags & BPF_F_NEIGH ?
  2155. __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ?
  2156. &ri->nh : NULL) :
  2157. __bpf_redirect(skb, dev, flags);
  2158. out_drop:
  2159. kfree_skb(skb);
  2160. return -EINVAL;
  2161. }
  2162. BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
  2163. {
  2164. struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
  2165. if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
  2166. return TC_ACT_SHOT;
  2167. ri->flags = flags;
  2168. ri->tgt_index = ifindex;
  2169. return TC_ACT_REDIRECT;
  2170. }
  2171. static const struct bpf_func_proto bpf_redirect_proto = {
  2172. .func = bpf_redirect,
  2173. .gpl_only = false,
  2174. .ret_type = RET_INTEGER,
  2175. .arg1_type = ARG_ANYTHING,
  2176. .arg2_type = ARG_ANYTHING,
  2177. };
  2178. BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
  2179. {
  2180. struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
  2181. if (unlikely(flags))
  2182. return TC_ACT_SHOT;
  2183. ri->flags = BPF_F_PEER;
  2184. ri->tgt_index = ifindex;
  2185. return TC_ACT_REDIRECT;
  2186. }
  2187. static const struct bpf_func_proto bpf_redirect_peer_proto = {
  2188. .func = bpf_redirect_peer,
  2189. .gpl_only = false,
  2190. .ret_type = RET_INTEGER,
  2191. .arg1_type = ARG_ANYTHING,
  2192. .arg2_type = ARG_ANYTHING,
  2193. };
  2194. BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
  2195. int, plen, u64, flags)
  2196. {
  2197. struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
  2198. if (unlikely((plen && plen < sizeof(*params)) || flags))
  2199. return TC_ACT_SHOT;
  2200. ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0);
  2201. ri->tgt_index = ifindex;
  2202. BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params));
  2203. if (plen)
  2204. memcpy(&ri->nh, params, sizeof(ri->nh));
  2205. return TC_ACT_REDIRECT;
  2206. }
  2207. static const struct bpf_func_proto bpf_redirect_neigh_proto = {
  2208. .func = bpf_redirect_neigh,
  2209. .gpl_only = false,
  2210. .ret_type = RET_INTEGER,
  2211. .arg1_type = ARG_ANYTHING,
  2212. .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
  2213. .arg3_type = ARG_CONST_SIZE_OR_ZERO,
  2214. .arg4_type = ARG_ANYTHING,
  2215. };
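/* Illustrative usage sketch (not part of the kernel sources): a tc egress
 * program can hand an L3 skb to the neighbor subsystem for L2 fill-in,
 * either resolving the nexthop from the skb's dst or supplying one
 * explicitly. "oif" and "gw_addr" are placeholders.
 *
 *     // Resolve nexthop via the skb's dst (params == NULL) ...
 *     return bpf_redirect_neigh(oif, NULL, 0, 0);
 *
 *     // ... or supply an explicit IPv4 nexthop:
 *     struct bpf_redir_neigh nh = { .nh_family = AF_INET,
 *                                   .ipv4_nh   = gw_addr };
 *     return bpf_redirect_neigh(oif, &nh, sizeof(nh), 0);
 */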
  2216. BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
  2217. {
  2218. msg->apply_bytes = bytes;
  2219. return 0;
  2220. }
  2221. static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
  2222. .func = bpf_msg_apply_bytes,
  2223. .gpl_only = false,
  2224. .ret_type = RET_INTEGER,
  2225. .arg1_type = ARG_PTR_TO_CTX,
  2226. .arg2_type = ARG_ANYTHING,
  2227. };
  2228. BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
  2229. {
  2230. msg->cork_bytes = bytes;
  2231. return 0;
  2232. }
  2233. static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
  2234. .func = bpf_msg_cork_bytes,
  2235. .gpl_only = false,
  2236. .ret_type = RET_INTEGER,
  2237. .arg1_type = ARG_PTR_TO_CTX,
  2238. .arg2_type = ARG_ANYTHING,
  2239. };
  2240. BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
  2241. u32, end, u64, flags)
  2242. {
  2243. u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
  2244. u32 first_sge, last_sge, i, shift, bytes_sg_total;
  2245. struct scatterlist *sge;
  2246. u8 *raw, *to, *from;
  2247. struct page *page;
  2248. if (unlikely(flags || end <= start))
  2249. return -EINVAL;
  2250. /* First find the starting scatterlist element */
  2251. i = msg->sg.start;
  2252. do {
  2253. offset += len;
  2254. len = sk_msg_elem(msg, i)->length;
  2255. if (start < offset + len)
  2256. break;
  2257. sk_msg_iter_var_next(i);
  2258. } while (i != msg->sg.end);
  2259. if (unlikely(start >= offset + len))
  2260. return -EINVAL;
  2261. first_sge = i;
  2262. /* The start may point into the sg element so we need to also
  2263. * account for the headroom.
  2264. */
  2265. bytes_sg_total = start - offset + bytes;
  2266. if (!test_bit(i, &msg->sg.copy) && bytes_sg_total <= len)
  2267. goto out;
  2268. /* At this point we need to linearize multiple scatterlist
  2269. * elements or a single shared page. Either way we need to
  2270. * copy into a linear buffer exclusively owned by BPF. Then
  2271. * place the buffer in the scatterlist and fixup the original
  2272. * entries by removing the entries now in the linear buffer
2273. * and shifting the remaining entries. For now we do not try
2274. * to copy partial entries, to avoid the complexity of running out
2275. * of sg_entry slots. The downside is that reading a single byte
2276. * will copy the entire sg entry.
  2277. */
  2278. do {
  2279. copy += sk_msg_elem(msg, i)->length;
  2280. sk_msg_iter_var_next(i);
  2281. if (bytes_sg_total <= copy)
  2282. break;
  2283. } while (i != msg->sg.end);
  2284. last_sge = i;
  2285. if (unlikely(bytes_sg_total > copy))
  2286. return -EINVAL;
  2287. page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
  2288. get_order(copy));
  2289. if (unlikely(!page))
  2290. return -ENOMEM;
  2291. raw = page_address(page);
  2292. i = first_sge;
  2293. do {
  2294. sge = sk_msg_elem(msg, i);
  2295. from = sg_virt(sge);
  2296. len = sge->length;
  2297. to = raw + poffset;
  2298. memcpy(to, from, len);
  2299. poffset += len;
  2300. sge->length = 0;
  2301. put_page(sg_page(sge));
  2302. sk_msg_iter_var_next(i);
  2303. } while (i != last_sge);
  2304. sg_set_page(&msg->sg.data[first_sge], page, copy, 0);
  2305. /* To repair sg ring we need to shift entries. If we only
  2306. * had a single entry though we can just replace it and
  2307. * be done. Otherwise walk the ring and shift the entries.
  2308. */
  2309. WARN_ON_ONCE(last_sge == first_sge);
  2310. shift = last_sge > first_sge ?
  2311. last_sge - first_sge - 1 :
  2312. NR_MSG_FRAG_IDS - first_sge + last_sge - 1;
  2313. if (!shift)
  2314. goto out;
  2315. i = first_sge;
  2316. sk_msg_iter_var_next(i);
  2317. do {
  2318. u32 move_from;
  2319. if (i + shift >= NR_MSG_FRAG_IDS)
  2320. move_from = i + shift - NR_MSG_FRAG_IDS;
  2321. else
  2322. move_from = i + shift;
  2323. if (move_from == msg->sg.end)
  2324. break;
  2325. msg->sg.data[i] = msg->sg.data[move_from];
  2326. msg->sg.data[move_from].length = 0;
  2327. msg->sg.data[move_from].page_link = 0;
  2328. msg->sg.data[move_from].offset = 0;
  2329. sk_msg_iter_var_next(i);
  2330. } while (1);
  2331. msg->sg.end = msg->sg.end - shift > msg->sg.end ?
  2332. msg->sg.end - shift + NR_MSG_FRAG_IDS :
  2333. msg->sg.end - shift;
  2334. out:
  2335. msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
  2336. msg->data_end = msg->data + bytes;
  2337. return 0;
  2338. }
  2339. static const struct bpf_func_proto bpf_msg_pull_data_proto = {
  2340. .func = bpf_msg_pull_data,
  2341. .gpl_only = false,
  2342. .ret_type = RET_INTEGER,
  2343. .arg1_type = ARG_PTR_TO_CTX,
  2344. .arg2_type = ARG_ANYTHING,
  2345. .arg3_type = ARG_ANYTHING,
  2346. .arg4_type = ARG_ANYTHING,
  2347. };
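/* Illustrative usage sketch (not part of the kernel sources): an sk_msg
 * program must call this helper before touching msg data that is not yet
 * linear, e.g. to read an application-level header; "hdr" is a placeholder
 * for the caller's header type.
 *
 *     if (bpf_msg_pull_data(msg, 0, sizeof(struct hdr), 0))
 *             return SK_DROP;
 *     // msg->data .. msg->data_end now covers [0, sizeof(struct hdr))
 */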
  2348. BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
  2349. u32, len, u64, flags)
  2350. {
  2351. struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
  2352. u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
  2353. u8 *raw, *to, *from;
  2354. struct page *page;
  2355. if (unlikely(flags))
  2356. return -EINVAL;
  2357. if (unlikely(len == 0))
  2358. return 0;
  2359. /* First find the starting scatterlist element */
  2360. i = msg->sg.start;
  2361. do {
  2362. offset += l;
  2363. l = sk_msg_elem(msg, i)->length;
  2364. if (start < offset + l)
  2365. break;
  2366. sk_msg_iter_var_next(i);
  2367. } while (i != msg->sg.end);
  2368. if (start >= offset + l)
  2369. return -EINVAL;
  2370. space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2371. /* If no space is available we will fall back to copy. We need at
2372. * least one free scatterlist elem to push data into when start
2373. * aligns to the beginning of an element, or two elems when start
2374. * falls inside an element. We handle the start == offset case
2375. * separately because it's the common case for inserting a
2376. * header.
2377. */
  2378. if (!space || (space == 1 && start != offset))
  2379. copy = msg->sg.data[i].length;
  2380. page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
  2381. get_order(copy + len));
  2382. if (unlikely(!page))
  2383. return -ENOMEM;
  2384. if (copy) {
  2385. int front, back;
  2386. raw = page_address(page);
  2387. psge = sk_msg_elem(msg, i);
  2388. front = start - offset;
  2389. back = psge->length - front;
  2390. from = sg_virt(psge);
  2391. if (front)
  2392. memcpy(raw, from, front);
  2393. if (back) {
  2394. from += front;
  2395. to = raw + front + len;
  2396. memcpy(to, from, back);
  2397. }
  2398. put_page(sg_page(psge));
  2399. } else if (start - offset) {
  2400. psge = sk_msg_elem(msg, i);
  2401. rsge = sk_msg_elem_cpy(msg, i);
  2402. psge->length = start - offset;
  2403. rsge.length -= psge->length;
  2404. rsge.offset += start;
  2405. sk_msg_iter_var_next(i);
  2406. sg_unmark_end(psge);
  2407. sg_unmark_end(&rsge);
  2408. sk_msg_iter_next(msg, end);
  2409. }
  2410. /* Slot(s) to place newly allocated data */
  2411. new = i;
  2412. /* Shift one or two slots as needed */
  2413. if (!copy) {
  2414. sge = sk_msg_elem_cpy(msg, i);
  2415. sk_msg_iter_var_next(i);
  2416. sg_unmark_end(&sge);
  2417. sk_msg_iter_next(msg, end);
  2418. nsge = sk_msg_elem_cpy(msg, i);
  2419. if (rsge.length) {
  2420. sk_msg_iter_var_next(i);
  2421. nnsge = sk_msg_elem_cpy(msg, i);
  2422. }
  2423. while (i != msg->sg.end) {
  2424. msg->sg.data[i] = sge;
  2425. sge = nsge;
  2426. sk_msg_iter_var_next(i);
  2427. if (rsge.length) {
  2428. nsge = nnsge;
  2429. nnsge = sk_msg_elem_cpy(msg, i);
  2430. } else {
  2431. nsge = sk_msg_elem_cpy(msg, i);
  2432. }
  2433. }
  2434. }
  2435. /* Place newly allocated data buffer */
  2436. sk_mem_charge(msg->sk, len);
  2437. msg->sg.size += len;
  2438. __clear_bit(new, &msg->sg.copy);
  2439. sg_set_page(&msg->sg.data[new], page, len + copy, 0);
  2440. if (rsge.length) {
  2441. get_page(sg_page(&rsge));
  2442. sk_msg_iter_var_next(new);
  2443. msg->sg.data[new] = rsge;
  2444. }
  2445. sk_msg_compute_data_pointers(msg);
  2446. return 0;
  2447. }
  2448. static const struct bpf_func_proto bpf_msg_push_data_proto = {
  2449. .func = bpf_msg_push_data,
  2450. .gpl_only = false,
  2451. .ret_type = RET_INTEGER,
  2452. .arg1_type = ARG_PTR_TO_CTX,
  2453. .arg2_type = ARG_ANYTHING,
  2454. .arg3_type = ARG_ANYTHING,
  2455. .arg4_type = ARG_ANYTHING,
  2456. };
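/* Illustrative usage sketch (not part of the kernel sources): inserting
 * room for a prepended header at the start of the message. The new bytes
 * should be treated as uninitialized and written by the program afterwards
 * (after making the region linear via bpf_msg_pull_data()); "HDR_LEN" is a
 * placeholder constant.
 *
 *     if (bpf_msg_push_data(msg, 0, HDR_LEN, 0))
 *             return SK_DROP;
 */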
  2457. static void sk_msg_shift_left(struct sk_msg *msg, int i)
  2458. {
  2459. int prev;
  2460. do {
  2461. prev = i;
  2462. sk_msg_iter_var_next(i);
  2463. msg->sg.data[prev] = msg->sg.data[i];
  2464. } while (i != msg->sg.end);
  2465. sk_msg_iter_prev(msg, end);
  2466. }
  2467. static void sk_msg_shift_right(struct sk_msg *msg, int i)
  2468. {
  2469. struct scatterlist tmp, sge;
  2470. sk_msg_iter_next(msg, end);
  2471. sge = sk_msg_elem_cpy(msg, i);
  2472. sk_msg_iter_var_next(i);
  2473. tmp = sk_msg_elem_cpy(msg, i);
  2474. while (i != msg->sg.end) {
  2475. msg->sg.data[i] = sge;
  2476. sk_msg_iter_var_next(i);
  2477. sge = tmp;
  2478. tmp = sk_msg_elem_cpy(msg, i);
  2479. }
  2480. }
  2481. BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
  2482. u32, len, u64, flags)
  2483. {
  2484. u32 i = 0, l = 0, space, offset = 0;
  2485. u64 last = start + len;
  2486. int pop;
  2487. if (unlikely(flags))
  2488. return -EINVAL;
  2489. /* First find the starting scatterlist element */
  2490. i = msg->sg.start;
  2491. do {
  2492. offset += l;
  2493. l = sk_msg_elem(msg, i)->length;
  2494. if (start < offset + l)
  2495. break;
  2496. sk_msg_iter_var_next(i);
  2497. } while (i != msg->sg.end);
  2498. /* Bounds checks: start and pop must be inside message */
  2499. if (start >= offset + l || last >= msg->sg.size)
  2500. return -EINVAL;
  2501. space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
  2502. pop = len;
  2503. /* --------------| offset
  2504. * -| start |-------- len -------|
  2505. *
  2506. * |----- a ----|-------- pop -------|----- b ----|
  2507. * |______________________________________________| length
  2508. *
  2509. *
2510. * a: region at front of scatter element to save
2511. * b: region at back of scatter element to save when length > a + pop
2512. * pop: region to pop from element; same as input 'pop', decremented
2513. * below per iteration.
2514. *
2515. * Two top-level cases to handle when start != offset: first, b is
2516. * non-zero, and second, b is zero, corresponding to when a pop
2517. * includes more than one element.
2518. *
2519. * Then, if b is non-zero AND there is no space, allocate space and
2520. * compact the a and b regions into a page. If there is space, shift
2521. * the ring to the right, freeing the next element in the ring to
2522. * place b, leaving a untouched except to reduce its length.
2523. */
  2524. if (start != offset) {
  2525. struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
  2526. int a = start;
  2527. int b = sge->length - pop - a;
  2528. sk_msg_iter_var_next(i);
  2529. if (pop < sge->length - a) {
  2530. if (space) {
  2531. sge->length = a;
  2532. sk_msg_shift_right(msg, i);
  2533. nsge = sk_msg_elem(msg, i);
  2534. get_page(sg_page(sge));
  2535. sg_set_page(nsge,
  2536. sg_page(sge),
  2537. b, sge->offset + pop + a);
  2538. } else {
  2539. struct page *page, *orig;
  2540. u8 *to, *from;
  2541. page = alloc_pages(__GFP_NOWARN |
  2542. __GFP_COMP | GFP_ATOMIC,
  2543. get_order(a + b));
  2544. if (unlikely(!page))
  2545. return -ENOMEM;
  2546. sge->length = a;
  2547. orig = sg_page(sge);
  2548. from = sg_virt(sge);
  2549. to = page_address(page);
  2550. memcpy(to, from, a);
  2551. memcpy(to + a, from + a + pop, b);
  2552. sg_set_page(sge, page, a + b, 0);
  2553. put_page(orig);
  2554. }
  2555. pop = 0;
  2556. } else if (pop >= sge->length - a) {
  2557. pop -= (sge->length - a);
  2558. sge->length = a;
  2559. }
  2560. }
  2561. /* From above the current layout _must_ be as follows,
  2562. *
  2563. * -| offset
  2564. * -| start
  2565. *
  2566. * |---- pop ---|---------------- b ------------|
  2567. * |____________________________________________| length
  2568. *
  2569. * Offset and start of the current msg elem are equal because in the
  2570. * previous case we handled offset != start and either consumed the
  2571. * entire element and advanced to the next element OR pop == 0.
  2572. *
2573. * Two cases to handle here: first, pop is less than the length,
2574. * leaving some remainder b above; simply adjust the element's layout
2575. * in this case. Or pop >= length of the element, so that b = 0; in
2576. * this case advance to the next element, decrementing pop.
  2577. */
  2578. while (pop) {
  2579. struct scatterlist *sge = sk_msg_elem(msg, i);
  2580. if (pop < sge->length) {
  2581. sge->length -= pop;
  2582. sge->offset += pop;
  2583. pop = 0;
  2584. } else {
  2585. pop -= sge->length;
  2586. sk_msg_shift_left(msg, i);
  2587. }
  2588. sk_msg_iter_var_next(i);
  2589. }
  2590. sk_mem_uncharge(msg->sk, len - pop);
  2591. msg->sg.size -= (len - pop);
  2592. sk_msg_compute_data_pointers(msg);
  2593. return 0;
  2594. }
  2595. static const struct bpf_func_proto bpf_msg_pop_data_proto = {
  2596. .func = bpf_msg_pop_data,
  2597. .gpl_only = false,
  2598. .ret_type = RET_INTEGER,
  2599. .arg1_type = ARG_PTR_TO_CTX,
  2600. .arg2_type = ARG_ANYTHING,
  2601. .arg3_type = ARG_ANYTHING,
  2602. .arg4_type = ARG_ANYTHING,
  2603. };
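/* Illustrative usage sketch (not part of the kernel sources): removing a
 * header that was previously inserted at the front of the message; HDR_LEN
 * is a placeholder constant.
 *
 *     if (bpf_msg_pop_data(msg, 0, HDR_LEN, 0))
 *             return SK_DROP;
 */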
  2604. #ifdef CONFIG_CGROUP_NET_CLASSID
  2605. BPF_CALL_0(bpf_get_cgroup_classid_curr)
  2606. {
  2607. return __task_get_classid(current);
  2608. }
  2609. static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = {
  2610. .func = bpf_get_cgroup_classid_curr,
  2611. .gpl_only = false,
  2612. .ret_type = RET_INTEGER,
  2613. };
  2614. BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb)
  2615. {
  2616. struct sock *sk = skb_to_full_sk(skb);
  2617. if (!sk || !sk_fullsock(sk))
  2618. return 0;
  2619. return sock_cgroup_classid(&sk->sk_cgrp_data);
  2620. }
  2621. static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = {
  2622. .func = bpf_skb_cgroup_classid,
  2623. .gpl_only = false,
  2624. .ret_type = RET_INTEGER,
  2625. .arg1_type = ARG_PTR_TO_CTX,
  2626. };
  2627. #endif
  2628. BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
  2629. {
  2630. return task_get_classid(skb);
  2631. }
  2632. static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
  2633. .func = bpf_get_cgroup_classid,
  2634. .gpl_only = false,
  2635. .ret_type = RET_INTEGER,
  2636. .arg1_type = ARG_PTR_TO_CTX,
  2637. };
  2638. BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
  2639. {
  2640. return dst_tclassid(skb);
  2641. }
  2642. static const struct bpf_func_proto bpf_get_route_realm_proto = {
  2643. .func = bpf_get_route_realm,
  2644. .gpl_only = false,
  2645. .ret_type = RET_INTEGER,
  2646. .arg1_type = ARG_PTR_TO_CTX,
  2647. };
  2648. BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
  2649. {
  2650. /* If skb_clear_hash() was called due to mangling, we can
  2651. * trigger SW recalculation here. Later access to hash
  2652. * can then use the inline skb->hash via context directly
  2653. * instead of calling this helper again.
  2654. */
  2655. return skb_get_hash(skb);
  2656. }
  2657. static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
  2658. .func = bpf_get_hash_recalc,
  2659. .gpl_only = false,
  2660. .ret_type = RET_INTEGER,
  2661. .arg1_type = ARG_PTR_TO_CTX,
  2662. };
  2663. BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
  2664. {
2665. /* After all direct packet writes, this can be used once to
2666. * trigger a lazy recalc on the next skb_get_hash() invocation.
2667. */
  2668. skb_clear_hash(skb);
  2669. return 0;
  2670. }
  2671. static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
  2672. .func = bpf_set_hash_invalid,
  2673. .gpl_only = false,
  2674. .ret_type = RET_INTEGER,
  2675. .arg1_type = ARG_PTR_TO_CTX,
  2676. };
  2677. BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
  2678. {
  2679. /* Set user specified hash as L4(+), so that it gets returned
  2680. * on skb_get_hash() call unless BPF prog later on triggers a
  2681. * skb_clear_hash().
  2682. */
  2683. __skb_set_sw_hash(skb, hash, true);
  2684. return 0;
  2685. }
  2686. static const struct bpf_func_proto bpf_set_hash_proto = {
  2687. .func = bpf_set_hash,
  2688. .gpl_only = false,
  2689. .ret_type = RET_INTEGER,
  2690. .arg1_type = ARG_PTR_TO_CTX,
  2691. .arg2_type = ARG_ANYTHING,
  2692. };
  2693. BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
  2694. u16, vlan_tci)
  2695. {
  2696. int ret;
  2697. if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
  2698. vlan_proto != htons(ETH_P_8021AD)))
  2699. vlan_proto = htons(ETH_P_8021Q);
  2700. bpf_push_mac_rcsum(skb);
  2701. ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
  2702. bpf_pull_mac_rcsum(skb);
  2703. bpf_compute_data_pointers(skb);
  2704. return ret;
  2705. }
  2706. static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
  2707. .func = bpf_skb_vlan_push,
  2708. .gpl_only = false,
  2709. .ret_type = RET_INTEGER,
  2710. .arg1_type = ARG_PTR_TO_CTX,
  2711. .arg2_type = ARG_ANYTHING,
  2712. .arg3_type = ARG_ANYTHING,
  2713. };
  2714. BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
  2715. {
  2716. int ret;
  2717. bpf_push_mac_rcsum(skb);
  2718. ret = skb_vlan_pop(skb);
  2719. bpf_pull_mac_rcsum(skb);
  2720. bpf_compute_data_pointers(skb);
  2721. return ret;
  2722. }
  2723. static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
  2724. .func = bpf_skb_vlan_pop,
  2725. .gpl_only = false,
  2726. .ret_type = RET_INTEGER,
  2727. .arg1_type = ARG_PTR_TO_CTX,
  2728. };
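/* Illustrative usage sketch (not part of the kernel sources): re-tagging a
 * packet from a tc program; "new_vid" is a placeholder.
 *
 *     bpf_skb_vlan_pop(skb);
 *     bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), new_vid);
 *     // The helpers recompute the data pointers, so re-derive any cached
 *     // pointers into the packet before further direct access.
 */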
  2729. static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
  2730. {
  2731. /* Caller already did skb_cow() with len as headroom,
  2732. * so no need to do it here.
  2733. */
  2734. skb_push(skb, len);
  2735. memmove(skb->data, skb->data + len, off);
  2736. memset(skb->data + off, 0, len);
  2737. /* No skb_postpush_rcsum(skb, skb->data + off, len)
  2738. * needed here as it does not change the skb->csum
  2739. * result for checksum complete when summing over
  2740. * zeroed blocks.
  2741. */
  2742. return 0;
  2743. }
  2744. static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
  2745. {
  2746. /* skb_ensure_writable() is not needed here, as we're
  2747. * already working on an uncloned skb.
  2748. */
  2749. if (unlikely(!pskb_may_pull(skb, off + len)))
  2750. return -ENOMEM;
  2751. skb_postpull_rcsum(skb, skb->data + off, len);
  2752. memmove(skb->data + len, skb->data, off);
  2753. __skb_pull(skb, len);
  2754. return 0;
  2755. }
  2756. static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
  2757. {
  2758. bool trans_same = skb->transport_header == skb->network_header;
  2759. int ret;
  2760. /* There's no need for __skb_push()/__skb_pull() pair to
  2761. * get to the start of the mac header as we're guaranteed
  2762. * to always start from here under eBPF.
  2763. */
  2764. ret = bpf_skb_generic_push(skb, off, len);
  2765. if (likely(!ret)) {
  2766. skb->mac_header -= len;
  2767. skb->network_header -= len;
  2768. if (trans_same)
  2769. skb->transport_header = skb->network_header;
  2770. }
  2771. return ret;
  2772. }
  2773. static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
  2774. {
  2775. bool trans_same = skb->transport_header == skb->network_header;
  2776. int ret;
  2777. /* Same here, __skb_push()/__skb_pull() pair not needed. */
  2778. ret = bpf_skb_generic_pop(skb, off, len);
  2779. if (likely(!ret)) {
  2780. skb->mac_header += len;
  2781. skb->network_header += len;
  2782. if (trans_same)
  2783. skb->transport_header = skb->network_header;
  2784. }
  2785. return ret;
  2786. }
  2787. static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
  2788. {
  2789. const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
  2790. u32 off = skb_mac_header_len(skb);
  2791. int ret;
  2792. ret = skb_cow(skb, len_diff);
  2793. if (unlikely(ret < 0))
  2794. return ret;
  2795. ret = bpf_skb_net_hdr_push(skb, off, len_diff);
  2796. if (unlikely(ret < 0))
  2797. return ret;
  2798. if (skb_is_gso(skb)) {
  2799. struct skb_shared_info *shinfo = skb_shinfo(skb);
  2800. /* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */
  2801. if (shinfo->gso_type & SKB_GSO_TCPV4) {
  2802. shinfo->gso_type &= ~SKB_GSO_TCPV4;
  2803. shinfo->gso_type |= SKB_GSO_TCPV6;
  2804. }
  2805. }
  2806. skb->protocol = htons(ETH_P_IPV6);
  2807. skb_clear_hash(skb);
  2808. return 0;
  2809. }
  2810. static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
  2811. {
  2812. const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
  2813. u32 off = skb_mac_header_len(skb);
  2814. int ret;
  2815. ret = skb_unclone(skb, GFP_ATOMIC);
  2816. if (unlikely(ret < 0))
  2817. return ret;
  2818. ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
  2819. if (unlikely(ret < 0))
  2820. return ret;
  2821. if (skb_is_gso(skb)) {
  2822. struct skb_shared_info *shinfo = skb_shinfo(skb);
  2823. /* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. */
  2824. if (shinfo->gso_type & SKB_GSO_TCPV6) {
  2825. shinfo->gso_type &= ~SKB_GSO_TCPV6;
  2826. shinfo->gso_type |= SKB_GSO_TCPV4;
  2827. }
  2828. }
  2829. skb->protocol = htons(ETH_P_IP);
  2830. skb_clear_hash(skb);
  2831. return 0;
  2832. }
  2833. static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
  2834. {
  2835. __be16 from_proto = skb->protocol;
  2836. if (from_proto == htons(ETH_P_IP) &&
  2837. to_proto == htons(ETH_P_IPV6))
  2838. return bpf_skb_proto_4_to_6(skb);
  2839. if (from_proto == htons(ETH_P_IPV6) &&
  2840. to_proto == htons(ETH_P_IP))
  2841. return bpf_skb_proto_6_to_4(skb);
  2842. return -ENOTSUPP;
  2843. }
  2844. BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
  2845. u64, flags)
  2846. {
  2847. int ret;
  2848. if (unlikely(flags))
  2849. return -EINVAL;
  2850. /* General idea is that this helper does the basic groundwork
  2851. * needed for changing the protocol, and eBPF program fills the
  2852. * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
  2853. * and other helpers, rather than passing a raw buffer here.
  2854. *
  2855. * The rationale is to keep this minimal and without a need to
2856. * deal with raw packet data. E.g. even if we were to pass buffers
2857. * here, the program would still need to call the bpf_lX_csum_replace()
2858. * helpers anyway. Plus, this way we also keep separation of
2859. * concerns, since e.g. bpf_skb_store_bytes() should only take
2860. * care of stores.
2861. *
2862. * Currently, additional options and extension header space are
2863. * not supported, but the flags argument is reserved so we can
2864. * adapt that later. For offloads, we mark the packet as dodgy, so
2865. * that headers need to be verified first.
  2866. */
  2867. ret = bpf_skb_proto_xlat(skb, proto);
  2868. bpf_compute_data_pointers(skb);
  2869. return ret;
  2870. }
  2871. static const struct bpf_func_proto bpf_skb_change_proto_proto = {
  2872. .func = bpf_skb_change_proto,
  2873. .gpl_only = false,
  2874. .ret_type = RET_INTEGER,
  2875. .arg1_type = ARG_PTR_TO_CTX,
  2876. .arg2_type = ARG_ANYTHING,
  2877. .arg3_type = ARG_ANYTHING,
  2878. };
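/* Illustrative usage sketch (not part of the kernel sources): a NAT64-style
 * translation first switches the skb to IPv6 and then rewrites the headers
 * with the usual store/csum helpers from the program side.
 *
 *     if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
 *             return TC_ACT_SHOT;
 *     // then: bpf_skb_store_bytes() the new IPv6 header,
 *     //       bpf_l4_csum_replace() for the pseudo-header delta, etc.
 */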
  2879. BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
  2880. {
  2881. /* We only allow a restricted subset to be changed for now. */
  2882. if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
  2883. !skb_pkt_type_ok(pkt_type)))
  2884. return -EINVAL;
  2885. skb->pkt_type = pkt_type;
  2886. return 0;
  2887. }
  2888. static const struct bpf_func_proto bpf_skb_change_type_proto = {
  2889. .func = bpf_skb_change_type,
  2890. .gpl_only = false,
  2891. .ret_type = RET_INTEGER,
  2892. .arg1_type = ARG_PTR_TO_CTX,
  2893. .arg2_type = ARG_ANYTHING,
  2894. };
  2895. static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
  2896. {
  2897. switch (skb->protocol) {
  2898. case htons(ETH_P_IP):
  2899. return sizeof(struct iphdr);
  2900. case htons(ETH_P_IPV6):
  2901. return sizeof(struct ipv6hdr);
  2902. default:
  2903. return ~0U;
  2904. }
  2905. }
  2906. #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
  2907. BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
  2908. #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \
  2909. BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
  2910. BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
  2911. BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
  2912. BPF_F_ADJ_ROOM_ENCAP_L2( \
  2913. BPF_ADJ_ROOM_ENCAP_L2_MASK))
  2914. static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
  2915. u64 flags)
  2916. {
  2917. u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
  2918. bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
  2919. u16 mac_len = 0, inner_net = 0, inner_trans = 0;
  2920. unsigned int gso_type = SKB_GSO_DODGY;
  2921. int ret;
  2922. if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
  2923. /* udp gso_size delineates datagrams, only allow if fixed */
  2924. if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
  2925. !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
  2926. return -ENOTSUPP;
  2927. }
  2928. ret = skb_cow_head(skb, len_diff);
  2929. if (unlikely(ret < 0))
  2930. return ret;
  2931. if (encap) {
  2932. if (skb->protocol != htons(ETH_P_IP) &&
  2933. skb->protocol != htons(ETH_P_IPV6))
  2934. return -ENOTSUPP;
  2935. if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
  2936. flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
  2937. return -EINVAL;
  2938. if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
  2939. flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
  2940. return -EINVAL;
  2941. if (skb->encapsulation)
  2942. return -EALREADY;
  2943. mac_len = skb->network_header - skb->mac_header;
  2944. inner_net = skb->network_header;
  2945. if (inner_mac_len > len_diff)
  2946. return -EINVAL;
  2947. inner_trans = skb->transport_header;
  2948. }
  2949. ret = bpf_skb_net_hdr_push(skb, off, len_diff);
  2950. if (unlikely(ret < 0))
  2951. return ret;
  2952. if (encap) {
  2953. skb->inner_mac_header = inner_net - inner_mac_len;
  2954. skb->inner_network_header = inner_net;
  2955. skb->inner_transport_header = inner_trans;
  2956. skb_set_inner_protocol(skb, skb->protocol);
  2957. skb->encapsulation = 1;
  2958. skb_set_network_header(skb, mac_len);
  2959. if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
  2960. gso_type |= SKB_GSO_UDP_TUNNEL;
  2961. else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
  2962. gso_type |= SKB_GSO_GRE;
  2963. else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
  2964. gso_type |= SKB_GSO_IPXIP6;
  2965. else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
  2966. gso_type |= SKB_GSO_IPXIP4;
  2967. if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
  2968. flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
  2969. int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
  2970. sizeof(struct ipv6hdr) :
  2971. sizeof(struct iphdr);
  2972. skb_set_transport_header(skb, mac_len + nh_len);
  2973. }
  2974. /* Match skb->protocol to new outer l3 protocol */
  2975. if (skb->protocol == htons(ETH_P_IP) &&
  2976. flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
  2977. skb->protocol = htons(ETH_P_IPV6);
  2978. else if (skb->protocol == htons(ETH_P_IPV6) &&
  2979. flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
  2980. skb->protocol = htons(ETH_P_IP);
  2981. }
  2982. if (skb_is_gso(skb)) {
  2983. struct skb_shared_info *shinfo = skb_shinfo(skb);
  2984. /* Due to header grow, MSS needs to be downgraded. */
  2985. if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
  2986. skb_decrease_gso_size(shinfo, len_diff);
  2987. /* Header must be checked, and gso_segs recomputed. */
  2988. shinfo->gso_type |= gso_type;
  2989. shinfo->gso_segs = 0;
  2990. }
  2991. return 0;
  2992. }
  2993. static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
  2994. u64 flags)
  2995. {
  2996. int ret;
  2997. if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO |
  2998. BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
  2999. return -EINVAL;
  3000. if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
  3001. /* udp gso_size delineates datagrams, only allow if fixed */
  3002. if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
  3003. !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
  3004. return -ENOTSUPP;
  3005. }
  3006. ret = skb_unclone(skb, GFP_ATOMIC);
  3007. if (unlikely(ret < 0))
  3008. return ret;
  3009. ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
  3010. if (unlikely(ret < 0))
  3011. return ret;
  3012. if (skb_is_gso(skb)) {
  3013. struct skb_shared_info *shinfo = skb_shinfo(skb);
  3014. /* Due to header shrink, MSS can be upgraded. */
  3015. if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
  3016. skb_increase_gso_size(shinfo, len_diff);
  3017. /* Header must be checked, and gso_segs recomputed. */
  3018. shinfo->gso_type |= SKB_GSO_DODGY;
  3019. shinfo->gso_segs = 0;
  3020. }
  3021. return 0;
  3022. }
  3023. #define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
  3024. BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
  3025. u32, mode, u64, flags)
  3026. {
  3027. u32 len_diff_abs = abs(len_diff);
  3028. bool shrink = len_diff < 0;
  3029. int ret = 0;
  3030. if (unlikely(flags || mode))
  3031. return -EINVAL;
  3032. if (unlikely(len_diff_abs > 0xfffU))
  3033. return -EFAULT;
  3034. if (!shrink) {
  3035. ret = skb_cow(skb, len_diff);
  3036. if (unlikely(ret < 0))
  3037. return ret;
  3038. __skb_push(skb, len_diff_abs);
  3039. memset(skb->data, 0, len_diff_abs);
  3040. } else {
  3041. if (unlikely(!pskb_may_pull(skb, len_diff_abs)))
  3042. return -ENOMEM;
  3043. __skb_pull(skb, len_diff_abs);
  3044. }
  3045. bpf_compute_data_end_sk_skb(skb);
  3046. if (tls_sw_has_ctx_rx(skb->sk)) {
  3047. struct strp_msg *rxm = strp_msg(skb);
  3048. rxm->full_len += len_diff;
  3049. }
  3050. return ret;
  3051. }
  3052. static const struct bpf_func_proto sk_skb_adjust_room_proto = {
  3053. .func = sk_skb_adjust_room,
  3054. .gpl_only = false,
  3055. .ret_type = RET_INTEGER,
  3056. .arg1_type = ARG_PTR_TO_CTX,
  3057. .arg2_type = ARG_ANYTHING,
  3058. .arg3_type = ARG_ANYTHING,
  3059. .arg4_type = ARG_ANYTHING,
  3060. };
  3061. BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
  3062. u32, mode, u64, flags)
  3063. {
  3064. u32 len_cur, len_diff_abs = abs(len_diff);
  3065. u32 len_min = bpf_skb_net_base_len(skb);
  3066. u32 len_max = BPF_SKB_MAX_LEN;
  3067. __be16 proto = skb->protocol;
  3068. bool shrink = len_diff < 0;
  3069. u32 off;
  3070. int ret;
  3071. if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK |
  3072. BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
  3073. return -EINVAL;
  3074. if (unlikely(len_diff_abs > 0xfffU))
  3075. return -EFAULT;
  3076. if (unlikely(proto != htons(ETH_P_IP) &&
  3077. proto != htons(ETH_P_IPV6)))
  3078. return -ENOTSUPP;
  3079. off = skb_mac_header_len(skb);
  3080. switch (mode) {
  3081. case BPF_ADJ_ROOM_NET:
  3082. off += bpf_skb_net_base_len(skb);
  3083. break;
  3084. case BPF_ADJ_ROOM_MAC:
  3085. break;
  3086. default:
  3087. return -ENOTSUPP;
  3088. }
  3089. len_cur = skb->len - skb_network_offset(skb);
  3090. if ((shrink && (len_diff_abs >= len_cur ||
  3091. len_cur - len_diff_abs < len_min)) ||
  3092. (!shrink && (skb->len + len_diff_abs > len_max &&
  3093. !skb_is_gso(skb))))
  3094. return -ENOTSUPP;
  3095. ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
  3096. bpf_skb_net_grow(skb, off, len_diff_abs, flags);
  3097. if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET))
  3098. __skb_reset_checksum_unnecessary(skb);
  3099. bpf_compute_data_pointers(skb);
  3100. return ret;
  3101. }
  3102. static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
  3103. .func = bpf_skb_adjust_room,
  3104. .gpl_only = false,
  3105. .ret_type = RET_INTEGER,
  3106. .arg1_type = ARG_PTR_TO_CTX,
  3107. .arg2_type = ARG_ANYTHING,
  3108. .arg3_type = ARG_ANYTHING,
  3109. .arg4_type = ARG_ANYTHING,
  3110. };
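/* Illustrative usage sketch (not part of the kernel sources): growing room
 * at the mac/network boundary for an outer IPv4/UDP encapsulation header,
 * which the program then writes itself; "outer_len" is a placeholder for
 * sizeof(outer iphdr) + sizeof(udphdr) (+ any tunnel header).
 *
 *     if (bpf_skb_adjust_room(skb, outer_len, BPF_ADJ_ROOM_MAC,
 *                             BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
 *                             BPF_F_ADJ_ROOM_ENCAP_L4_UDP))
 *             return TC_ACT_SHOT;
 *     // then bpf_skb_store_bytes() the outer headers after the mac header.
 */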
  3111. static u32 __bpf_skb_min_len(const struct sk_buff *skb)
  3112. {
  3113. u32 min_len = skb_network_offset(skb);
  3114. if (skb_transport_header_was_set(skb))
  3115. min_len = skb_transport_offset(skb);
  3116. if (skb->ip_summed == CHECKSUM_PARTIAL)
  3117. min_len = skb_checksum_start_offset(skb) +
  3118. skb->csum_offset + sizeof(__sum16);
  3119. return min_len;
  3120. }
  3121. static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
  3122. {
  3123. unsigned int old_len = skb->len;
  3124. int ret;
  3125. ret = __skb_grow_rcsum(skb, new_len);
  3126. if (!ret)
  3127. memset(skb->data + old_len, 0, new_len - old_len);
  3128. return ret;
  3129. }
  3130. static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
  3131. {
  3132. return __skb_trim_rcsum(skb, new_len);
  3133. }
  3134. static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
  3135. u64 flags)
  3136. {
  3137. u32 max_len = BPF_SKB_MAX_LEN;
  3138. u32 min_len = __bpf_skb_min_len(skb);
  3139. int ret;
  3140. if (unlikely(flags || new_len > max_len || new_len < min_len))
  3141. return -EINVAL;
  3142. if (skb->encapsulation)
  3143. return -ENOTSUPP;
  3144. /* The basic idea of this helper is that it's performing the
  3145. * needed work to either grow or trim an skb, and eBPF program
  3146. * rewrites the rest via helpers like bpf_skb_store_bytes(),
  3147. * bpf_lX_csum_replace() and others rather than passing a raw
  3148. * buffer here. This one is a slow path helper and intended
  3149. * for replies with control messages.
  3150. *
3151. * Like in bpf_skb_change_proto(), we want to keep this rather
3152. * minimal and without protocol specifics so that we are able
3153. * to separate concerns: bpf_skb_store_bytes() should be the only
3154. * one responsible for writing buffers.
3155. *
3156. * It's really expected to be a slow path operation here for
3157. * control message replies, so we're implicitly linearizing,
3158. * uncloning and dropping offloads from the skb by this.
  3159. */
  3160. ret = __bpf_try_make_writable(skb, skb->len);
  3161. if (!ret) {
  3162. if (new_len > skb->len)
  3163. ret = bpf_skb_grow_rcsum(skb, new_len);
  3164. else if (new_len < skb->len)
  3165. ret = bpf_skb_trim_rcsum(skb, new_len);
  3166. if (!ret && skb_is_gso(skb))
  3167. skb_gso_reset(skb);
  3168. }
  3169. return ret;
  3170. }
  3171. BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
  3172. u64, flags)
  3173. {
  3174. int ret = __bpf_skb_change_tail(skb, new_len, flags);
  3175. bpf_compute_data_pointers(skb);
  3176. return ret;
  3177. }
  3178. static const struct bpf_func_proto bpf_skb_change_tail_proto = {
  3179. .func = bpf_skb_change_tail,
  3180. .gpl_only = false,
  3181. .ret_type = RET_INTEGER,
  3182. .arg1_type = ARG_PTR_TO_CTX,
  3183. .arg2_type = ARG_ANYTHING,
  3184. .arg3_type = ARG_ANYTHING,
  3185. };
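/* Illustrative usage sketch (not part of the kernel sources): trimming the
 * packet down to just its headers before turning it into a small control
 * reply; "reply_len" is a placeholder computed by the program.
 *
 *     if (bpf_skb_change_tail(skb, reply_len, 0))
 *             return TC_ACT_SHOT;
 */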
  3186. BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
  3187. u64, flags)
  3188. {
  3189. int ret = __bpf_skb_change_tail(skb, new_len, flags);
  3190. bpf_compute_data_end_sk_skb(skb);
  3191. return ret;
  3192. }
  3193. static const struct bpf_func_proto sk_skb_change_tail_proto = {
  3194. .func = sk_skb_change_tail,
  3195. .gpl_only = false,
  3196. .ret_type = RET_INTEGER,
  3197. .arg1_type = ARG_PTR_TO_CTX,
  3198. .arg2_type = ARG_ANYTHING,
  3199. .arg3_type = ARG_ANYTHING,
  3200. };
  3201. static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
  3202. u64 flags)
  3203. {
  3204. u32 max_len = BPF_SKB_MAX_LEN;
  3205. u32 new_len = skb->len + head_room;
  3206. int ret;
  3207. if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
  3208. new_len < skb->len))
  3209. return -EINVAL;
  3210. ret = skb_cow(skb, head_room);
  3211. if (likely(!ret)) {
3212. /* The idea for this helper is that we currently only
3213. * allow expanding on the mac header. This means that
3214. * skb->protocol, network header, etc., stay as is.
3215. * Compared to bpf_skb_change_tail(), we're more
3216. * flexible due to not needing to linearize or
3217. * reset GSO. The intention for this helper is to be
3218. * used by an L3 skb that needs to push a mac header
3219. * for redirection into an L2 device.
3220. */
  3221. __skb_push(skb, head_room);
  3222. memset(skb->data, 0, head_room);
  3223. skb_reset_mac_header(skb);
  3224. skb_reset_mac_len(skb);
  3225. }
  3226. return ret;
  3227. }
  3228. BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
  3229. u64, flags)
  3230. {
  3231. int ret = __bpf_skb_change_head(skb, head_room, flags);
  3232. bpf_compute_data_pointers(skb);
  3233. return ret;
  3234. }
  3235. static const struct bpf_func_proto bpf_skb_change_head_proto = {
  3236. .func = bpf_skb_change_head,
  3237. .gpl_only = false,
  3238. .ret_type = RET_INTEGER,
  3239. .arg1_type = ARG_PTR_TO_CTX,
  3240. .arg2_type = ARG_ANYTHING,
  3241. .arg3_type = ARG_ANYTHING,
  3242. };
  3243. BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
  3244. u64, flags)
  3245. {
  3246. int ret = __bpf_skb_change_head(skb, head_room, flags);
  3247. bpf_compute_data_end_sk_skb(skb);
  3248. return ret;
  3249. }
  3250. static const struct bpf_func_proto sk_skb_change_head_proto = {
  3251. .func = sk_skb_change_head,
  3252. .gpl_only = false,
  3253. .ret_type = RET_INTEGER,
  3254. .arg1_type = ARG_PTR_TO_CTX,
  3255. .arg2_type = ARG_ANYTHING,
  3256. .arg3_type = ARG_ANYTHING,
  3257. };
  3258. static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
  3259. {
  3260. return xdp_data_meta_unsupported(xdp) ? 0 :
  3261. xdp->data - xdp->data_meta;
  3262. }
  3263. BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
  3264. {
  3265. void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
  3266. unsigned long metalen = xdp_get_metalen(xdp);
  3267. void *data_start = xdp_frame_end + metalen;
  3268. void *data = xdp->data + offset;
  3269. if (unlikely(data < data_start ||
  3270. data > xdp->data_end - ETH_HLEN))
  3271. return -EINVAL;
  3272. if (metalen)
  3273. memmove(xdp->data_meta + offset,
  3274. xdp->data_meta, metalen);
  3275. xdp->data_meta += offset;
  3276. xdp->data = data;
  3277. return 0;
  3278. }
  3279. static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
  3280. .func = bpf_xdp_adjust_head,
  3281. .gpl_only = false,
  3282. .ret_type = RET_INTEGER,
  3283. .arg1_type = ARG_PTR_TO_CTX,
  3284. .arg2_type = ARG_ANYTHING,
  3285. };
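/* Illustrative usage sketch (not part of the kernel sources): an XDP
 * program makes room in front of the frame for a custom encapsulation
 * header and then fills it in; "ENCAP_LEN" is a placeholder.
 *
 *     if (bpf_xdp_adjust_head(xdp, -(int)ENCAP_LEN))
 *             return XDP_DROP;
 *     void *data = (void *)(long)xdp->data;
 *     if (data + ENCAP_LEN > (void *)(long)xdp->data_end)
 *             return XDP_DROP;
 *     // write the new header at data ...
 */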
  3286. BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
  3287. {
  3288. void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */
  3289. void *data_end = xdp->data_end + offset;
3290. /* Notice that xdp_data_hard_end has reserved some tailroom */
  3291. if (unlikely(data_end > data_hard_end))
  3292. return -EINVAL;
  3293. /* ALL drivers MUST init xdp->frame_sz, chicken check below */
  3294. if (unlikely(xdp->frame_sz > PAGE_SIZE)) {
  3295. WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz);
  3296. return -EINVAL;
  3297. }
  3298. if (unlikely(data_end < xdp->data + ETH_HLEN))
  3299. return -EINVAL;
  3300. /* Clear memory area on grow, can contain uninit kernel memory */
  3301. if (offset > 0)
  3302. memset(xdp->data_end, 0, offset);
  3303. xdp->data_end = data_end;
  3304. return 0;
  3305. }
  3306. static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
  3307. .func = bpf_xdp_adjust_tail,
  3308. .gpl_only = false,
  3309. .ret_type = RET_INTEGER,
  3310. .arg1_type = ARG_PTR_TO_CTX,
  3311. .arg2_type = ARG_ANYTHING,
  3312. };
  3313. BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
  3314. {
  3315. void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
  3316. void *meta = xdp->data_meta + offset;
  3317. unsigned long metalen = xdp->data - meta;
  3318. if (xdp_data_meta_unsupported(xdp))
  3319. return -ENOTSUPP;
  3320. if (unlikely(meta < xdp_frame_end ||
  3321. meta > xdp->data))
  3322. return -EINVAL;
  3323. if (unlikely((metalen & (sizeof(__u32) - 1)) ||
  3324. (metalen > 32)))
  3325. return -EACCES;
  3326. xdp->data_meta = meta;
  3327. return 0;
  3328. }
  3329. static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
  3330. .func = bpf_xdp_adjust_meta,
  3331. .gpl_only = false,
  3332. .ret_type = RET_INTEGER,
  3333. .arg1_type = ARG_PTR_TO_CTX,
  3334. .arg2_type = ARG_ANYTHING,
  3335. };
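/* Illustrative usage sketch (not part of the kernel sources): reserving
 * 4 bytes of metadata in front of the packet so that a later tc program
 * (or the driver) can read it via data_meta; the stored value is a
 * placeholder.
 *
 *     if (bpf_xdp_adjust_meta(xdp, -(int)sizeof(__u32)))
 *             return XDP_PASS;
 *     __u32 *meta = (void *)(long)xdp->data_meta;
 *     if ((void *)(meta + 1) > (void *)(long)xdp->data)
 *             return XDP_PASS;
 *     *meta = 0xdeadbeef;
 */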
  3336. static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
  3337. struct bpf_map *map, struct xdp_buff *xdp)
  3338. {
  3339. switch (map->map_type) {
  3340. case BPF_MAP_TYPE_DEVMAP:
  3341. case BPF_MAP_TYPE_DEVMAP_HASH:
  3342. return dev_map_enqueue(fwd, xdp, dev_rx);
  3343. case BPF_MAP_TYPE_CPUMAP:
  3344. return cpu_map_enqueue(fwd, xdp, dev_rx);
  3345. case BPF_MAP_TYPE_XSKMAP:
  3346. return __xsk_map_redirect(fwd, xdp);
  3347. default:
  3348. return -EBADRQC;
  3349. }
  3350. return 0;
  3351. }
  3352. void xdp_do_flush(void)
  3353. {
  3354. __dev_flush();
  3355. __cpu_map_flush();
  3356. __xsk_map_flush();
  3357. }
  3358. EXPORT_SYMBOL_GPL(xdp_do_flush);
  3359. static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
  3360. {
  3361. switch (map->map_type) {
  3362. case BPF_MAP_TYPE_DEVMAP:
  3363. return __dev_map_lookup_elem(map, index);
  3364. case BPF_MAP_TYPE_DEVMAP_HASH:
  3365. return __dev_map_hash_lookup_elem(map, index);
  3366. case BPF_MAP_TYPE_CPUMAP:
  3367. return __cpu_map_lookup_elem(map, index);
  3368. case BPF_MAP_TYPE_XSKMAP:
  3369. return __xsk_map_lookup_elem(map, index);
  3370. default:
  3371. return NULL;
  3372. }
  3373. }
  3374. void bpf_clear_redirect_map(struct bpf_map *map)
  3375. {
  3376. struct bpf_redirect_info *ri;
  3377. int cpu;
  3378. for_each_possible_cpu(cpu) {
  3379. ri = per_cpu_ptr(&bpf_redirect_info, cpu);
3380. /* Avoid polluting the remote cacheline due to writes if
3381. * not needed. Once we pass this test, we need the
3382. * cmpxchg() to make sure it hasn't been changed in
3383. * the meantime by a remote CPU.
3384. */
  3385. if (unlikely(READ_ONCE(ri->map) == map))
  3386. cmpxchg(&ri->map, map, NULL);
  3387. }
  3388. }
  3389. int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
  3390. struct bpf_prog *xdp_prog)
  3391. {
  3392. struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
  3393. struct bpf_map *map = READ_ONCE(ri->map);
  3394. u32 index = ri->tgt_index;
  3395. void *fwd = ri->tgt_value;
  3396. int err;
  3397. ri->tgt_index = 0;
  3398. ri->tgt_value = NULL;
  3399. WRITE_ONCE(ri->map, NULL);
  3400. if (unlikely(!map)) {
  3401. fwd = dev_get_by_index_rcu(dev_net(dev), index);
  3402. if (unlikely(!fwd)) {
  3403. err = -EINVAL;
  3404. goto err;
  3405. }
  3406. err = dev_xdp_enqueue(fwd, xdp, dev);
  3407. } else {
  3408. err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
  3409. }
  3410. if (unlikely(err))
  3411. goto err;
  3412. _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
  3413. return 0;
  3414. err:
  3415. _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
  3416. return err;
  3417. }
  3418. EXPORT_SYMBOL_GPL(xdp_do_redirect);
  3419. static int xdp_do_generic_redirect_map(struct net_device *dev,
  3420. struct sk_buff *skb,
  3421. struct xdp_buff *xdp,
  3422. struct bpf_prog *xdp_prog,
  3423. struct bpf_map *map)
  3424. {
  3425. struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
  3426. u32 index = ri->tgt_index;
  3427. void *fwd = ri->tgt_value;
  3428. int err = 0;
  3429. ri->tgt_index = 0;
  3430. ri->tgt_value = NULL;
  3431. WRITE_ONCE(ri->map, NULL);
  3432. if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
  3433. map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
  3434. struct bpf_dtab_netdev *dst = fwd;
  3435. err = dev_map_generic_redirect(dst, skb, xdp_prog);
  3436. if (unlikely(err))
  3437. goto err;
  3438. } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
  3439. struct xdp_sock *xs = fwd;
  3440. err = xsk_generic_rcv(xs, xdp);
  3441. if (err)
  3442. goto err;
  3443. consume_skb(skb);
  3444. } else {
  3445. /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
  3446. err = -EBADRQC;
  3447. goto err;
  3448. }
  3449. _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
  3450. return 0;
  3451. err:
  3452. _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
  3453. return err;
  3454. }
  3455. int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
  3456. struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
  3457. {
  3458. struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
  3459. struct bpf_map *map = READ_ONCE(ri->map);
  3460. u32 index = ri->tgt_index;
  3461. struct net_device *fwd;
  3462. int err = 0;
  3463. if (map)
  3464. return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
  3465. map);
  3466. ri->tgt_index = 0;
  3467. fwd = dev_get_by_index_rcu(dev_net(dev), index);
  3468. if (unlikely(!fwd)) {
  3469. err = -EINVAL;
  3470. goto err;
  3471. }
  3472. err = xdp_ok_fwd_dev(fwd, skb->len);
  3473. if (unlikely(err))
  3474. goto err;
  3475. skb->dev = fwd;
  3476. _trace_xdp_redirect(dev, xdp_prog, index);
  3477. generic_xdp_tx(skb, xdp_prog);
  3478. return 0;
  3479. err:
  3480. _trace_xdp_redirect_err(dev, xdp_prog, index, err);
  3481. return err;
  3482. }
  3483. BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
  3484. {
  3485. struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
  3486. if (unlikely(flags))
  3487. return XDP_ABORTED;
  3488. ri->flags = flags;
  3489. ri->tgt_index = ifindex;
  3490. ri->tgt_value = NULL;
  3491. WRITE_ONCE(ri->map, NULL);
  3492. return XDP_REDIRECT;
  3493. }
  3494. static const struct bpf_func_proto bpf_xdp_redirect_proto = {
  3495. .func = bpf_xdp_redirect,
  3496. .gpl_only = false,
  3497. .ret_type = RET_INTEGER,
  3498. .arg1_type = ARG_ANYTHING,
  3499. .arg2_type = ARG_ANYTHING,
  3500. };
  3501. BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
  3502. u64, flags)
  3503. {
  3504. struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
  3505. /* Lower bits of the flags are used as return code on lookup failure */
  3506. if (unlikely(flags > XDP_TX))
  3507. return XDP_ABORTED;
  3508. ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
  3509. if (unlikely(!ri->tgt_value)) {
  3510. /* If the lookup fails we want to clear out the state in the
  3511. * redirect_info struct completely, so that if an eBPF program
  3512. * performs multiple lookups, the last one always takes
  3513. * precedence.
  3514. */
  3515. WRITE_ONCE(ri->map, NULL);
  3516. return flags;
  3517. }
  3518. ri->flags = flags;
  3519. ri->tgt_index = ifindex;
  3520. WRITE_ONCE(ri->map, map);
  3521. return XDP_REDIRECT;
  3522. }
  3523. static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
  3524. .func = bpf_xdp_redirect_map,
  3525. .gpl_only = false,
  3526. .ret_type = RET_INTEGER,
  3527. .arg1_type = ARG_CONST_MAP_PTR,
  3528. .arg2_type = ARG_ANYTHING,
  3529. .arg3_type = ARG_ANYTHING,
  3530. };
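/* Illustrative usage sketch (not part of the kernel sources): redirecting
 * via a devmap from XDP, falling back to XDP_PASS when the map slot is
 * empty (the lower bits of flags are the lookup-failure return code); the
 * map "tx_ports" and key derivation are placeholders, the map definition
 * uses the libbpf BTF-style convention.
 *
 *     struct {
 *             __uint(type, BPF_MAP_TYPE_DEVMAP);
 *             __uint(max_entries, 64);
 *             __type(key, __u32);
 *             __type(value, __u32);
 *     } tx_ports SEC(".maps");
 *
 *     return bpf_redirect_map(&tx_ports, port_key, XDP_PASS);
 */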
  3531. static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
  3532. unsigned long off, unsigned long len)
  3533. {
  3534. void *ptr = skb_header_pointer(skb, off, len, dst_buff);
  3535. if (unlikely(!ptr))
  3536. return len;
  3537. if (ptr != dst_buff)
  3538. memcpy(dst_buff, ptr, len);
  3539. return 0;
  3540. }
  3541. BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
  3542. u64, flags, void *, meta, u64, meta_size)
  3543. {
  3544. u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
  3545. if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
  3546. return -EINVAL;
  3547. if (unlikely(!skb || skb_size > skb->len))
  3548. return -EFAULT;
  3549. return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
  3550. bpf_skb_copy);
  3551. }
  3552. static const struct bpf_func_proto bpf_skb_event_output_proto = {
  3553. .func = bpf_skb_event_output,
  3554. .gpl_only = true,
  3555. .ret_type = RET_INTEGER,
  3556. .arg1_type = ARG_PTR_TO_CTX,
  3557. .arg2_type = ARG_CONST_MAP_PTR,
  3558. .arg3_type = ARG_ANYTHING,
  3559. .arg4_type = ARG_PTR_TO_MEM,
  3560. .arg5_type = ARG_CONST_SIZE_OR_ZERO,
  3561. };
  3562. BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)
  3563. const struct bpf_func_proto bpf_skb_output_proto = {
  3564. .func = bpf_skb_event_output,
  3565. .gpl_only = true,
  3566. .ret_type = RET_INTEGER,
  3567. .arg1_type = ARG_PTR_TO_BTF_ID,
  3568. .arg1_btf_id = &bpf_skb_output_btf_ids[0],
  3569. .arg2_type = ARG_CONST_MAP_PTR,
  3570. .arg3_type = ARG_ANYTHING,
  3571. .arg4_type = ARG_PTR_TO_MEM,
  3572. .arg5_type = ARG_CONST_SIZE_OR_ZERO,
  3573. };
  3574. static unsigned short bpf_tunnel_key_af(u64 flags)
  3575. {
  3576. return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
  3577. }
  3578. BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
  3579. u32, size, u64, flags)
  3580. {
  3581. const struct ip_tunnel_info *info = skb_tunnel_info(skb);
  3582. u8 compat[sizeof(struct bpf_tunnel_key)];
  3583. void *to_orig = to;
  3584. int err;
  3585. if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
  3586. err = -EINVAL;
  3587. goto err_clear;
  3588. }
  3589. if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
  3590. err = -EPROTO;
  3591. goto err_clear;
  3592. }
  3593. if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
  3594. err = -EINVAL;
  3595. switch (size) {
  3596. case offsetof(struct bpf_tunnel_key, tunnel_label):
  3597. case offsetof(struct bpf_tunnel_key, tunnel_ext):
  3598. goto set_compat;
  3599. case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
  3600. /* Fixup deprecated structure layouts here, so we have
  3601. * a common path later on.
  3602. */
  3603. if (ip_tunnel_info_af(info) != AF_INET)
  3604. goto err_clear;
  3605. set_compat:
  3606. to = (struct bpf_tunnel_key *)compat;
  3607. break;
  3608. default:
  3609. goto err_clear;
  3610. }
  3611. }
  3612. to->tunnel_id = be64_to_cpu(info->key.tun_id);
  3613. to->tunnel_tos = info->key.tos;
  3614. to->tunnel_ttl = info->key.ttl;
  3615. to->tunnel_ext = 0;
  3616. if (flags & BPF_F_TUNINFO_IPV6) {
  3617. memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
  3618. sizeof(to->remote_ipv6));
  3619. to->tunnel_label = be32_to_cpu(info->key.label);
  3620. } else {
  3621. to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
  3622. memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
  3623. to->tunnel_label = 0;
  3624. }
  3625. if (unlikely(size != sizeof(struct bpf_tunnel_key)))
  3626. memcpy(to_orig, to, size);
  3627. return 0;
  3628. err_clear:
  3629. memset(to_orig, 0, size);
  3630. return err;
  3631. }
  3632. static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
  3633. .func = bpf_skb_get_tunnel_key,
  3634. .gpl_only = false,
  3635. .ret_type = RET_INTEGER,
  3636. .arg1_type = ARG_PTR_TO_CTX,
  3637. .arg2_type = ARG_PTR_TO_UNINIT_MEM,
  3638. .arg3_type = ARG_CONST_SIZE,
  3639. .arg4_type = ARG_ANYTHING,
  3640. };
  3641. BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
  3642. {
  3643. const struct ip_tunnel_info *info = skb_tunnel_info(skb);
  3644. int err;
  3645. if (unlikely(!info ||
  3646. !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
  3647. err = -ENOENT;
  3648. goto err_clear;
  3649. }
  3650. if (unlikely(size < info->options_len)) {
  3651. err = -ENOMEM;
  3652. goto err_clear;
  3653. }
  3654. ip_tunnel_info_opts_get(to, info);
  3655. if (size > info->options_len)
  3656. memset(to + info->options_len, 0, size - info->options_len);
  3657. return info->options_len;
  3658. err_clear:
  3659. memset(to, 0, size);
  3660. return err;
  3661. }
  3662. static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
  3663. .func = bpf_skb_get_tunnel_opt,
  3664. .gpl_only = false,
  3665. .ret_type = RET_INTEGER,
  3666. .arg1_type = ARG_PTR_TO_CTX,
  3667. .arg2_type = ARG_PTR_TO_UNINIT_MEM,
  3668. .arg3_type = ARG_CONST_SIZE,
  3669. };
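/* Transmit side: bpf_skb_set_tunnel_key() attaches a per-cpu metadata
 * dst to the skb and fills its ip_tunnel_info from the caller's
 * bpf_tunnel_key, so that a subsequent transmit through a
 * collect-metadata tunnel device picks it up.
 */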
  3670. static struct metadata_dst __percpu *md_dst;
  3671. BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
  3672. const struct bpf_tunnel_key *, from, u32, size, u64, flags)
  3673. {
  3674. struct metadata_dst *md = this_cpu_ptr(md_dst);
  3675. u8 compat[sizeof(struct bpf_tunnel_key)];
  3676. struct ip_tunnel_info *info;
  3677. if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
  3678. BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
  3679. return -EINVAL;
  3680. if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
  3681. switch (size) {
  3682. case offsetof(struct bpf_tunnel_key, tunnel_label):
  3683. case offsetof(struct bpf_tunnel_key, tunnel_ext):
  3684. case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
  3685. /* Fixup deprecated structure layouts here, so we have
  3686. * a common path later on.
  3687. */
  3688. memcpy(compat, from, size);
  3689. memset(compat + size, 0, sizeof(compat) - size);
  3690. from = (const struct bpf_tunnel_key *) compat;
  3691. break;
  3692. default:
  3693. return -EINVAL;
  3694. }
  3695. }
  3696. if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
  3697. from->tunnel_ext))
  3698. return -EINVAL;
  3699. skb_dst_drop(skb);
  3700. dst_hold((struct dst_entry *) md);
  3701. skb_dst_set(skb, (struct dst_entry *) md);
  3702. info = &md->u.tun_info;
  3703. memset(info, 0, sizeof(*info));
  3704. info->mode = IP_TUNNEL_INFO_TX;
  3705. info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
  3706. if (flags & BPF_F_DONT_FRAGMENT)
  3707. info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
  3708. if (flags & BPF_F_ZERO_CSUM_TX)
  3709. info->key.tun_flags &= ~TUNNEL_CSUM;
  3710. if (flags & BPF_F_SEQ_NUMBER)
  3711. info->key.tun_flags |= TUNNEL_SEQ;
  3712. info->key.tun_id = cpu_to_be64(from->tunnel_id);
  3713. info->key.tos = from->tunnel_tos;
  3714. info->key.ttl = from->tunnel_ttl;
  3715. if (flags & BPF_F_TUNINFO_IPV6) {
  3716. info->mode |= IP_TUNNEL_INFO_IPV6;
  3717. memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
  3718. sizeof(from->remote_ipv6));
  3719. info->key.label = cpu_to_be32(from->tunnel_label) &
  3720. IPV6_FLOWLABEL_MASK;
  3721. } else {
  3722. info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
  3723. }
  3724. return 0;
  3725. }
  3726. static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
  3727. .func = bpf_skb_set_tunnel_key,
  3728. .gpl_only = false,
  3729. .ret_type = RET_INTEGER,
  3730. .arg1_type = ARG_PTR_TO_CTX,
  3731. .arg2_type = ARG_PTR_TO_MEM,
  3732. .arg3_type = ARG_CONST_SIZE,
  3733. .arg4_type = ARG_ANYTHING,
  3734. };
  3735. BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
  3736. const u8 *, from, u32, size)
  3737. {
  3738. struct ip_tunnel_info *info = skb_tunnel_info(skb);
  3739. const struct metadata_dst *md = this_cpu_ptr(md_dst);
  3740. if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
  3741. return -EINVAL;
  3742. if (unlikely(size > IP_TUNNEL_OPTS_MAX))
  3743. return -ENOMEM;
  3744. ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
  3745. return 0;
  3746. }
  3747. static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
  3748. .func = bpf_skb_set_tunnel_opt,
  3749. .gpl_only = false,
  3750. .ret_type = RET_INTEGER,
  3751. .arg1_type = ARG_PTR_TO_CTX,
  3752. .arg2_type = ARG_PTR_TO_MEM,
  3753. .arg3_type = ARG_CONST_SIZE,
  3754. };
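/* Return the proto for the requested set_tunnel helper and lazily
 * allocate the shared per-cpu metadata dst on first use. The cmpxchg()
 * handles concurrent initialization; the loser frees its allocation.
 */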
  3755. static const struct bpf_func_proto *
  3756. bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
  3757. {
  3758. if (!md_dst) {
  3759. struct metadata_dst __percpu *tmp;
  3760. tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
  3761. METADATA_IP_TUNNEL,
  3762. GFP_KERNEL);
  3763. if (!tmp)
  3764. return NULL;
  3765. if (cmpxchg(&md_dst, NULL, tmp))
  3766. metadata_dst_free_percpu(tmp);
  3767. }
  3768. switch (which) {
  3769. case BPF_FUNC_skb_set_tunnel_key:
  3770. return &bpf_skb_set_tunnel_key_proto;
  3771. case BPF_FUNC_skb_set_tunnel_opt:
  3772. return &bpf_skb_set_tunnel_opt_proto;
  3773. default:
  3774. return NULL;
  3775. }
  3776. }
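/* Test whether the skb's full socket is a descendant of the cgroup
 * stored at 'idx' in a cgroup array map. Returns 1/0, or a negative
 * error if the socket, index or cgroup cannot be resolved.
 */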
  3777. BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
  3778. u32, idx)
  3779. {
  3780. struct bpf_array *array = container_of(map, struct bpf_array, map);
  3781. struct cgroup *cgrp;
  3782. struct sock *sk;
  3783. sk = skb_to_full_sk(skb);
  3784. if (!sk || !sk_fullsock(sk))
  3785. return -ENOENT;
  3786. if (unlikely(idx >= array->map.max_entries))
  3787. return -E2BIG;
  3788. cgrp = READ_ONCE(array->ptrs[idx]);
  3789. if (unlikely(!cgrp))
  3790. return -EAGAIN;
  3791. return sk_under_cgroup_hierarchy(sk, cgrp);
  3792. }
  3793. static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
  3794. .func = bpf_skb_under_cgroup,
  3795. .gpl_only = false,
  3796. .ret_type = RET_INTEGER,
  3797. .arg1_type = ARG_PTR_TO_CTX,
  3798. .arg2_type = ARG_CONST_MAP_PTR,
  3799. .arg3_type = ARG_ANYTHING,
  3800. };
  3801. #ifdef CONFIG_SOCK_CGROUP_DATA
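/* Resolve the cgroup v2 id of a socket, or 0 if it cannot be resolved
 * to a full socket. The *_ancestor_* variants below return the id of
 * the ancestor at the requested level instead.
 */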
  3802. static inline u64 __bpf_sk_cgroup_id(struct sock *sk)
  3803. {
  3804. struct cgroup *cgrp;
  3805. sk = sk_to_full_sk(sk);
  3806. if (!sk || !sk_fullsock(sk))
  3807. return 0;
  3808. cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
  3809. return cgroup_id(cgrp);
  3810. }
  3811. BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
  3812. {
  3813. return __bpf_sk_cgroup_id(skb->sk);
  3814. }
  3815. static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
  3816. .func = bpf_skb_cgroup_id,
  3817. .gpl_only = false,
  3818. .ret_type = RET_INTEGER,
  3819. .arg1_type = ARG_PTR_TO_CTX,
  3820. };
  3821. static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
  3822. int ancestor_level)
  3823. {
  3824. struct cgroup *ancestor;
  3825. struct cgroup *cgrp;
  3826. sk = sk_to_full_sk(sk);
  3827. if (!sk || !sk_fullsock(sk))
  3828. return 0;
  3829. cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
  3830. ancestor = cgroup_ancestor(cgrp, ancestor_level);
  3831. if (!ancestor)
  3832. return 0;
  3833. return cgroup_id(ancestor);
  3834. }
  3835. BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
  3836. ancestor_level)
  3837. {
  3838. return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level);
  3839. }
  3840. static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
  3841. .func = bpf_skb_ancestor_cgroup_id,
  3842. .gpl_only = false,
  3843. .ret_type = RET_INTEGER,
  3844. .arg1_type = ARG_PTR_TO_CTX,
  3845. .arg2_type = ARG_ANYTHING,
  3846. };
  3847. BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk)
  3848. {
  3849. return __bpf_sk_cgroup_id(sk);
  3850. }
  3851. static const struct bpf_func_proto bpf_sk_cgroup_id_proto = {
  3852. .func = bpf_sk_cgroup_id,
  3853. .gpl_only = false,
  3854. .ret_type = RET_INTEGER,
  3855. .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
  3856. };
  3857. BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level)
  3858. {
  3859. return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level);
  3860. }
  3861. static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = {
  3862. .func = bpf_sk_ancestor_cgroup_id,
  3863. .gpl_only = false,
  3864. .ret_type = RET_INTEGER,
  3865. .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
  3866. .arg2_type = ARG_ANYTHING,
  3867. };
  3868. #endif
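/* XDP flavour of the perf event output helper: packet bytes are copied
 * directly from the linear xdp->data area, bounded by data_end.
 */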
  3869. static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
  3870. unsigned long off, unsigned long len)
  3871. {
  3872. memcpy(dst_buff, src_buff + off, len);
  3873. return 0;
  3874. }
  3875. BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
  3876. u64, flags, void *, meta, u64, meta_size)
  3877. {
  3878. u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
  3879. if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
  3880. return -EINVAL;
  3881. if (unlikely(!xdp ||
  3882. xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
  3883. return -EFAULT;
  3884. return bpf_event_output(map, flags, meta, meta_size, xdp->data,
  3885. xdp_size, bpf_xdp_copy);
  3886. }
  3887. static const struct bpf_func_proto bpf_xdp_event_output_proto = {
  3888. .func = bpf_xdp_event_output,
  3889. .gpl_only = true,
  3890. .ret_type = RET_INTEGER,
  3891. .arg1_type = ARG_PTR_TO_CTX,
  3892. .arg2_type = ARG_CONST_MAP_PTR,
  3893. .arg3_type = ARG_ANYTHING,
  3894. .arg4_type = ARG_PTR_TO_MEM,
  3895. .arg5_type = ARG_CONST_SIZE_OR_ZERO,
  3896. };
  3897. BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)
  3898. const struct bpf_func_proto bpf_xdp_output_proto = {
  3899. .func = bpf_xdp_event_output,
  3900. .gpl_only = true,
  3901. .ret_type = RET_INTEGER,
  3902. .arg1_type = ARG_PTR_TO_BTF_ID,
  3903. .arg1_btf_id = &bpf_xdp_output_btf_ids[0],
  3904. .arg2_type = ARG_CONST_MAP_PTR,
  3905. .arg3_type = ARG_ANYTHING,
  3906. .arg4_type = ARG_PTR_TO_MEM,
  3907. .arg5_type = ARG_CONST_SIZE_OR_ZERO,
  3908. };
  3909. BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
  3910. {
  3911. return skb->sk ? __sock_gen_cookie(skb->sk) : 0;
  3912. }
  3913. static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
  3914. .func = bpf_get_socket_cookie,
  3915. .gpl_only = false,
  3916. .ret_type = RET_INTEGER,
  3917. .arg1_type = ARG_PTR_TO_CTX,
  3918. };
  3919. BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
  3920. {
  3921. return __sock_gen_cookie(ctx->sk);
  3922. }
  3923. static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
  3924. .func = bpf_get_socket_cookie_sock_addr,
  3925. .gpl_only = false,
  3926. .ret_type = RET_INTEGER,
  3927. .arg1_type = ARG_PTR_TO_CTX,
  3928. };
  3929. BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx)
  3930. {
  3931. return __sock_gen_cookie(ctx);
  3932. }
  3933. static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = {
  3934. .func = bpf_get_socket_cookie_sock,
  3935. .gpl_only = false,
  3936. .ret_type = RET_INTEGER,
  3937. .arg1_type = ARG_PTR_TO_CTX,
  3938. };
  3939. BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
  3940. {
  3941. return __sock_gen_cookie(ctx->sk);
  3942. }
  3943. static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
  3944. .func = bpf_get_socket_cookie_sock_ops,
  3945. .gpl_only = false,
  3946. .ret_type = RET_INTEGER,
  3947. .arg1_type = ARG_PTR_TO_CTX,
  3948. };
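/* Return the netns cookie of the socket's network namespace, falling
 * back to init_net when no socket is given. Always 0 without
 * CONFIG_NET_NS.
 */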
  3949. static u64 __bpf_get_netns_cookie(struct sock *sk)
  3950. {
  3951. #ifdef CONFIG_NET_NS
  3952. return __net_gen_cookie(sk ? sk->sk_net.net : &init_net);
  3953. #else
  3954. return 0;
  3955. #endif
  3956. }
  3957. BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx)
  3958. {
  3959. return __bpf_get_netns_cookie(ctx);
  3960. }
  3961. static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = {
  3962. .func = bpf_get_netns_cookie_sock,
  3963. .gpl_only = false,
  3964. .ret_type = RET_INTEGER,
  3965. .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
  3966. };
  3967. BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
  3968. {
  3969. return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
  3970. }
  3971. static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = {
  3972. .func = bpf_get_netns_cookie_sock_addr,
  3973. .gpl_only = false,
  3974. .ret_type = RET_INTEGER,
  3975. .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
  3976. };
  3977. BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
  3978. {
  3979. struct sock *sk = sk_to_full_sk(skb->sk);
  3980. kuid_t kuid;
  3981. if (!sk || !sk_fullsock(sk))
  3982. return overflowuid;
  3983. kuid = sock_net_uid(sock_net(sk), sk);
  3984. return from_kuid_munged(sock_net(sk)->user_ns, kuid);
  3985. }
  3986. static const struct bpf_func_proto bpf_get_socket_uid_proto = {
  3987. .func = bpf_get_socket_uid,
  3988. .gpl_only = false,
  3989. .ret_type = RET_INTEGER,
  3990. .arg1_type = ARG_PTR_TO_CTX,
  3991. };
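/* Common backend for the bpf_setsockopt() helpers. Only an explicitly
 * handled subset of SOL_SOCKET, SOL_IP, SOL_IPV6 and SOL_TCP options is
 * supported; anything else fails with -EINVAL. The socket must be a
 * full socket owned by the caller.
 */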
  3992. static int _bpf_setsockopt(struct sock *sk, int level, int optname,
  3993. char *optval, int optlen)
  3994. {
  3995. char devname[IFNAMSIZ];
  3996. int val, valbool;
  3997. struct net *net;
  3998. int ifindex;
  3999. int ret = 0;
  4000. if (!sk_fullsock(sk))
  4001. return -EINVAL;
  4002. sock_owned_by_me(sk);
  4003. if (level == SOL_SOCKET) {
  4004. if (optlen != sizeof(int) && optname != SO_BINDTODEVICE)
  4005. return -EINVAL;
  4006. val = *((int *)optval);
  4007. valbool = val ? 1 : 0;
4008. /* Only some socket options are supported */
  4009. switch (optname) {
  4010. case SO_RCVBUF:
  4011. val = min_t(u32, val, sysctl_rmem_max);
  4012. val = min_t(int, val, INT_MAX / 2);
  4013. sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
  4014. WRITE_ONCE(sk->sk_rcvbuf,
  4015. max_t(int, val * 2, SOCK_MIN_RCVBUF));
  4016. break;
  4017. case SO_SNDBUF:
  4018. val = min_t(u32, val, sysctl_wmem_max);
  4019. val = min_t(int, val, INT_MAX / 2);
  4020. sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
  4021. WRITE_ONCE(sk->sk_sndbuf,
  4022. max_t(int, val * 2, SOCK_MIN_SNDBUF));
  4023. break;
  4024. case SO_MAX_PACING_RATE: /* 32bit version */
  4025. if (val != ~0U)
  4026. cmpxchg(&sk->sk_pacing_status,
  4027. SK_PACING_NONE,
  4028. SK_PACING_NEEDED);
  4029. sk->sk_max_pacing_rate = (val == ~0U) ?
  4030. ~0UL : (unsigned int)val;
  4031. sk->sk_pacing_rate = min(sk->sk_pacing_rate,
  4032. sk->sk_max_pacing_rate);
  4033. break;
  4034. case SO_PRIORITY:
  4035. sk->sk_priority = val;
  4036. break;
  4037. case SO_RCVLOWAT:
  4038. if (val < 0)
  4039. val = INT_MAX;
  4040. WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
  4041. break;
  4042. case SO_MARK:
  4043. if (sk->sk_mark != val) {
  4044. sk->sk_mark = val;
  4045. sk_dst_reset(sk);
  4046. }
  4047. break;
  4048. case SO_BINDTODEVICE:
  4049. optlen = min_t(long, optlen, IFNAMSIZ - 1);
  4050. strncpy(devname, optval, optlen);
  4051. devname[optlen] = 0;
  4052. ifindex = 0;
  4053. if (devname[0] != '\0') {
  4054. struct net_device *dev;
  4055. ret = -ENODEV;
  4056. net = sock_net(sk);
  4057. dev = dev_get_by_name(net, devname);
  4058. if (!dev)
  4059. break;
  4060. ifindex = dev->ifindex;
  4061. dev_put(dev);
  4062. }
  4063. ret = sock_bindtoindex(sk, ifindex, false);
  4064. break;
  4065. case SO_KEEPALIVE:
  4066. if (sk->sk_prot->keepalive)
  4067. sk->sk_prot->keepalive(sk, valbool);
  4068. sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
  4069. break;
  4070. default:
  4071. ret = -EINVAL;
  4072. }
  4073. #ifdef CONFIG_INET
  4074. } else if (level == SOL_IP) {
  4075. if (optlen != sizeof(int) || sk->sk_family != AF_INET)
  4076. return -EINVAL;
  4077. val = *((int *)optval);
  4078. /* Only some options are supported */
  4079. switch (optname) {
  4080. case IP_TOS:
  4081. if (val < -1 || val > 0xff) {
  4082. ret = -EINVAL;
  4083. } else {
  4084. struct inet_sock *inet = inet_sk(sk);
  4085. if (val == -1)
  4086. val = 0;
  4087. inet->tos = val;
  4088. }
  4089. break;
  4090. default:
  4091. ret = -EINVAL;
  4092. }
  4093. #if IS_ENABLED(CONFIG_IPV6)
  4094. } else if (level == SOL_IPV6) {
  4095. if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
  4096. return -EINVAL;
  4097. val = *((int *)optval);
  4098. /* Only some options are supported */
  4099. switch (optname) {
  4100. case IPV6_TCLASS:
  4101. if (val < -1 || val > 0xff) {
  4102. ret = -EINVAL;
  4103. } else {
  4104. struct ipv6_pinfo *np = inet6_sk(sk);
  4105. if (val == -1)
  4106. val = 0;
  4107. np->tclass = val;
  4108. }
  4109. break;
  4110. default:
  4111. ret = -EINVAL;
  4112. }
  4113. #endif
  4114. } else if (level == SOL_TCP &&
  4115. sk->sk_prot->setsockopt == tcp_setsockopt) {
  4116. if (optname == TCP_CONGESTION) {
  4117. char name[TCP_CA_NAME_MAX];
  4118. strncpy(name, optval, min_t(long, optlen,
  4119. TCP_CA_NAME_MAX-1));
  4120. name[TCP_CA_NAME_MAX-1] = 0;
  4121. ret = tcp_set_congestion_control(sk, name, false, true);
  4122. } else {
  4123. struct inet_connection_sock *icsk = inet_csk(sk);
  4124. struct tcp_sock *tp = tcp_sk(sk);
  4125. unsigned long timeout;
  4126. if (optlen != sizeof(int))
  4127. return -EINVAL;
  4128. val = *((int *)optval);
  4129. /* Only some options are supported */
  4130. switch (optname) {
  4131. case TCP_BPF_IW:
  4132. if (val <= 0 || tp->data_segs_out > tp->syn_data)
  4133. ret = -EINVAL;
  4134. else
  4135. tp->snd_cwnd = val;
  4136. break;
  4137. case TCP_BPF_SNDCWND_CLAMP:
  4138. if (val <= 0) {
  4139. ret = -EINVAL;
  4140. } else {
  4141. tp->snd_cwnd_clamp = val;
  4142. tp->snd_ssthresh = val;
  4143. }
  4144. break;
  4145. case TCP_BPF_DELACK_MAX:
  4146. timeout = usecs_to_jiffies(val);
  4147. if (timeout > TCP_DELACK_MAX ||
  4148. timeout < TCP_TIMEOUT_MIN)
  4149. return -EINVAL;
  4150. inet_csk(sk)->icsk_delack_max = timeout;
  4151. break;
  4152. case TCP_BPF_RTO_MIN:
  4153. timeout = usecs_to_jiffies(val);
  4154. if (timeout > TCP_RTO_MIN ||
  4155. timeout < TCP_TIMEOUT_MIN)
  4156. return -EINVAL;
  4157. inet_csk(sk)->icsk_rto_min = timeout;
  4158. break;
  4159. case TCP_SAVE_SYN:
  4160. if (val < 0 || val > 1)
  4161. ret = -EINVAL;
  4162. else
  4163. tp->save_syn = val;
  4164. break;
  4165. case TCP_KEEPIDLE:
  4166. ret = tcp_sock_set_keepidle_locked(sk, val);
  4167. break;
  4168. case TCP_KEEPINTVL:
  4169. if (val < 1 || val > MAX_TCP_KEEPINTVL)
  4170. ret = -EINVAL;
  4171. else
  4172. tp->keepalive_intvl = val * HZ;
  4173. break;
  4174. case TCP_KEEPCNT:
  4175. if (val < 1 || val > MAX_TCP_KEEPCNT)
  4176. ret = -EINVAL;
  4177. else
  4178. tp->keepalive_probes = val;
  4179. break;
  4180. case TCP_SYNCNT:
  4181. if (val < 1 || val > MAX_TCP_SYNCNT)
  4182. ret = -EINVAL;
  4183. else
  4184. icsk->icsk_syn_retries = val;
  4185. break;
  4186. case TCP_USER_TIMEOUT:
  4187. if (val < 0)
  4188. ret = -EINVAL;
  4189. else
  4190. icsk->icsk_user_timeout = val;
  4191. break;
  4192. case TCP_NOTSENT_LOWAT:
  4193. tp->notsent_lowat = val;
  4194. sk->sk_write_space(sk);
  4195. break;
  4196. default:
  4197. ret = -EINVAL;
  4198. }
  4199. }
  4200. #endif
  4201. } else {
  4202. ret = -EINVAL;
  4203. }
  4204. return ret;
  4205. }
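/* Common backend for the bpf_getsockopt() helpers. Only TCP_CONGESTION,
 * TCP_SAVED_SYN, IP_TOS and IPV6_TCLASS are currently readable; on any
 * failure the output buffer is zeroed and -EINVAL returned.
 */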
  4206. static int _bpf_getsockopt(struct sock *sk, int level, int optname,
  4207. char *optval, int optlen)
  4208. {
  4209. if (!sk_fullsock(sk))
  4210. goto err_clear;
  4211. sock_owned_by_me(sk);
  4212. #ifdef CONFIG_INET
  4213. if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
  4214. struct inet_connection_sock *icsk;
  4215. struct tcp_sock *tp;
  4216. switch (optname) {
  4217. case TCP_CONGESTION:
  4218. icsk = inet_csk(sk);
  4219. if (!icsk->icsk_ca_ops || optlen <= 1)
  4220. goto err_clear;
  4221. strncpy(optval, icsk->icsk_ca_ops->name, optlen);
  4222. optval[optlen - 1] = 0;
  4223. break;
  4224. case TCP_SAVED_SYN:
  4225. tp = tcp_sk(sk);
  4226. if (optlen <= 0 || !tp->saved_syn ||
  4227. optlen > tcp_saved_syn_len(tp->saved_syn))
  4228. goto err_clear;
  4229. memcpy(optval, tp->saved_syn->data, optlen);
  4230. break;
  4231. default:
  4232. goto err_clear;
  4233. }
  4234. } else if (level == SOL_IP) {
  4235. struct inet_sock *inet = inet_sk(sk);
  4236. if (optlen != sizeof(int) || sk->sk_family != AF_INET)
  4237. goto err_clear;
  4238. /* Only some options are supported */
  4239. switch (optname) {
  4240. case IP_TOS:
  4241. *((int *)optval) = (int)inet->tos;
  4242. break;
  4243. default:
  4244. goto err_clear;
  4245. }
  4246. #if IS_ENABLED(CONFIG_IPV6)
  4247. } else if (level == SOL_IPV6) {
  4248. struct ipv6_pinfo *np = inet6_sk(sk);
  4249. if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
  4250. goto err_clear;
  4251. /* Only some options are supported */
  4252. switch (optname) {
  4253. case IPV6_TCLASS:
  4254. *((int *)optval) = (int)np->tclass;
  4255. break;
  4256. default:
  4257. goto err_clear;
  4258. }
  4259. #endif
  4260. } else {
  4261. goto err_clear;
  4262. }
  4263. return 0;
  4264. #endif
  4265. err_clear:
  4266. memset(optval, 0, optlen);
  4267. return -EINVAL;
  4268. }
  4269. BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
  4270. int, level, int, optname, char *, optval, int, optlen)
  4271. {
  4272. return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen);
  4273. }
  4274. static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
  4275. .func = bpf_sock_addr_setsockopt,
  4276. .gpl_only = false,
  4277. .ret_type = RET_INTEGER,
  4278. .arg1_type = ARG_PTR_TO_CTX,
  4279. .arg2_type = ARG_ANYTHING,
  4280. .arg3_type = ARG_ANYTHING,
  4281. .arg4_type = ARG_PTR_TO_MEM,
  4282. .arg5_type = ARG_CONST_SIZE,
  4283. };
  4284. BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx,
  4285. int, level, int, optname, char *, optval, int, optlen)
  4286. {
  4287. return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen);
  4288. }
  4289. static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
  4290. .func = bpf_sock_addr_getsockopt,
  4291. .gpl_only = false,
  4292. .ret_type = RET_INTEGER,
  4293. .arg1_type = ARG_PTR_TO_CTX,
  4294. .arg2_type = ARG_ANYTHING,
  4295. .arg3_type = ARG_ANYTHING,
  4296. .arg4_type = ARG_PTR_TO_UNINIT_MEM,
  4297. .arg5_type = ARG_CONST_SIZE,
  4298. };
  4299. BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
  4300. int, level, int, optname, char *, optval, int, optlen)
  4301. {
  4302. return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
  4303. }
  4304. static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
  4305. .func = bpf_sock_ops_setsockopt,
  4306. .gpl_only = false,
  4307. .ret_type = RET_INTEGER,
  4308. .arg1_type = ARG_PTR_TO_CTX,
  4309. .arg2_type = ARG_ANYTHING,
  4310. .arg3_type = ARG_ANYTHING,
  4311. .arg4_type = ARG_PTR_TO_MEM,
  4312. .arg5_type = ARG_CONST_SIZE,
  4313. };
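/* Locate the headers of the (saved) SYN for TCP_BPF_SYN{,_IP,_MAC}.
 * Uses the skb carried in bpf_sock->syn_skb when available and falls
 * back to the saved_syn stored on the request or full socket. Returns
 * the header length and sets *start, or -ENOENT if the requested
 * headers were not saved.
 */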
  4314. static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock,
  4315. int optname, const u8 **start)
  4316. {
  4317. struct sk_buff *syn_skb = bpf_sock->syn_skb;
  4318. const u8 *hdr_start;
  4319. int ret;
  4320. if (syn_skb) {
  4321. /* sk is a request_sock here */
  4322. if (optname == TCP_BPF_SYN) {
  4323. hdr_start = syn_skb->data;
  4324. ret = tcp_hdrlen(syn_skb);
  4325. } else if (optname == TCP_BPF_SYN_IP) {
  4326. hdr_start = skb_network_header(syn_skb);
  4327. ret = skb_network_header_len(syn_skb) +
  4328. tcp_hdrlen(syn_skb);
  4329. } else {
  4330. /* optname == TCP_BPF_SYN_MAC */
  4331. hdr_start = skb_mac_header(syn_skb);
  4332. ret = skb_mac_header_len(syn_skb) +
  4333. skb_network_header_len(syn_skb) +
  4334. tcp_hdrlen(syn_skb);
  4335. }
  4336. } else {
  4337. struct sock *sk = bpf_sock->sk;
  4338. struct saved_syn *saved_syn;
  4339. if (sk->sk_state == TCP_NEW_SYN_RECV)
4340. /* synack retransmit: bpf_sock->syn_skb is not
4341. * available here, so fall back to the saved_syn
4342. * (if the SYN was saved).
4343. */
  4344. saved_syn = inet_reqsk(sk)->saved_syn;
  4345. else
  4346. saved_syn = tcp_sk(sk)->saved_syn;
  4347. if (!saved_syn)
  4348. return -ENOENT;
  4349. if (optname == TCP_BPF_SYN) {
  4350. hdr_start = saved_syn->data +
  4351. saved_syn->mac_hdrlen +
  4352. saved_syn->network_hdrlen;
  4353. ret = saved_syn->tcp_hdrlen;
  4354. } else if (optname == TCP_BPF_SYN_IP) {
  4355. hdr_start = saved_syn->data +
  4356. saved_syn->mac_hdrlen;
  4357. ret = saved_syn->network_hdrlen +
  4358. saved_syn->tcp_hdrlen;
  4359. } else {
  4360. /* optname == TCP_BPF_SYN_MAC */
  4361. /* TCP_SAVE_SYN may not have saved the mac hdr */
  4362. if (!saved_syn->mac_hdrlen)
  4363. return -ENOENT;
  4364. hdr_start = saved_syn->data;
  4365. ret = saved_syn->mac_hdrlen +
  4366. saved_syn->network_hdrlen +
  4367. saved_syn->tcp_hdrlen;
  4368. }
  4369. }
  4370. *start = hdr_start;
  4371. return ret;
  4372. }
  4373. BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
  4374. int, level, int, optname, char *, optval, int, optlen)
  4375. {
  4376. if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP &&
  4377. optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) {
  4378. int ret, copy_len = 0;
  4379. const u8 *start;
  4380. ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start);
  4381. if (ret > 0) {
  4382. copy_len = ret;
  4383. if (optlen < copy_len) {
  4384. copy_len = optlen;
  4385. ret = -ENOSPC;
  4386. }
  4387. memcpy(optval, start, copy_len);
  4388. }
  4389. /* Zero out unused buffer at the end */
  4390. memset(optval + copy_len, 0, optlen - copy_len);
  4391. return ret;
  4392. }
  4393. return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen);
  4394. }
  4395. static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = {
  4396. .func = bpf_sock_ops_getsockopt,
  4397. .gpl_only = false,
  4398. .ret_type = RET_INTEGER,
  4399. .arg1_type = ARG_PTR_TO_CTX,
  4400. .arg2_type = ARG_ANYTHING,
  4401. .arg3_type = ARG_ANYTHING,
  4402. .arg4_type = ARG_PTR_TO_UNINIT_MEM,
  4403. .arg5_type = ARG_CONST_SIZE,
  4404. };
  4405. BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
  4406. int, argval)
  4407. {
  4408. struct sock *sk = bpf_sock->sk;
  4409. int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
  4410. if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
  4411. return -EINVAL;
  4412. tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
  4413. return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
  4414. }
  4415. static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
  4416. .func = bpf_sock_ops_cb_flags_set,
  4417. .gpl_only = false,
  4418. .ret_type = RET_INTEGER,
  4419. .arg1_type = ARG_PTR_TO_CTX,
  4420. .arg2_type = ARG_ANYTHING,
  4421. };
  4422. const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
  4423. EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
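/* bpf_bind(): bind the context socket from a cgroup sock_addr program.
 * Validates the sockaddr length for the given family and defers to
 * __inet_bind()/inet6_bind() with BIND_FROM_BPF; a zero port
 * additionally sets BIND_FORCE_ADDRESS_NO_PORT.
 */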
  4424. BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
  4425. int, addr_len)
  4426. {
  4427. #ifdef CONFIG_INET
  4428. struct sock *sk = ctx->sk;
  4429. u32 flags = BIND_FROM_BPF;
  4430. int err;
  4431. err = -EINVAL;
  4432. if (addr_len < offsetofend(struct sockaddr, sa_family))
  4433. return err;
  4434. if (addr->sa_family == AF_INET) {
  4435. if (addr_len < sizeof(struct sockaddr_in))
  4436. return err;
  4437. if (((struct sockaddr_in *)addr)->sin_port == htons(0))
  4438. flags |= BIND_FORCE_ADDRESS_NO_PORT;
  4439. return __inet_bind(sk, addr, addr_len, flags);
  4440. #if IS_ENABLED(CONFIG_IPV6)
  4441. } else if (addr->sa_family == AF_INET6) {
  4442. if (addr_len < SIN6_LEN_RFC2133)
  4443. return err;
  4444. if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0))
  4445. flags |= BIND_FORCE_ADDRESS_NO_PORT;
4446. /* ipv6_bpf_stub cannot be NULL, since it's called from the
4447. * bpf_cgroup_inet6_connect hook and the ipv6 module is already loaded
4448. */
  4449. return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags);
  4450. #endif /* CONFIG_IPV6 */
  4451. }
  4452. #endif /* CONFIG_INET */
  4453. return -EAFNOSUPPORT;
  4454. }
  4455. static const struct bpf_func_proto bpf_bind_proto = {
  4456. .func = bpf_bind,
  4457. .gpl_only = false,
  4458. .ret_type = RET_INTEGER,
  4459. .arg1_type = ARG_PTR_TO_CTX,
  4460. .arg2_type = ARG_PTR_TO_MEM,
  4461. .arg3_type = ARG_CONST_SIZE,
  4462. };
  4463. #ifdef CONFIG_XFRM
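/* Expose the xfrm state at 'index' in the skb's sec_path: reqid, spi,
 * family and the remote (source) address are copied into 'to', which is
 * zeroed on error.
 */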
  4464. BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
  4465. struct bpf_xfrm_state *, to, u32, size, u64, flags)
  4466. {
  4467. const struct sec_path *sp = skb_sec_path(skb);
  4468. const struct xfrm_state *x;
  4469. if (!sp || unlikely(index >= sp->len || flags))
  4470. goto err_clear;
  4471. x = sp->xvec[index];
  4472. if (unlikely(size != sizeof(struct bpf_xfrm_state)))
  4473. goto err_clear;
  4474. to->reqid = x->props.reqid;
  4475. to->spi = x->id.spi;
  4476. to->family = x->props.family;
  4477. to->ext = 0;
  4478. if (to->family == AF_INET6) {
  4479. memcpy(to->remote_ipv6, x->props.saddr.a6,
  4480. sizeof(to->remote_ipv6));
  4481. } else {
  4482. to->remote_ipv4 = x->props.saddr.a4;
  4483. memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
  4484. }
  4485. return 0;
  4486. err_clear:
  4487. memset(to, 0, size);
  4488. return -EINVAL;
  4489. }
  4490. static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
  4491. .func = bpf_skb_get_xfrm_state,
  4492. .gpl_only = false,
  4493. .ret_type = RET_INTEGER,
  4494. .arg1_type = ARG_PTR_TO_CTX,
  4495. .arg2_type = ARG_ANYTHING,
  4496. .arg3_type = ARG_PTR_TO_UNINIT_MEM,
  4497. .arg4_type = ARG_CONST_SIZE,
  4498. .arg5_type = ARG_ANYTHING,
  4499. };
  4500. #endif
  4501. #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
  4502. static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
  4503. const struct neighbour *neigh,
  4504. const struct net_device *dev)
  4505. {
  4506. memcpy(params->dmac, neigh->ha, ETH_ALEN);
  4507. memcpy(params->smac, dev->dev_addr, ETH_ALEN);
  4508. params->h_vlan_TCI = 0;
  4509. params->h_vlan_proto = 0;
  4510. return 0;
  4511. }
  4512. #endif
  4513. #if IS_ENABLED(CONFIG_INET)
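/* IPv4 leg of bpf_fib_lookup(): build a flowi4 from 'params', resolve
 * the route (bypassing FIB rules when BPF_FIB_LOOKUP_DIRECT is set),
 * optionally check the MTU and resolve the next-hop neighbour. Returns
 * a BPF_FIB_LKUP_RET_* code and updates 'params' with the forwarding
 * result on success.
 */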
  4514. static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
  4515. u32 flags, bool check_mtu)
  4516. {
  4517. struct fib_nh_common *nhc;
  4518. struct in_device *in_dev;
  4519. struct neighbour *neigh;
  4520. struct net_device *dev;
  4521. struct fib_result res;
  4522. struct flowi4 fl4;
  4523. int err;
  4524. u32 mtu;
  4525. dev = dev_get_by_index_rcu(net, params->ifindex);
  4526. if (unlikely(!dev))
  4527. return -ENODEV;
  4528. /* verify forwarding is enabled on this interface */
  4529. in_dev = __in_dev_get_rcu(dev);
  4530. if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
  4531. return BPF_FIB_LKUP_RET_FWD_DISABLED;
  4532. if (flags & BPF_FIB_LOOKUP_OUTPUT) {
  4533. fl4.flowi4_iif = 1;
  4534. fl4.flowi4_oif = params->ifindex;
  4535. } else {
  4536. fl4.flowi4_iif = params->ifindex;
  4537. fl4.flowi4_oif = 0;
  4538. }
  4539. fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
  4540. fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
  4541. fl4.flowi4_flags = 0;
  4542. fl4.flowi4_proto = params->l4_protocol;
  4543. fl4.daddr = params->ipv4_dst;
  4544. fl4.saddr = params->ipv4_src;
  4545. fl4.fl4_sport = params->sport;
  4546. fl4.fl4_dport = params->dport;
  4547. fl4.flowi4_multipath_hash = 0;
  4548. if (flags & BPF_FIB_LOOKUP_DIRECT) {
  4549. u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
  4550. struct fib_table *tb;
  4551. tb = fib_get_table(net, tbid);
  4552. if (unlikely(!tb))
  4553. return BPF_FIB_LKUP_RET_NOT_FWDED;
  4554. err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
  4555. } else {
  4556. fl4.flowi4_mark = 0;
  4557. fl4.flowi4_secid = 0;
  4558. fl4.flowi4_tun_key.tun_id = 0;
  4559. fl4.flowi4_uid = sock_net_uid(net, NULL);
  4560. err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
  4561. }
  4562. if (err) {
  4563. /* map fib lookup errors to RTN_ type */
  4564. if (err == -EINVAL)
  4565. return BPF_FIB_LKUP_RET_BLACKHOLE;
  4566. if (err == -EHOSTUNREACH)
  4567. return BPF_FIB_LKUP_RET_UNREACHABLE;
  4568. if (err == -EACCES)
  4569. return BPF_FIB_LKUP_RET_PROHIBIT;
  4570. return BPF_FIB_LKUP_RET_NOT_FWDED;
  4571. }
  4572. if (res.type != RTN_UNICAST)
  4573. return BPF_FIB_LKUP_RET_NOT_FWDED;
  4574. if (fib_info_num_path(res.fi) > 1)
  4575. fib_select_path(net, &res, &fl4, NULL);
  4576. if (check_mtu) {
  4577. mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
  4578. if (params->tot_len > mtu)
  4579. return BPF_FIB_LKUP_RET_FRAG_NEEDED;
  4580. }
  4581. nhc = res.nhc;
  4582. /* do not handle lwt encaps right now */
  4583. if (nhc->nhc_lwtstate)
  4584. return BPF_FIB_LKUP_RET_UNSUPP_LWT;
  4585. dev = nhc->nhc_dev;
  4586. params->rt_metric = res.fi->fib_priority;
  4587. params->ifindex = dev->ifindex;
  4588. /* xdp and cls_bpf programs are run in RCU-bh so
  4589. * rcu_read_lock_bh is not needed here
  4590. */
  4591. if (likely(nhc->nhc_gw_family != AF_INET6)) {
  4592. if (nhc->nhc_gw_family)
  4593. params->ipv4_dst = nhc->nhc_gw.ipv4;
  4594. neigh = __ipv4_neigh_lookup_noref(dev,
  4595. (__force u32)params->ipv4_dst);
  4596. } else {
  4597. struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
  4598. params->family = AF_INET6;
  4599. *dst = nhc->nhc_gw.ipv6;
  4600. neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
  4601. }
  4602. if (!neigh)
  4603. return BPF_FIB_LKUP_RET_NO_NEIGH;
  4604. return bpf_fib_set_fwd_params(params, neigh, dev);
  4605. }
  4606. #endif
  4607. #if IS_ENABLED(CONFIG_IPV6)
  4608. static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
  4609. u32 flags, bool check_mtu)
  4610. {
  4611. struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
  4612. struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
  4613. struct fib6_result res = {};
  4614. struct neighbour *neigh;
  4615. struct net_device *dev;
  4616. struct inet6_dev *idev;
  4617. struct flowi6 fl6;
  4618. int strict = 0;
  4619. int oif, err;
  4620. u32 mtu;
  4621. /* link local addresses are never forwarded */
  4622. if (rt6_need_strict(dst) || rt6_need_strict(src))
  4623. return BPF_FIB_LKUP_RET_NOT_FWDED;
  4624. dev = dev_get_by_index_rcu(net, params->ifindex);
  4625. if (unlikely(!dev))
  4626. return -ENODEV;
  4627. idev = __in6_dev_get_safely(dev);
  4628. if (unlikely(!idev || !idev->cnf.forwarding))
  4629. return BPF_FIB_LKUP_RET_FWD_DISABLED;
  4630. if (flags & BPF_FIB_LOOKUP_OUTPUT) {
  4631. fl6.flowi6_iif = 1;
  4632. oif = fl6.flowi6_oif = params->ifindex;
  4633. } else {
  4634. oif = fl6.flowi6_iif = params->ifindex;
  4635. fl6.flowi6_oif = 0;
  4636. strict = RT6_LOOKUP_F_HAS_SADDR;
  4637. }
  4638. fl6.flowlabel = params->flowinfo;
  4639. fl6.flowi6_scope = 0;
  4640. fl6.flowi6_flags = 0;
  4641. fl6.mp_hash = 0;
  4642. fl6.flowi6_proto = params->l4_protocol;
  4643. fl6.daddr = *dst;
  4644. fl6.saddr = *src;
  4645. fl6.fl6_sport = params->sport;
  4646. fl6.fl6_dport = params->dport;
  4647. if (flags & BPF_FIB_LOOKUP_DIRECT) {
  4648. u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
  4649. struct fib6_table *tb;
  4650. tb = ipv6_stub->fib6_get_table(net, tbid);
  4651. if (unlikely(!tb))
  4652. return BPF_FIB_LKUP_RET_NOT_FWDED;
  4653. err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
  4654. strict);
  4655. } else {
  4656. fl6.flowi6_mark = 0;
  4657. fl6.flowi6_secid = 0;
  4658. fl6.flowi6_tun_key.tun_id = 0;
  4659. fl6.flowi6_uid = sock_net_uid(net, NULL);
  4660. err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
  4661. }
  4662. if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
  4663. res.f6i == net->ipv6.fib6_null_entry))
  4664. return BPF_FIB_LKUP_RET_NOT_FWDED;
  4665. switch (res.fib6_type) {
  4666. /* only unicast is forwarded */
  4667. case RTN_UNICAST:
  4668. break;
  4669. case RTN_BLACKHOLE:
  4670. return BPF_FIB_LKUP_RET_BLACKHOLE;
  4671. case RTN_UNREACHABLE:
  4672. return BPF_FIB_LKUP_RET_UNREACHABLE;
  4673. case RTN_PROHIBIT:
  4674. return BPF_FIB_LKUP_RET_PROHIBIT;
  4675. default:
  4676. return BPF_FIB_LKUP_RET_NOT_FWDED;
  4677. }
  4678. ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
  4679. fl6.flowi6_oif != 0, NULL, strict);
  4680. if (check_mtu) {
  4681. mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
  4682. if (params->tot_len > mtu)
  4683. return BPF_FIB_LKUP_RET_FRAG_NEEDED;
  4684. }
  4685. if (res.nh->fib_nh_lws)
  4686. return BPF_FIB_LKUP_RET_UNSUPP_LWT;
  4687. if (res.nh->fib_nh_gw_family)
  4688. *dst = res.nh->fib_nh_gw6;
  4689. dev = res.nh->fib_nh_dev;
  4690. params->rt_metric = res.f6i->fib6_metric;
  4691. params->ifindex = dev->ifindex;
  4692. /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
  4693. * not needed here.
  4694. */
  4695. neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
  4696. if (!neigh)
  4697. return BPF_FIB_LKUP_RET_NO_NEIGH;
  4698. return bpf_fib_set_fwd_params(params, neigh, dev);
  4699. }
  4700. #endif
  4701. BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
  4702. struct bpf_fib_lookup *, params, int, plen, u32, flags)
  4703. {
  4704. if (plen < sizeof(*params))
  4705. return -EINVAL;
  4706. if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
  4707. return -EINVAL;
  4708. switch (params->family) {
  4709. #if IS_ENABLED(CONFIG_INET)
  4710. case AF_INET:
  4711. return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
  4712. flags, true);
  4713. #endif
  4714. #if IS_ENABLED(CONFIG_IPV6)
  4715. case AF_INET6:
  4716. return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
  4717. flags, true);
  4718. #endif
  4719. }
  4720. return -EAFNOSUPPORT;
  4721. }
  4722. static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
  4723. .func = bpf_xdp_fib_lookup,
  4724. .gpl_only = true,
  4725. .ret_type = RET_INTEGER,
  4726. .arg1_type = ARG_PTR_TO_CTX,
  4727. .arg2_type = ARG_PTR_TO_MEM,
  4728. .arg3_type = ARG_CONST_SIZE,
  4729. .arg4_type = ARG_ANYTHING,
  4730. };
  4731. BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
  4732. struct bpf_fib_lookup *, params, int, plen, u32, flags)
  4733. {
  4734. struct net *net = dev_net(skb->dev);
  4735. int rc = -EAFNOSUPPORT;
  4736. bool check_mtu = false;
  4737. if (plen < sizeof(*params))
  4738. return -EINVAL;
  4739. if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
  4740. return -EINVAL;
  4741. if (params->tot_len)
  4742. check_mtu = true;
  4743. switch (params->family) {
  4744. #if IS_ENABLED(CONFIG_INET)
  4745. case AF_INET:
  4746. rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu);
  4747. break;
  4748. #endif
  4749. #if IS_ENABLED(CONFIG_IPV6)
  4750. case AF_INET6:
  4751. rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu);
  4752. break;
  4753. #endif
  4754. }
  4755. if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) {
  4756. struct net_device *dev;
4757. /* When tot_len isn't provided by the user, check the skb
4758. * against the MTU of the net_device returned by the FIB lookup.
4759. */
  4760. dev = dev_get_by_index_rcu(net, params->ifindex);
  4761. if (!is_skb_forwardable(dev, skb))
  4762. rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
  4763. }
  4764. return rc;
  4765. }
  4766. static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
  4767. .func = bpf_skb_fib_lookup,
  4768. .gpl_only = true,
  4769. .ret_type = RET_INTEGER,
  4770. .arg1_type = ARG_PTR_TO_CTX,
  4771. .arg2_type = ARG_PTR_TO_MEM,
  4772. .arg3_type = ARG_CONST_SIZE,
  4773. .arg4_type = ARG_ANYTHING,
  4774. };
  4775. #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
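/* Validate the supplied SRH and push it onto the skb, either inline
 * into the existing IPv6 header (BPF_LWT_ENCAP_SEG6_INLINE) or as a new
 * outer IPv6 + SRH encapsulation (BPF_LWT_ENCAP_SEG6), then re-resolve
 * the nexthop.
 */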
  4776. static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
  4777. {
  4778. int err;
  4779. struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
  4780. if (!seg6_validate_srh(srh, len, false))
  4781. return -EINVAL;
  4782. switch (type) {
  4783. case BPF_LWT_ENCAP_SEG6_INLINE:
  4784. if (skb->protocol != htons(ETH_P_IPV6))
  4785. return -EBADMSG;
  4786. err = seg6_do_srh_inline(skb, srh);
  4787. break;
  4788. case BPF_LWT_ENCAP_SEG6:
  4789. skb_reset_inner_headers(skb);
  4790. skb->encapsulation = 1;
  4791. err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
  4792. break;
  4793. default:
  4794. return -EINVAL;
  4795. }
  4796. bpf_compute_data_pointers(skb);
  4797. if (err)
  4798. return err;
  4799. ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
  4800. skb_set_transport_header(skb, sizeof(struct ipv6hdr));
  4801. return seg6_lookup_nexthop(skb, NULL, 0);
  4802. }
  4803. #endif /* CONFIG_IPV6_SEG6_BPF */
  4804. #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
  4805. static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
  4806. bool ingress)
  4807. {
  4808. return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
  4809. }
  4810. #endif
  4811. BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
  4812. u32, len)
  4813. {
  4814. switch (type) {
  4815. #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
  4816. case BPF_LWT_ENCAP_SEG6:
  4817. case BPF_LWT_ENCAP_SEG6_INLINE:
  4818. return bpf_push_seg6_encap(skb, type, hdr, len);
  4819. #endif
  4820. #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
  4821. case BPF_LWT_ENCAP_IP:
  4822. return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
  4823. #endif
  4824. default:
  4825. return -EINVAL;
  4826. }
  4827. }
  4828. BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
  4829. void *, hdr, u32, len)
  4830. {
  4831. switch (type) {
  4832. #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
  4833. case BPF_LWT_ENCAP_IP:
  4834. return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
  4835. #endif
  4836. default:
  4837. return -EINVAL;
  4838. }
  4839. }
  4840. static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
  4841. .func = bpf_lwt_in_push_encap,
  4842. .gpl_only = false,
  4843. .ret_type = RET_INTEGER,
  4844. .arg1_type = ARG_PTR_TO_CTX,
  4845. .arg2_type = ARG_ANYTHING,
  4846. .arg3_type = ARG_PTR_TO_MEM,
  4847. .arg4_type = ARG_CONST_SIZE
  4848. };
  4849. static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
  4850. .func = bpf_lwt_xmit_push_encap,
  4851. .gpl_only = false,
  4852. .ret_type = RET_INTEGER,
  4853. .arg1_type = ARG_PTR_TO_CTX,
  4854. .arg2_type = ARG_ANYTHING,
  4855. .arg3_type = ARG_PTR_TO_MEM,
  4856. .arg4_type = ARG_CONST_SIZE
  4857. };
  4858. #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
  4859. BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
  4860. const void *, from, u32, len)
  4861. {
  4862. struct seg6_bpf_srh_state *srh_state =
  4863. this_cpu_ptr(&seg6_bpf_srh_states);
  4864. struct ipv6_sr_hdr *srh = srh_state->srh;
  4865. void *srh_tlvs, *srh_end, *ptr;
  4866. int srhoff = 0;
  4867. if (srh == NULL)
  4868. return -EINVAL;
  4869. srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
  4870. srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
  4871. ptr = skb->data + offset;
  4872. if (ptr >= srh_tlvs && ptr + len <= srh_end)
  4873. srh_state->valid = false;
  4874. else if (ptr < (void *)&srh->flags ||
  4875. ptr + len > (void *)&srh->segments)
  4876. return -EFAULT;
  4877. if (unlikely(bpf_try_make_writable(skb, offset + len)))
  4878. return -EFAULT;
  4879. if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
  4880. return -EINVAL;
  4881. srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
  4882. memcpy(skb->data + offset, from, len);
  4883. return 0;
  4884. }
  4885. static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
  4886. .func = bpf_lwt_seg6_store_bytes,
  4887. .gpl_only = false,
  4888. .ret_type = RET_INTEGER,
  4889. .arg1_type = ARG_PTR_TO_CTX,
  4890. .arg2_type = ARG_ANYTHING,
  4891. .arg3_type = ARG_PTR_TO_MEM,
  4892. .arg4_type = ARG_CONST_SIZE
  4893. };
  4894. static void bpf_update_srh_state(struct sk_buff *skb)
  4895. {
  4896. struct seg6_bpf_srh_state *srh_state =
  4897. this_cpu_ptr(&seg6_bpf_srh_states);
  4898. int srhoff = 0;
  4899. if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
  4900. srh_state->srh = NULL;
  4901. } else {
  4902. srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
  4903. srh_state->hdrlen = srh_state->srh->hdrlen << 3;
  4904. srh_state->valid = true;
  4905. }
  4906. }
  4907. BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
  4908. u32, action, void *, param, u32, param_len)
  4909. {
  4910. struct seg6_bpf_srh_state *srh_state =
  4911. this_cpu_ptr(&seg6_bpf_srh_states);
  4912. int hdroff = 0;
  4913. int err;
  4914. switch (action) {
  4915. case SEG6_LOCAL_ACTION_END_X:
  4916. if (!seg6_bpf_has_valid_srh(skb))
  4917. return -EBADMSG;
  4918. if (param_len != sizeof(struct in6_addr))
  4919. return -EINVAL;
  4920. return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
  4921. case SEG6_LOCAL_ACTION_END_T:
  4922. if (!seg6_bpf_has_valid_srh(skb))
  4923. return -EBADMSG;
  4924. if (param_len != sizeof(int))
  4925. return -EINVAL;
  4926. return seg6_lookup_nexthop(skb, NULL, *(int *)param);
  4927. case SEG6_LOCAL_ACTION_END_DT6:
  4928. if (!seg6_bpf_has_valid_srh(skb))
  4929. return -EBADMSG;
  4930. if (param_len != sizeof(int))
  4931. return -EINVAL;
  4932. if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
  4933. return -EBADMSG;
  4934. if (!pskb_pull(skb, hdroff))
  4935. return -EBADMSG;
  4936. skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
  4937. skb_reset_network_header(skb);
  4938. skb_reset_transport_header(skb);
  4939. skb->encapsulation = 0;
  4940. bpf_compute_data_pointers(skb);
  4941. bpf_update_srh_state(skb);
  4942. return seg6_lookup_nexthop(skb, NULL, *(int *)param);
  4943. case SEG6_LOCAL_ACTION_END_B6:
  4944. if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
  4945. return -EBADMSG;
  4946. err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
  4947. param, param_len);
  4948. if (!err)
  4949. bpf_update_srh_state(skb);
  4950. return err;
  4951. case SEG6_LOCAL_ACTION_END_B6_ENCAP:
  4952. if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
  4953. return -EBADMSG;
  4954. err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
  4955. param, param_len);
  4956. if (!err)
  4957. bpf_update_srh_state(skb);
  4958. return err;
  4959. default:
  4960. return -EINVAL;
  4961. }
  4962. }
  4963. static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
  4964. .func = bpf_lwt_seg6_action,
  4965. .gpl_only = false,
  4966. .ret_type = RET_INTEGER,
  4967. .arg1_type = ARG_PTR_TO_CTX,
  4968. .arg2_type = ARG_ANYTHING,
  4969. .arg3_type = ARG_PTR_TO_MEM,
  4970. .arg4_type = ARG_CONST_SIZE
  4971. };
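/* Grow (len > 0) or shrink (len < 0) the TLV area of the current SRH at
 * 'offset', fix up the IPv6 payload length and refresh the cached
 * srh_state; the SRH is left marked invalid until revalidated.
 */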
  4972. BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
  4973. s32, len)
  4974. {
  4975. struct seg6_bpf_srh_state *srh_state =
  4976. this_cpu_ptr(&seg6_bpf_srh_states);
  4977. struct ipv6_sr_hdr *srh = srh_state->srh;
  4978. void *srh_end, *srh_tlvs, *ptr;
  4979. struct ipv6hdr *hdr;
  4980. int srhoff = 0;
  4981. int ret;
  4982. if (unlikely(srh == NULL))
  4983. return -EINVAL;
  4984. srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
  4985. ((srh->first_segment + 1) << 4));
  4986. srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
  4987. srh_state->hdrlen);
  4988. ptr = skb->data + offset;
  4989. if (unlikely(ptr < srh_tlvs || ptr > srh_end))
  4990. return -EFAULT;
  4991. if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
  4992. return -EFAULT;
  4993. if (len > 0) {
  4994. ret = skb_cow_head(skb, len);
  4995. if (unlikely(ret < 0))
  4996. return ret;
  4997. ret = bpf_skb_net_hdr_push(skb, offset, len);
  4998. } else {
  4999. ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
  5000. }
  5001. bpf_compute_data_pointers(skb);
  5002. if (unlikely(ret < 0))
  5003. return ret;
  5004. hdr = (struct ipv6hdr *)skb->data;
  5005. hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
  5006. if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
  5007. return -EINVAL;
  5008. srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
  5009. srh_state->hdrlen += len;
  5010. srh_state->valid = false;
  5011. return 0;
  5012. }
  5013. static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
  5014. .func = bpf_lwt_seg6_adjust_srh,
  5015. .gpl_only = false,
  5016. .ret_type = RET_INTEGER,
  5017. .arg1_type = ARG_PTR_TO_CTX,
  5018. .arg2_type = ARG_ANYTHING,
  5019. .arg3_type = ARG_ANYTHING,
  5020. };
  5021. #endif /* CONFIG_IPV6_SEG6_BPF */
  5022. #ifdef CONFIG_INET
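/* Low-level lookup used by the bpf_sk(c)_lookup_* helpers: resolve a
 * TCP or UDP socket from a 4-tuple within the given netns. A reference
 * may be taken on the result ('refcounted'); the BPF program is
 * expected to drop it via bpf_sk_release().
 */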
  5023. static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
  5024. int dif, int sdif, u8 family, u8 proto)
  5025. {
  5026. bool refcounted = false;
  5027. struct sock *sk = NULL;
  5028. if (family == AF_INET) {
  5029. __be32 src4 = tuple->ipv4.saddr;
  5030. __be32 dst4 = tuple->ipv4.daddr;
  5031. if (proto == IPPROTO_TCP)
  5032. sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
  5033. src4, tuple->ipv4.sport,
  5034. dst4, tuple->ipv4.dport,
  5035. dif, sdif, &refcounted);
  5036. else
  5037. sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
  5038. dst4, tuple->ipv4.dport,
  5039. dif, sdif, &udp_table, NULL);
  5040. #if IS_ENABLED(CONFIG_IPV6)
  5041. } else {
  5042. struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
  5043. struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
  5044. if (proto == IPPROTO_TCP)
  5045. sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
  5046. src6, tuple->ipv6.sport,
  5047. dst6, ntohs(tuple->ipv6.dport),
  5048. dif, sdif, &refcounted);
  5049. else if (likely(ipv6_bpf_stub))
  5050. sk = ipv6_bpf_stub->udp6_lib_lookup(net,
  5051. src6, tuple->ipv6.sport,
  5052. dst6, tuple->ipv6.dport,
  5053. dif, sdif,
  5054. &udp_table, NULL);
  5055. #endif
  5056. }
  5057. if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
  5058. WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
  5059. sk = NULL;
  5060. }
  5061. return sk;
  5062. }
5063. /* __bpf_skc_lookup() performs the core lookup for the different socket
5064. * lookup helpers, taking a reference on the socket if it doesn't have
5065. * the flag SOCK_RCU_FREE. The BPF_CALL wrappers below cast the returned
5066. * pointer to 'unsigned long' to satisfy their BPF_CALL declarations.
5067. */
  5068. static struct sock *
  5069. __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
  5070. struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
  5071. u64 flags)
  5072. {
  5073. struct sock *sk = NULL;
  5074. u8 family = AF_UNSPEC;
  5075. struct net *net;
  5076. int sdif;
  5077. if (len == sizeof(tuple->ipv4))
  5078. family = AF_INET;
  5079. else if (len == sizeof(tuple->ipv6))
  5080. family = AF_INET6;
  5081. else
  5082. return NULL;
  5083. if (unlikely(family == AF_UNSPEC || flags ||
  5084. !((s32)netns_id < 0 || netns_id <= S32_MAX)))
  5085. goto out;
  5086. if (family == AF_INET)
  5087. sdif = inet_sdif(skb);
  5088. else
  5089. sdif = inet6_sdif(skb);
  5090. if ((s32)netns_id < 0) {
  5091. net = caller_net;
  5092. sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
  5093. } else {
  5094. net = get_net_ns_by_id(caller_net, netns_id);
  5095. if (unlikely(!net))
  5096. goto out;
  5097. sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
  5098. put_net(net);
  5099. }
  5100. out:
  5101. return sk;
  5102. }
  5103. static struct sock *
  5104. __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
  5105. struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
  5106. u64 flags)
  5107. {
  5108. struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
  5109. ifindex, proto, netns_id, flags);
  5110. if (sk) {
  5111. sk = sk_to_full_sk(sk);
  5112. if (!sk_fullsock(sk)) {
  5113. sock_gen_put(sk);
  5114. return NULL;
  5115. }
  5116. }
  5117. return sk;
  5118. }
  5119. static struct sock *
  5120. bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
  5121. u8 proto, u64 netns_id, u64 flags)
  5122. {
  5123. struct net *caller_net;
  5124. int ifindex;
  5125. if (skb->dev) {
  5126. caller_net = dev_net(skb->dev);
  5127. ifindex = skb->dev->ifindex;
  5128. } else {
  5129. caller_net = sock_net(skb->sk);
  5130. ifindex = 0;
  5131. }
  5132. return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
  5133. netns_id, flags);
  5134. }
  5135. static struct sock *
  5136. bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
  5137. u8 proto, u64 netns_id, u64 flags)
  5138. {
  5139. struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
  5140. flags);
  5141. if (sk) {
  5142. sk = sk_to_full_sk(sk);
  5143. if (!sk_fullsock(sk)) {
  5144. sock_gen_put(sk);
  5145. return NULL;
  5146. }
  5147. }
  5148. return sk;
  5149. }
  5150. BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
  5151. struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  5152. {
  5153. return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
  5154. netns_id, flags);
  5155. }
  5156. static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
  5157. .func = bpf_skc_lookup_tcp,
  5158. .gpl_only = false,
  5159. .pkt_access = true,
  5160. .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
  5161. .arg1_type = ARG_PTR_TO_CTX,
  5162. .arg2_type = ARG_PTR_TO_MEM,
  5163. .arg3_type = ARG_CONST_SIZE,
  5164. .arg4_type = ARG_ANYTHING,
  5165. .arg5_type = ARG_ANYTHING,
  5166. };
  5167. BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
  5168. struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  5169. {
  5170. return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
  5171. netns_id, flags);
  5172. }
  5173. static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
  5174. .func = bpf_sk_lookup_tcp,
  5175. .gpl_only = false,
  5176. .pkt_access = true,
  5177. .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
  5178. .arg1_type = ARG_PTR_TO_CTX,
  5179. .arg2_type = ARG_PTR_TO_MEM,
  5180. .arg3_type = ARG_CONST_SIZE,
  5181. .arg4_type = ARG_ANYTHING,
  5182. .arg5_type = ARG_ANYTHING,
  5183. };
  5184. BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
  5185. struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  5186. {
  5187. return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
  5188. netns_id, flags);
  5189. }
  5190. static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
  5191. .func = bpf_sk_lookup_udp,
  5192. .gpl_only = false,
  5193. .pkt_access = true,
  5194. .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
  5195. .arg1_type = ARG_PTR_TO_CTX,
  5196. .arg2_type = ARG_PTR_TO_MEM,
  5197. .arg3_type = ARG_CONST_SIZE,
  5198. .arg4_type = ARG_ANYTHING,
  5199. .arg5_type = ARG_ANYTHING,
  5200. };
  5201. BPF_CALL_1(bpf_sk_release, struct sock *, sk)
  5202. {
  5203. if (sk && sk_is_refcounted(sk))
  5204. sock_gen_put(sk);
  5205. return 0;
  5206. }
  5207. static const struct bpf_func_proto bpf_sk_release_proto = {
  5208. .func = bpf_sk_release,
  5209. .gpl_only = false,
  5210. .ret_type = RET_INTEGER,
  5211. .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
  5212. };
  5213. BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
  5214. struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
  5215. {
  5216. struct net *caller_net = dev_net(ctx->rxq->dev);
  5217. int ifindex = ctx->rxq->dev->ifindex;
  5218. return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
  5219. ifindex, IPPROTO_UDP, netns_id,
  5220. flags);
  5221. }
  5222. static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
  5223. .func = bpf_xdp_sk_lookup_udp,
  5224. .gpl_only = false,
  5225. .pkt_access = true,
  5226. .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
  5227. .arg1_type = ARG_PTR_TO_CTX,
  5228. .arg2_type = ARG_PTR_TO_MEM,
  5229. .arg3_type = ARG_CONST_SIZE,
  5230. .arg4_type = ARG_ANYTHING,
  5231. .arg5_type = ARG_ANYTHING,
  5232. };
  5233. BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
  5234. struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
  5235. {
  5236. struct net *caller_net = dev_net(ctx->rxq->dev);
  5237. int ifindex = ctx->rxq->dev->ifindex;
  5238. return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
  5239. ifindex, IPPROTO_TCP, netns_id,
  5240. flags);
  5241. }
  5242. static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
  5243. .func = bpf_xdp_skc_lookup_tcp,
  5244. .gpl_only = false,
  5245. .pkt_access = true,
  5246. .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
  5247. .arg1_type = ARG_PTR_TO_CTX,
  5248. .arg2_type = ARG_PTR_TO_MEM,
  5249. .arg3_type = ARG_CONST_SIZE,
  5250. .arg4_type = ARG_ANYTHING,
  5251. .arg5_type = ARG_ANYTHING,
  5252. };
  5253. BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
  5254. struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
  5255. {
  5256. struct net *caller_net = dev_net(ctx->rxq->dev);
  5257. int ifindex = ctx->rxq->dev->ifindex;
  5258. return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
  5259. ifindex, IPPROTO_TCP, netns_id,
  5260. flags);
  5261. }
  5262. static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
  5263. .func = bpf_xdp_sk_lookup_tcp,
  5264. .gpl_only = false,
  5265. .pkt_access = true,
  5266. .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
  5267. .arg1_type = ARG_PTR_TO_CTX,
  5268. .arg2_type = ARG_PTR_TO_MEM,
  5269. .arg3_type = ARG_CONST_SIZE,
  5270. .arg4_type = ARG_ANYTHING,
  5271. .arg5_type = ARG_ANYTHING,
  5272. };
  5273. BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
  5274. struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  5275. {
  5276. return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
  5277. sock_net(ctx->sk), 0,
  5278. IPPROTO_TCP, netns_id, flags);
  5279. }
  5280. static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
  5281. .func = bpf_sock_addr_skc_lookup_tcp,
  5282. .gpl_only = false,
  5283. .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
  5284. .arg1_type = ARG_PTR_TO_CTX,
  5285. .arg2_type = ARG_PTR_TO_MEM,
  5286. .arg3_type = ARG_CONST_SIZE,
  5287. .arg4_type = ARG_ANYTHING,
  5288. .arg5_type = ARG_ANYTHING,
  5289. };
  5290. BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
  5291. struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  5292. {
  5293. return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
  5294. sock_net(ctx->sk), 0, IPPROTO_TCP,
  5295. netns_id, flags);
  5296. }
  5297. static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
  5298. .func = bpf_sock_addr_sk_lookup_tcp,
  5299. .gpl_only = false,
  5300. .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
  5301. .arg1_type = ARG_PTR_TO_CTX,
  5302. .arg2_type = ARG_PTR_TO_MEM,
  5303. .arg3_type = ARG_CONST_SIZE,
  5304. .arg4_type = ARG_ANYTHING,
  5305. .arg5_type = ARG_ANYTHING,
  5306. };
  5307. BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
  5308. struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  5309. {
  5310. return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
  5311. sock_net(ctx->sk), 0, IPPROTO_UDP,
  5312. netns_id, flags);
  5313. }
  5314. static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
  5315. .func = bpf_sock_addr_sk_lookup_udp,
  5316. .gpl_only = false,
  5317. .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
  5318. .arg1_type = ARG_PTR_TO_CTX,
  5319. .arg2_type = ARG_PTR_TO_MEM,
  5320. .arg3_type = ARG_CONST_SIZE,
  5321. .arg4_type = ARG_ANYTHING,
  5322. .arg5_type = ARG_ANYTHING,
  5323. };
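/* Illustrative sketch (not part of this file): from a cgroup/connect4
 * program, the sock_addr variants above can check whether some local
 * socket would already serve the destination being connected to.  Field
 * names are the struct bpf_sock_addr UAPI ones; the port cast is needed
 * because user_port is carried in a 32-bit field.
 *
 *	SEC("cgroup/connect4")
 *	int probe_dst(struct bpf_sock_addr *ctx)
 *	{
 *		struct bpf_sock_tuple tuple = {
 *			.ipv4.daddr = ctx->user_ip4,
 *			.ipv4.dport = (__u16)ctx->user_port,
 *		};
 *		struct bpf_sock *sk;
 *
 *		sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
 *				       BPF_F_CURRENT_NETNS, 0);
 *		if (sk)
 *			bpf_sk_release(sk);
 *		return 1;	// allow the connect() either way
 *	}
 */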
  5324. bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
  5325. struct bpf_insn_access_aux *info)
  5326. {
  5327. if (off < 0 || off >= offsetofend(struct bpf_tcp_sock,
  5328. icsk_retransmits))
  5329. return false;
  5330. if (off % size != 0)
  5331. return false;
  5332. switch (off) {
  5333. case offsetof(struct bpf_tcp_sock, bytes_received):
  5334. case offsetof(struct bpf_tcp_sock, bytes_acked):
  5335. return size == sizeof(__u64);
  5336. default:
  5337. return size == sizeof(__u32);
  5338. }
  5339. }
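/* Consequence for BPF programs (illustrative, not part of this file):
 * loads through a struct bpf_tcp_sock pointer must match the field
 * sizes enforced above, i.e. 8-byte loads for bytes_received and
 * bytes_acked, 4-byte loads for everything else:
 *
 *	__u32 cwnd  = tp->snd_cwnd;		// accepted
 *	__u64 bytes = tp->bytes_received;	// accepted
 *	// a 4-byte load of bytes_received would be rejected
 */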
  5340. u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
  5341. const struct bpf_insn *si,
  5342. struct bpf_insn *insn_buf,
  5343. struct bpf_prog *prog, u32 *target_size)
  5344. {
  5345. struct bpf_insn *insn = insn_buf;
#define BPF_TCP_SOCK_GET_COMMON(FIELD)					\
	do {								\
		BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) >	\
			     sizeof_field(struct bpf_tcp_sock, FIELD));	\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
				      si->dst_reg, si->src_reg,		\
				      offsetof(struct tcp_sock, FIELD)); \
	} while (0)

#define BPF_INET_SOCK_GET_COMMON(FIELD)					\
	do {								\
		BUILD_BUG_ON(sizeof_field(struct inet_connection_sock,	\
					  FIELD) >			\
			     sizeof_field(struct bpf_tcp_sock, FIELD));	\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
					struct inet_connection_sock,	\
					FIELD),				\
				      si->dst_reg, si->src_reg,		\
				      offsetof(				\
					struct inet_connection_sock,	\
					FIELD));			\
	} while (0)
  5367. if (insn > insn_buf)
  5368. return insn - insn_buf;
  5369. switch (si->off) {
  5370. case offsetof(struct bpf_tcp_sock, rtt_min):
  5371. BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
  5372. sizeof(struct minmax));
  5373. BUILD_BUG_ON(sizeof(struct minmax) <
  5374. sizeof(struct minmax_sample));
  5375. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  5376. offsetof(struct tcp_sock, rtt_min) +
  5377. offsetof(struct minmax_sample, v));
  5378. break;
  5379. case offsetof(struct bpf_tcp_sock, snd_cwnd):
  5380. BPF_TCP_SOCK_GET_COMMON(snd_cwnd);
  5381. break;
  5382. case offsetof(struct bpf_tcp_sock, srtt_us):
  5383. BPF_TCP_SOCK_GET_COMMON(srtt_us);
  5384. break;
  5385. case offsetof(struct bpf_tcp_sock, snd_ssthresh):
  5386. BPF_TCP_SOCK_GET_COMMON(snd_ssthresh);
  5387. break;
  5388. case offsetof(struct bpf_tcp_sock, rcv_nxt):
  5389. BPF_TCP_SOCK_GET_COMMON(rcv_nxt);
  5390. break;
  5391. case offsetof(struct bpf_tcp_sock, snd_nxt):
  5392. BPF_TCP_SOCK_GET_COMMON(snd_nxt);
  5393. break;
  5394. case offsetof(struct bpf_tcp_sock, snd_una):
  5395. BPF_TCP_SOCK_GET_COMMON(snd_una);
  5396. break;
  5397. case offsetof(struct bpf_tcp_sock, mss_cache):
  5398. BPF_TCP_SOCK_GET_COMMON(mss_cache);
  5399. break;
  5400. case offsetof(struct bpf_tcp_sock, ecn_flags):
  5401. BPF_TCP_SOCK_GET_COMMON(ecn_flags);
  5402. break;
  5403. case offsetof(struct bpf_tcp_sock, rate_delivered):
  5404. BPF_TCP_SOCK_GET_COMMON(rate_delivered);
  5405. break;
  5406. case offsetof(struct bpf_tcp_sock, rate_interval_us):
  5407. BPF_TCP_SOCK_GET_COMMON(rate_interval_us);
  5408. break;
  5409. case offsetof(struct bpf_tcp_sock, packets_out):
  5410. BPF_TCP_SOCK_GET_COMMON(packets_out);
  5411. break;
  5412. case offsetof(struct bpf_tcp_sock, retrans_out):
  5413. BPF_TCP_SOCK_GET_COMMON(retrans_out);
  5414. break;
  5415. case offsetof(struct bpf_tcp_sock, total_retrans):
  5416. BPF_TCP_SOCK_GET_COMMON(total_retrans);
  5417. break;
  5418. case offsetof(struct bpf_tcp_sock, segs_in):
  5419. BPF_TCP_SOCK_GET_COMMON(segs_in);
  5420. break;
  5421. case offsetof(struct bpf_tcp_sock, data_segs_in):
  5422. BPF_TCP_SOCK_GET_COMMON(data_segs_in);
  5423. break;
  5424. case offsetof(struct bpf_tcp_sock, segs_out):
  5425. BPF_TCP_SOCK_GET_COMMON(segs_out);
  5426. break;
  5427. case offsetof(struct bpf_tcp_sock, data_segs_out):
  5428. BPF_TCP_SOCK_GET_COMMON(data_segs_out);
  5429. break;
  5430. case offsetof(struct bpf_tcp_sock, lost_out):
  5431. BPF_TCP_SOCK_GET_COMMON(lost_out);
  5432. break;
  5433. case offsetof(struct bpf_tcp_sock, sacked_out):
  5434. BPF_TCP_SOCK_GET_COMMON(sacked_out);
  5435. break;
  5436. case offsetof(struct bpf_tcp_sock, bytes_received):
  5437. BPF_TCP_SOCK_GET_COMMON(bytes_received);
  5438. break;
  5439. case offsetof(struct bpf_tcp_sock, bytes_acked):
  5440. BPF_TCP_SOCK_GET_COMMON(bytes_acked);
  5441. break;
  5442. case offsetof(struct bpf_tcp_sock, dsack_dups):
  5443. BPF_TCP_SOCK_GET_COMMON(dsack_dups);
  5444. break;
  5445. case offsetof(struct bpf_tcp_sock, delivered):
  5446. BPF_TCP_SOCK_GET_COMMON(delivered);
  5447. break;
  5448. case offsetof(struct bpf_tcp_sock, delivered_ce):
  5449. BPF_TCP_SOCK_GET_COMMON(delivered_ce);
  5450. break;
  5451. case offsetof(struct bpf_tcp_sock, icsk_retransmits):
  5452. BPF_INET_SOCK_GET_COMMON(icsk_retransmits);
  5453. break;
  5454. }
  5455. return insn - insn_buf;
  5456. }
  5457. BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
  5458. {
  5459. if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
  5460. return (unsigned long)sk;
  5461. return (unsigned long)NULL;
  5462. }
  5463. const struct bpf_func_proto bpf_tcp_sock_proto = {
  5464. .func = bpf_tcp_sock,
  5465. .gpl_only = false,
  5466. .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL,
  5467. .arg1_type = ARG_PTR_TO_SOCK_COMMON,
  5468. };
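/* Illustrative BPF-program-side sketch (not part of this file): a
 * cgroup_skb program walking skb->sk to a struct bpf_tcp_sock in order
 * to sample the congestion window.  Assumes libbpf's bpf_helpers.h.
 *
 *	SEC("cgroup_skb/egress")
 *	int sample_cwnd(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock *sk = skb->sk;
 *		struct bpf_tcp_sock *tp;
 *
 *		if (!sk)
 *			return 1;
 *		sk = bpf_sk_fullsock(sk);
 *		if (!sk || sk->protocol != IPPROTO_TCP)
 *			return 1;
 *		tp = bpf_tcp_sock(sk);
 *		if (tp)
 *			bpf_printk("snd_cwnd=%u", tp->snd_cwnd);
 *		return 1;	// keep the packet
 *	}
 */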
  5469. BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
  5470. {
  5471. sk = sk_to_full_sk(sk);
  5472. if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
  5473. return (unsigned long)sk;
  5474. return (unsigned long)NULL;
  5475. }
  5476. static const struct bpf_func_proto bpf_get_listener_sock_proto = {
  5477. .func = bpf_get_listener_sock,
  5478. .gpl_only = false,
  5479. .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
  5480. .arg1_type = ARG_PTR_TO_SOCK_COMMON,
  5481. };
  5482. BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
  5483. {
  5484. unsigned int iphdr_len;
  5485. switch (skb_protocol(skb, true)) {
  5486. case cpu_to_be16(ETH_P_IP):
  5487. iphdr_len = sizeof(struct iphdr);
  5488. break;
  5489. case cpu_to_be16(ETH_P_IPV6):
  5490. iphdr_len = sizeof(struct ipv6hdr);
  5491. break;
  5492. default:
  5493. return 0;
  5494. }
  5495. if (skb_headlen(skb) < iphdr_len)
  5496. return 0;
  5497. if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
  5498. return 0;
  5499. return INET_ECN_set_ce(skb);
  5500. }
  5501. bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
  5502. struct bpf_insn_access_aux *info)
  5503. {
  5504. if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
  5505. return false;
  5506. if (off % size != 0)
  5507. return false;
  5508. switch (off) {
  5509. default:
  5510. return size == sizeof(__u32);
  5511. }
  5512. }
  5513. u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
  5514. const struct bpf_insn *si,
  5515. struct bpf_insn *insn_buf,
  5516. struct bpf_prog *prog, u32 *target_size)
  5517. {
  5518. struct bpf_insn *insn = insn_buf;
#define BPF_XDP_SOCK_GET(FIELD)						\
	do {								\
		BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) >	\
			     sizeof_field(struct bpf_xdp_sock, FIELD));	\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
				      si->dst_reg, si->src_reg,		\
				      offsetof(struct xdp_sock, FIELD)); \
	} while (0)
  5527. switch (si->off) {
  5528. case offsetof(struct bpf_xdp_sock, queue_id):
  5529. BPF_XDP_SOCK_GET(queue_id);
  5530. break;
  5531. }
  5532. return insn - insn_buf;
  5533. }
  5534. static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
  5535. .func = bpf_skb_ecn_set_ce,
  5536. .gpl_only = false,
  5537. .ret_type = RET_INTEGER,
  5538. .arg1_type = ARG_PTR_TO_CTX,
  5539. };
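/* Illustrative use (not part of this file): a cgroup_skb egress program
 * acting as a minimal AQM can mark Congestion Experienced instead of
 * dropping when the flow is ECN-capable.  'queue_is_congested' is a
 * hypothetical condition maintained by the program:
 *
 *	if (queue_is_congested)
 *		bpf_skb_ecn_set_ce(skb);	// returns 1 if CE was set
 */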
  5540. BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
  5541. struct tcphdr *, th, u32, th_len)
  5542. {
  5543. #ifdef CONFIG_SYN_COOKIES
  5544. u32 cookie;
  5545. int ret;
  5546. if (unlikely(!sk || th_len < sizeof(*th)))
  5547. return -EINVAL;
  5548. /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
  5549. if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
  5550. return -EINVAL;
  5551. if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
  5552. return -EINVAL;
  5553. if (!th->ack || th->rst || th->syn)
  5554. return -ENOENT;
  5555. if (unlikely(iph_len < sizeof(struct iphdr)))
  5556. return -EINVAL;
  5557. if (tcp_synq_no_recent_overflow(sk))
  5558. return -ENOENT;
  5559. cookie = ntohl(th->ack_seq) - 1;
  5560. /* Both struct iphdr and struct ipv6hdr have the version field at the
  5561. * same offset so we can cast to the shorter header (struct iphdr).
  5562. */
  5563. switch (((struct iphdr *)iph)->version) {
  5564. case 4:
  5565. if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk))
  5566. return -EINVAL;
  5567. ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
  5568. break;
  5569. #if IS_BUILTIN(CONFIG_IPV6)
  5570. case 6:
  5571. if (unlikely(iph_len < sizeof(struct ipv6hdr)))
  5572. return -EINVAL;
  5573. if (sk->sk_family != AF_INET6)
  5574. return -EINVAL;
  5575. ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
  5576. break;
  5577. #endif /* CONFIG_IPV6 */
  5578. default:
  5579. return -EPROTONOSUPPORT;
  5580. }
  5581. if (ret > 0)
  5582. return 0;
  5583. return -ENOENT;
  5584. #else
  5585. return -ENOTSUPP;
  5586. #endif
  5587. }
  5588. static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
  5589. .func = bpf_tcp_check_syncookie,
  5590. .gpl_only = true,
  5591. .pkt_access = true,
  5592. .ret_type = RET_INTEGER,
  5593. .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
  5594. .arg2_type = ARG_PTR_TO_MEM,
  5595. .arg3_type = ARG_CONST_SIZE,
  5596. .arg4_type = ARG_PTR_TO_MEM,
  5597. .arg5_type = ARG_CONST_SIZE,
  5598. };
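/* Illustrative sketch (not part of this file): validating that an ACK
 * carries a valid SYN cookie for a listener.  iph/tcph and their
 * lengths are assumed to have been parsed and bounds-checked already;
 * the tuple describes the packet's 4-tuple.
 *
 *	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *				BPF_F_CURRENT_NETNS, 0);
 *	if (sk) {
 *		long err = bpf_tcp_check_syncookie(sk, iph, iph_len,
 *						   tcph, tcph_len);
 *		bpf_sk_release(sk);
 *		if (!err)
 *			return TC_ACT_OK;	// cookie was valid
 *	}
 */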
  5599. BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
  5600. struct tcphdr *, th, u32, th_len)
  5601. {
  5602. #ifdef CONFIG_SYN_COOKIES
  5603. u32 cookie;
  5604. u16 mss;
  5605. if (unlikely(!sk || th_len < sizeof(*th) || th_len != th->doff * 4))
  5606. return -EINVAL;
  5607. if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
  5608. return -EINVAL;
  5609. if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
  5610. return -ENOENT;
  5611. if (!th->syn || th->ack || th->fin || th->rst)
  5612. return -EINVAL;
  5613. if (unlikely(iph_len < sizeof(struct iphdr)))
  5614. return -EINVAL;
  5615. /* Both struct iphdr and struct ipv6hdr have the version field at the
  5616. * same offset so we can cast to the shorter header (struct iphdr).
  5617. */
  5618. switch (((struct iphdr *)iph)->version) {
  5619. case 4:
  5620. if (sk->sk_family == AF_INET6 && sk->sk_ipv6only)
  5621. return -EINVAL;
  5622. mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);
  5623. break;
  5624. #if IS_BUILTIN(CONFIG_IPV6)
  5625. case 6:
  5626. if (unlikely(iph_len < sizeof(struct ipv6hdr)))
  5627. return -EINVAL;
  5628. if (sk->sk_family != AF_INET6)
  5629. return -EINVAL;
  5630. mss = tcp_v6_get_syncookie(sk, iph, th, &cookie);
  5631. break;
  5632. #endif /* CONFIG_IPV6 */
  5633. default:
  5634. return -EPROTONOSUPPORT;
  5635. }
  5636. if (mss == 0)
  5637. return -ENOENT;
  5638. return cookie | ((u64)mss << 32);
  5639. #else
  5640. return -EOPNOTSUPP;
  5641. #endif /* CONFIG_SYN_COOKIES */
  5642. }
  5643. static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
  5644. .func = bpf_tcp_gen_syncookie,
  5645. .gpl_only = true, /* __cookie_v*_init_sequence() is GPL */
  5646. .pkt_access = true,
  5647. .ret_type = RET_INTEGER,
  5648. .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
  5649. .arg2_type = ARG_PTR_TO_MEM,
  5650. .arg3_type = ARG_CONST_SIZE,
  5651. .arg4_type = ARG_PTR_TO_MEM,
  5652. .arg5_type = ARG_CONST_SIZE,
  5653. };
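/* Return-value layout, with an illustrative snippet (not part of this
 * file): on success the helper packs the cookie into the low 32 bits
 * and the MSS into the high 32 bits of the signed 64-bit result;
 * negative values are errors.
 *
 *	__s64 val = bpf_tcp_gen_syncookie(sk, iph, iph_len, tcph, tcph_len);
 *
 *	if (val >= 0) {
 *		__u32 cookie = (__u32)val;
 *		__u16 mss    = (__u16)(val >> 32);
 *		// cookie becomes the SYN-ACK sequence number, mss the
 *		// MSS option to echo back to the peer
 *	}
 */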
  5654. BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
  5655. {
  5656. if (!sk || flags != 0)
  5657. return -EINVAL;
  5658. if (!skb_at_tc_ingress(skb))
  5659. return -EOPNOTSUPP;
  5660. if (unlikely(dev_net(skb->dev) != sock_net(sk)))
  5661. return -ENETUNREACH;
  5662. if (unlikely(sk_fullsock(sk) && sk->sk_reuseport))
  5663. return -ESOCKTNOSUPPORT;
  5664. if (sk_is_refcounted(sk) &&
  5665. unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
  5666. return -ENOENT;
  5667. skb_orphan(skb);
  5668. skb->sk = sk;
  5669. skb->destructor = sock_pfree;
  5670. return 0;
  5671. }
  5672. static const struct bpf_func_proto bpf_sk_assign_proto = {
  5673. .func = bpf_sk_assign,
  5674. .gpl_only = false,
  5675. .ret_type = RET_INTEGER,
  5676. .arg1_type = ARG_PTR_TO_CTX,
  5677. .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
  5678. .arg3_type = ARG_ANYTHING,
  5679. };
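/* Illustrative tc-ingress sketch (not part of this file): TPROXY-style
 * steering.  A socket found for the flow (here via bpf_skc_lookup_tcp())
 * is assigned to the skb so the stack delivers to it instead of doing
 * its own lookup.  bpf_sk_assign() takes its own reference, so the
 * lookup reference is still released.
 *
 *	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *				BPF_F_CURRENT_NETNS, 0);
 *	if (sk) {
 *		long err = bpf_sk_assign(skb, sk, 0);
 *
 *		bpf_sk_release(sk);
 *		if (err)
 *			return TC_ACT_SHOT;
 *	}
 *	return TC_ACT_OK;
 */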
  5680. static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend,
  5681. u8 search_kind, const u8 *magic,
  5682. u8 magic_len, bool *eol)
  5683. {
  5684. u8 kind, kind_len;
  5685. *eol = false;
  5686. while (op < opend) {
  5687. kind = op[0];
  5688. if (kind == TCPOPT_EOL) {
  5689. *eol = true;
  5690. return ERR_PTR(-ENOMSG);
  5691. } else if (kind == TCPOPT_NOP) {
  5692. op++;
  5693. continue;
  5694. }
  5695. if (opend - op < 2 || opend - op < op[1] || op[1] < 2)
  5696. /* Something is wrong in the received header.
  5697. * Follow the TCP stack's tcp_parse_options()
  5698. * and just bail here.
  5699. */
  5700. return ERR_PTR(-EFAULT);
  5701. kind_len = op[1];
  5702. if (search_kind == kind) {
  5703. if (!magic_len)
  5704. return op;
  5705. if (magic_len > kind_len - 2)
  5706. return ERR_PTR(-ENOMSG);
  5707. if (!memcmp(&op[2], magic, magic_len))
  5708. return op;
  5709. }
  5710. op += kind_len;
  5711. }
  5712. return ERR_PTR(-ENOMSG);
  5713. }
  5714. BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
  5715. void *, search_res, u32, len, u64, flags)
  5716. {
  5717. bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN;
  5718. const u8 *op, *opend, *magic, *search = search_res;
  5719. u8 search_kind, search_len, copy_len, magic_len;
  5720. int ret;
	/* 2 bytes is the minimal option length, except for TCPOPT_NOP and
	 * TCPOPT_EOL, which are useless for the bpf prog to learn anything
	 * from and which this helper therefore also disallows loading.
	 */
  5725. if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN)
  5726. return -EINVAL;
  5727. search_kind = search[0];
  5728. search_len = search[1];
  5729. if (search_len > len || search_kind == TCPOPT_NOP ||
  5730. search_kind == TCPOPT_EOL)
  5731. return -EINVAL;
  5732. if (search_kind == TCPOPT_EXP || search_kind == 253) {
  5733. /* 16 or 32 bit magic. +2 for kind and kind length */
  5734. if (search_len != 4 && search_len != 6)
  5735. return -EINVAL;
  5736. magic = &search[2];
  5737. magic_len = search_len - 2;
  5738. } else {
  5739. if (search_len)
  5740. return -EINVAL;
  5741. magic = NULL;
  5742. magic_len = 0;
  5743. }
  5744. if (load_syn) {
  5745. ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op);
  5746. if (ret < 0)
  5747. return ret;
  5748. opend = op + ret;
  5749. op += sizeof(struct tcphdr);
  5750. } else {
  5751. if (!bpf_sock->skb ||
  5752. bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB)
  5753. /* This bpf_sock->op cannot call this helper */
  5754. return -EPERM;
  5755. opend = bpf_sock->skb_data_end;
  5756. op = bpf_sock->skb->data + sizeof(struct tcphdr);
  5757. }
  5758. op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len,
  5759. &eol);
  5760. if (IS_ERR(op))
  5761. return PTR_ERR(op);
  5762. copy_len = op[1];
  5763. ret = copy_len;
  5764. if (copy_len > len) {
  5765. ret = -ENOSPC;
  5766. copy_len = len;
  5767. }
  5768. memcpy(search_res, op, copy_len);
  5769. return ret;
  5770. }
  5771. static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
  5772. .func = bpf_sock_ops_load_hdr_opt,
  5773. .gpl_only = false,
  5774. .ret_type = RET_INTEGER,
  5775. .arg1_type = ARG_PTR_TO_CTX,
  5776. .arg2_type = ARG_PTR_TO_MEM,
  5777. .arg3_type = ARG_CONST_SIZE,
  5778. .arg4_type = ARG_ANYTHING,
  5779. };
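/* Illustrative sockops sketch (not part of this file): searching the
 * current packet for an experimental option (kind 254, RFC 6994) with a
 * 2-byte ExID (0xeB9F here, an arbitrary value).  The first bytes of the
 * buffer describe what to search for; on success the helper overwrites
 * the buffer with the full option and returns its length.
 *
 *	struct {
 *		__u8  kind;
 *		__u8  len;
 *		__be16 exid;
 *		__u8  data[4];
 *	} __attribute__((packed)) opt = {
 *		.kind = 254,			// TCPOPT_EXP
 *		.len  = 4,			// kind + len + 2-byte ExID
 *		.exid = bpf_htons(0xeB9F),
 *	};
 *	long ret = bpf_load_hdr_opt(skops, &opt, sizeof(opt), 0);
 *	// ret > 0: option length copied; -ENOMSG: option not present
 */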
  5780. BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
  5781. const void *, from, u32, len, u64, flags)
  5782. {
  5783. u8 new_kind, new_kind_len, magic_len = 0, *opend;
  5784. const u8 *op, *new_op, *magic = NULL;
  5785. struct sk_buff *skb;
  5786. bool eol;
  5787. if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB)
  5788. return -EPERM;
  5789. if (len < 2 || flags)
  5790. return -EINVAL;
  5791. new_op = from;
  5792. new_kind = new_op[0];
  5793. new_kind_len = new_op[1];
  5794. if (new_kind_len > len || new_kind == TCPOPT_NOP ||
  5795. new_kind == TCPOPT_EOL)
  5796. return -EINVAL;
  5797. if (new_kind_len > bpf_sock->remaining_opt_len)
  5798. return -ENOSPC;
  5799. /* 253 is another experimental kind */
  5800. if (new_kind == TCPOPT_EXP || new_kind == 253) {
  5801. if (new_kind_len < 4)
  5802. return -EINVAL;
		/* Match on the 2-byte magic as well.
		 * RFC 6994: the magic (ExID) can be 2 or 4 bytes.
		 * Matching on 2 bytes only is the conservative choice,
		 * but it is the right thing to do for the
		 * 'search-for-duplication' purpose.
		 */
  5810. magic = &new_op[2];
  5811. magic_len = 2;
  5812. }
  5813. /* Check for duplication */
  5814. skb = bpf_sock->skb;
  5815. op = skb->data + sizeof(struct tcphdr);
  5816. opend = bpf_sock->skb_data_end;
  5817. op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len,
  5818. &eol);
  5819. if (!IS_ERR(op))
  5820. return -EEXIST;
  5821. if (PTR_ERR(op) != -ENOMSG)
  5822. return PTR_ERR(op);
  5823. if (eol)
		/* The option list has been terminated (TCPOPT_EOL seen).
		 * Treat it as if no more header options can be written.
		 */
  5827. return -ENOSPC;
  5828. /* No duplication found. Store the header option. */
  5829. memcpy(opend, from, new_kind_len);
  5830. bpf_sock->remaining_opt_len -= new_kind_len;
  5831. bpf_sock->skb_data_end += new_kind_len;
  5832. return 0;
  5833. }
  5834. static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = {
  5835. .func = bpf_sock_ops_store_hdr_opt,
  5836. .gpl_only = false,
  5837. .ret_type = RET_INTEGER,
  5838. .arg1_type = ARG_PTR_TO_CTX,
  5839. .arg2_type = ARG_PTR_TO_MEM,
  5840. .arg3_type = ARG_CONST_SIZE,
  5841. .arg4_type = ARG_ANYTHING,
  5842. };
  5843. BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
  5844. u32, len, u64, flags)
  5845. {
  5846. if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB)
  5847. return -EPERM;
  5848. if (flags || len < 2)
  5849. return -EINVAL;
  5850. if (len > bpf_sock->remaining_opt_len)
  5851. return -ENOSPC;
  5852. bpf_sock->remaining_opt_len -= len;
  5853. return 0;
  5854. }
  5855. static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = {
  5856. .func = bpf_sock_ops_reserve_hdr_opt,
  5857. .gpl_only = false,
  5858. .ret_type = RET_INTEGER,
  5859. .arg1_type = ARG_PTR_TO_CTX,
  5860. .arg2_type = ARG_ANYTHING,
  5861. .arg3_type = ARG_ANYTHING,
  5862. };
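/* Illustrative sketch of the reserve/store pairing (not part of this
 * file): a sockops program reserves space from the
 * BPF_SOCK_OPS_HDR_OPT_LEN_CB callback and writes the option from
 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.  'my_opt' is a hypothetical, fully
 * formed TCP option (kind, length, payload).
 *
 *	switch (skops->op) {
 *	case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
 *		bpf_reserve_hdr_opt(skops, sizeof(my_opt), 0);
 *		break;
 *	case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
 *		bpf_store_hdr_opt(skops, &my_opt, sizeof(my_opt), 0);
 *		break;
 *	}
 *	// Both callbacks are only delivered if the program has enabled
 *	// BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG via bpf_sock_ops_cb_flags_set().
 */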
  5863. #endif /* CONFIG_INET */
  5864. bool bpf_helper_changes_pkt_data(void *func)
  5865. {
  5866. if (func == bpf_skb_vlan_push ||
  5867. func == bpf_skb_vlan_pop ||
  5868. func == bpf_skb_store_bytes ||
  5869. func == bpf_skb_change_proto ||
  5870. func == bpf_skb_change_head ||
  5871. func == sk_skb_change_head ||
  5872. func == bpf_skb_change_tail ||
  5873. func == sk_skb_change_tail ||
  5874. func == bpf_skb_adjust_room ||
  5875. func == sk_skb_adjust_room ||
  5876. func == bpf_skb_pull_data ||
  5877. func == sk_skb_pull_data ||
  5878. func == bpf_clone_redirect ||
  5879. func == bpf_l3_csum_replace ||
  5880. func == bpf_l4_csum_replace ||
  5881. func == bpf_xdp_adjust_head ||
  5882. func == bpf_xdp_adjust_meta ||
  5883. func == bpf_msg_pull_data ||
  5884. func == bpf_msg_push_data ||
  5885. func == bpf_msg_pop_data ||
  5886. func == bpf_xdp_adjust_tail ||
  5887. #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
  5888. func == bpf_lwt_seg6_store_bytes ||
  5889. func == bpf_lwt_seg6_adjust_srh ||
  5890. func == bpf_lwt_seg6_action ||
  5891. #endif
  5892. #ifdef CONFIG_INET
  5893. func == bpf_sock_ops_store_hdr_opt ||
  5894. #endif
  5895. func == bpf_lwt_in_push_encap ||
  5896. func == bpf_lwt_xmit_push_encap)
  5897. return true;
  5898. return false;
  5899. }
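/* Practical consequence for BPF programs (illustrative, not part of this
 * file): after calling any helper listed above, previously derived
 * packet pointers are invalidated by the verifier and must be reloaded
 * and bounds-checked again:
 *
 *	bpf_skb_pull_data(skb, 0);
 *	data     = (void *)(long)skb->data;
 *	data_end = (void *)(long)skb->data_end;
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return TC_ACT_OK;
 */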
  5900. const struct bpf_func_proto bpf_event_output_data_proto __weak;
  5901. const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak;
  5902. static const struct bpf_func_proto *
  5903. sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  5904. {
  5905. switch (func_id) {
  5906. /* inet and inet6 sockets are created in a process
  5907. * context so there is always a valid uid/gid
  5908. */
  5909. case BPF_FUNC_get_current_uid_gid:
  5910. return &bpf_get_current_uid_gid_proto;
  5911. case BPF_FUNC_get_local_storage:
  5912. return &bpf_get_local_storage_proto;
  5913. case BPF_FUNC_get_socket_cookie:
  5914. return &bpf_get_socket_cookie_sock_proto;
  5915. case BPF_FUNC_get_netns_cookie:
  5916. return &bpf_get_netns_cookie_sock_proto;
  5917. case BPF_FUNC_perf_event_output:
  5918. return &bpf_event_output_data_proto;
  5919. case BPF_FUNC_get_current_pid_tgid:
  5920. return &bpf_get_current_pid_tgid_proto;
  5921. case BPF_FUNC_get_current_comm:
  5922. return &bpf_get_current_comm_proto;
  5923. #ifdef CONFIG_CGROUPS
  5924. case BPF_FUNC_get_current_cgroup_id:
  5925. return &bpf_get_current_cgroup_id_proto;
  5926. case BPF_FUNC_get_current_ancestor_cgroup_id:
  5927. return &bpf_get_current_ancestor_cgroup_id_proto;
  5928. #endif
  5929. #ifdef CONFIG_CGROUP_NET_CLASSID
  5930. case BPF_FUNC_get_cgroup_classid:
  5931. return &bpf_get_cgroup_classid_curr_proto;
  5932. #endif
  5933. case BPF_FUNC_sk_storage_get:
  5934. return &bpf_sk_storage_get_cg_sock_proto;
  5935. default:
  5936. return bpf_base_func_proto(func_id);
  5937. }
  5938. }
  5939. static const struct bpf_func_proto *
  5940. sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  5941. {
  5942. switch (func_id) {
  5943. /* inet and inet6 sockets are created in a process
  5944. * context so there is always a valid uid/gid
  5945. */
  5946. case BPF_FUNC_get_current_uid_gid:
  5947. return &bpf_get_current_uid_gid_proto;
  5948. case BPF_FUNC_bind:
  5949. switch (prog->expected_attach_type) {
  5950. case BPF_CGROUP_INET4_CONNECT:
  5951. case BPF_CGROUP_INET6_CONNECT:
  5952. return &bpf_bind_proto;
  5953. default:
  5954. return NULL;
  5955. }
  5956. case BPF_FUNC_get_socket_cookie:
  5957. return &bpf_get_socket_cookie_sock_addr_proto;
  5958. case BPF_FUNC_get_netns_cookie:
  5959. return &bpf_get_netns_cookie_sock_addr_proto;
  5960. case BPF_FUNC_get_local_storage:
  5961. return &bpf_get_local_storage_proto;
  5962. case BPF_FUNC_perf_event_output:
  5963. return &bpf_event_output_data_proto;
  5964. case BPF_FUNC_get_current_pid_tgid:
  5965. return &bpf_get_current_pid_tgid_proto;
  5966. case BPF_FUNC_get_current_comm:
  5967. return &bpf_get_current_comm_proto;
  5968. #ifdef CONFIG_CGROUPS
  5969. case BPF_FUNC_get_current_cgroup_id:
  5970. return &bpf_get_current_cgroup_id_proto;
  5971. case BPF_FUNC_get_current_ancestor_cgroup_id:
  5972. return &bpf_get_current_ancestor_cgroup_id_proto;
  5973. #endif
  5974. #ifdef CONFIG_CGROUP_NET_CLASSID
  5975. case BPF_FUNC_get_cgroup_classid:
  5976. return &bpf_get_cgroup_classid_curr_proto;
  5977. #endif
  5978. #ifdef CONFIG_INET
  5979. case BPF_FUNC_sk_lookup_tcp:
  5980. return &bpf_sock_addr_sk_lookup_tcp_proto;
  5981. case BPF_FUNC_sk_lookup_udp:
  5982. return &bpf_sock_addr_sk_lookup_udp_proto;
  5983. case BPF_FUNC_sk_release:
  5984. return &bpf_sk_release_proto;
  5985. case BPF_FUNC_skc_lookup_tcp:
  5986. return &bpf_sock_addr_skc_lookup_tcp_proto;
  5987. #endif /* CONFIG_INET */
  5988. case BPF_FUNC_sk_storage_get:
  5989. return &bpf_sk_storage_get_proto;
  5990. case BPF_FUNC_sk_storage_delete:
  5991. return &bpf_sk_storage_delete_proto;
  5992. case BPF_FUNC_setsockopt:
  5993. switch (prog->expected_attach_type) {
  5994. case BPF_CGROUP_INET4_CONNECT:
  5995. case BPF_CGROUP_INET6_CONNECT:
  5996. return &bpf_sock_addr_setsockopt_proto;
  5997. default:
  5998. return NULL;
  5999. }
  6000. case BPF_FUNC_getsockopt:
  6001. switch (prog->expected_attach_type) {
  6002. case BPF_CGROUP_INET4_CONNECT:
  6003. case BPF_CGROUP_INET6_CONNECT:
  6004. return &bpf_sock_addr_getsockopt_proto;
  6005. default:
  6006. return NULL;
  6007. }
  6008. default:
  6009. return bpf_sk_base_func_proto(func_id);
  6010. }
  6011. }
  6012. static const struct bpf_func_proto *
  6013. sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6014. {
  6015. switch (func_id) {
  6016. case BPF_FUNC_skb_load_bytes:
  6017. return &bpf_skb_load_bytes_proto;
  6018. case BPF_FUNC_skb_load_bytes_relative:
  6019. return &bpf_skb_load_bytes_relative_proto;
  6020. case BPF_FUNC_get_socket_cookie:
  6021. return &bpf_get_socket_cookie_proto;
  6022. case BPF_FUNC_get_socket_uid:
  6023. return &bpf_get_socket_uid_proto;
  6024. case BPF_FUNC_perf_event_output:
  6025. return &bpf_skb_event_output_proto;
  6026. default:
  6027. return bpf_sk_base_func_proto(func_id);
  6028. }
  6029. }
  6030. const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
  6031. const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;
  6032. static const struct bpf_func_proto *
  6033. cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6034. {
  6035. switch (func_id) {
  6036. case BPF_FUNC_get_local_storage:
  6037. return &bpf_get_local_storage_proto;
  6038. case BPF_FUNC_sk_fullsock:
  6039. return &bpf_sk_fullsock_proto;
  6040. case BPF_FUNC_sk_storage_get:
  6041. return &bpf_sk_storage_get_proto;
  6042. case BPF_FUNC_sk_storage_delete:
  6043. return &bpf_sk_storage_delete_proto;
  6044. case BPF_FUNC_perf_event_output:
  6045. return &bpf_skb_event_output_proto;
  6046. #ifdef CONFIG_SOCK_CGROUP_DATA
  6047. case BPF_FUNC_skb_cgroup_id:
  6048. return &bpf_skb_cgroup_id_proto;
  6049. case BPF_FUNC_skb_ancestor_cgroup_id:
  6050. return &bpf_skb_ancestor_cgroup_id_proto;
  6051. case BPF_FUNC_sk_cgroup_id:
  6052. return &bpf_sk_cgroup_id_proto;
  6053. case BPF_FUNC_sk_ancestor_cgroup_id:
  6054. return &bpf_sk_ancestor_cgroup_id_proto;
  6055. #endif
  6056. #ifdef CONFIG_INET
  6057. case BPF_FUNC_sk_lookup_tcp:
  6058. return &bpf_sk_lookup_tcp_proto;
  6059. case BPF_FUNC_sk_lookup_udp:
  6060. return &bpf_sk_lookup_udp_proto;
  6061. case BPF_FUNC_sk_release:
  6062. return &bpf_sk_release_proto;
  6063. case BPF_FUNC_skc_lookup_tcp:
  6064. return &bpf_skc_lookup_tcp_proto;
  6065. case BPF_FUNC_tcp_sock:
  6066. return &bpf_tcp_sock_proto;
  6067. case BPF_FUNC_get_listener_sock:
  6068. return &bpf_get_listener_sock_proto;
  6069. case BPF_FUNC_skb_ecn_set_ce:
  6070. return &bpf_skb_ecn_set_ce_proto;
  6071. #endif
  6072. default:
  6073. return sk_filter_func_proto(func_id, prog);
  6074. }
  6075. }
  6076. static const struct bpf_func_proto *
  6077. tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6078. {
  6079. switch (func_id) {
  6080. case BPF_FUNC_skb_store_bytes:
  6081. return &bpf_skb_store_bytes_proto;
  6082. case BPF_FUNC_skb_load_bytes:
  6083. return &bpf_skb_load_bytes_proto;
  6084. case BPF_FUNC_skb_load_bytes_relative:
  6085. return &bpf_skb_load_bytes_relative_proto;
  6086. case BPF_FUNC_skb_pull_data:
  6087. return &bpf_skb_pull_data_proto;
  6088. case BPF_FUNC_csum_diff:
  6089. return &bpf_csum_diff_proto;
  6090. case BPF_FUNC_csum_update:
  6091. return &bpf_csum_update_proto;
  6092. case BPF_FUNC_csum_level:
  6093. return &bpf_csum_level_proto;
  6094. case BPF_FUNC_l3_csum_replace:
  6095. return &bpf_l3_csum_replace_proto;
  6096. case BPF_FUNC_l4_csum_replace:
  6097. return &bpf_l4_csum_replace_proto;
  6098. case BPF_FUNC_clone_redirect:
  6099. return &bpf_clone_redirect_proto;
  6100. case BPF_FUNC_get_cgroup_classid:
  6101. return &bpf_get_cgroup_classid_proto;
  6102. case BPF_FUNC_skb_vlan_push:
  6103. return &bpf_skb_vlan_push_proto;
  6104. case BPF_FUNC_skb_vlan_pop:
  6105. return &bpf_skb_vlan_pop_proto;
  6106. case BPF_FUNC_skb_change_proto:
  6107. return &bpf_skb_change_proto_proto;
  6108. case BPF_FUNC_skb_change_type:
  6109. return &bpf_skb_change_type_proto;
  6110. case BPF_FUNC_skb_adjust_room:
  6111. return &bpf_skb_adjust_room_proto;
  6112. case BPF_FUNC_skb_change_tail:
  6113. return &bpf_skb_change_tail_proto;
  6114. case BPF_FUNC_skb_change_head:
  6115. return &bpf_skb_change_head_proto;
  6116. case BPF_FUNC_skb_get_tunnel_key:
  6117. return &bpf_skb_get_tunnel_key_proto;
  6118. case BPF_FUNC_skb_set_tunnel_key:
  6119. return bpf_get_skb_set_tunnel_proto(func_id);
  6120. case BPF_FUNC_skb_get_tunnel_opt:
  6121. return &bpf_skb_get_tunnel_opt_proto;
  6122. case BPF_FUNC_skb_set_tunnel_opt:
  6123. return bpf_get_skb_set_tunnel_proto(func_id);
  6124. case BPF_FUNC_redirect:
  6125. return &bpf_redirect_proto;
  6126. case BPF_FUNC_redirect_neigh:
  6127. return &bpf_redirect_neigh_proto;
  6128. case BPF_FUNC_redirect_peer:
  6129. return &bpf_redirect_peer_proto;
  6130. case BPF_FUNC_get_route_realm:
  6131. return &bpf_get_route_realm_proto;
  6132. case BPF_FUNC_get_hash_recalc:
  6133. return &bpf_get_hash_recalc_proto;
  6134. case BPF_FUNC_set_hash_invalid:
  6135. return &bpf_set_hash_invalid_proto;
  6136. case BPF_FUNC_set_hash:
  6137. return &bpf_set_hash_proto;
  6138. case BPF_FUNC_perf_event_output:
  6139. return &bpf_skb_event_output_proto;
  6140. case BPF_FUNC_get_smp_processor_id:
  6141. return &bpf_get_smp_processor_id_proto;
  6142. case BPF_FUNC_skb_under_cgroup:
  6143. return &bpf_skb_under_cgroup_proto;
  6144. case BPF_FUNC_get_socket_cookie:
  6145. return &bpf_get_socket_cookie_proto;
  6146. case BPF_FUNC_get_socket_uid:
  6147. return &bpf_get_socket_uid_proto;
  6148. case BPF_FUNC_fib_lookup:
  6149. return &bpf_skb_fib_lookup_proto;
  6150. case BPF_FUNC_sk_fullsock:
  6151. return &bpf_sk_fullsock_proto;
  6152. case BPF_FUNC_sk_storage_get:
  6153. return &bpf_sk_storage_get_proto;
  6154. case BPF_FUNC_sk_storage_delete:
  6155. return &bpf_sk_storage_delete_proto;
  6156. #ifdef CONFIG_XFRM
  6157. case BPF_FUNC_skb_get_xfrm_state:
  6158. return &bpf_skb_get_xfrm_state_proto;
  6159. #endif
  6160. #ifdef CONFIG_CGROUP_NET_CLASSID
  6161. case BPF_FUNC_skb_cgroup_classid:
  6162. return &bpf_skb_cgroup_classid_proto;
  6163. #endif
  6164. #ifdef CONFIG_SOCK_CGROUP_DATA
  6165. case BPF_FUNC_skb_cgroup_id:
  6166. return &bpf_skb_cgroup_id_proto;
  6167. case BPF_FUNC_skb_ancestor_cgroup_id:
  6168. return &bpf_skb_ancestor_cgroup_id_proto;
  6169. #endif
  6170. #ifdef CONFIG_INET
  6171. case BPF_FUNC_sk_lookup_tcp:
  6172. return &bpf_sk_lookup_tcp_proto;
  6173. case BPF_FUNC_sk_lookup_udp:
  6174. return &bpf_sk_lookup_udp_proto;
  6175. case BPF_FUNC_sk_release:
  6176. return &bpf_sk_release_proto;
  6177. case BPF_FUNC_tcp_sock:
  6178. return &bpf_tcp_sock_proto;
  6179. case BPF_FUNC_get_listener_sock:
  6180. return &bpf_get_listener_sock_proto;
  6181. case BPF_FUNC_skc_lookup_tcp:
  6182. return &bpf_skc_lookup_tcp_proto;
  6183. case BPF_FUNC_tcp_check_syncookie:
  6184. return &bpf_tcp_check_syncookie_proto;
  6185. case BPF_FUNC_skb_ecn_set_ce:
  6186. return &bpf_skb_ecn_set_ce_proto;
  6187. case BPF_FUNC_tcp_gen_syncookie:
  6188. return &bpf_tcp_gen_syncookie_proto;
  6189. case BPF_FUNC_sk_assign:
  6190. return &bpf_sk_assign_proto;
  6191. #endif
  6192. default:
  6193. return bpf_sk_base_func_proto(func_id);
  6194. }
  6195. }
  6196. static const struct bpf_func_proto *
  6197. xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6198. {
  6199. switch (func_id) {
  6200. case BPF_FUNC_perf_event_output:
  6201. return &bpf_xdp_event_output_proto;
  6202. case BPF_FUNC_get_smp_processor_id:
  6203. return &bpf_get_smp_processor_id_proto;
  6204. case BPF_FUNC_csum_diff:
  6205. return &bpf_csum_diff_proto;
  6206. case BPF_FUNC_xdp_adjust_head:
  6207. return &bpf_xdp_adjust_head_proto;
  6208. case BPF_FUNC_xdp_adjust_meta:
  6209. return &bpf_xdp_adjust_meta_proto;
  6210. case BPF_FUNC_redirect:
  6211. return &bpf_xdp_redirect_proto;
  6212. case BPF_FUNC_redirect_map:
  6213. return &bpf_xdp_redirect_map_proto;
  6214. case BPF_FUNC_xdp_adjust_tail:
  6215. return &bpf_xdp_adjust_tail_proto;
  6216. case BPF_FUNC_fib_lookup:
  6217. return &bpf_xdp_fib_lookup_proto;
  6218. #ifdef CONFIG_INET
  6219. case BPF_FUNC_sk_lookup_udp:
  6220. return &bpf_xdp_sk_lookup_udp_proto;
  6221. case BPF_FUNC_sk_lookup_tcp:
  6222. return &bpf_xdp_sk_lookup_tcp_proto;
  6223. case BPF_FUNC_sk_release:
  6224. return &bpf_sk_release_proto;
  6225. case BPF_FUNC_skc_lookup_tcp:
  6226. return &bpf_xdp_skc_lookup_tcp_proto;
  6227. case BPF_FUNC_tcp_check_syncookie:
  6228. return &bpf_tcp_check_syncookie_proto;
  6229. case BPF_FUNC_tcp_gen_syncookie:
  6230. return &bpf_tcp_gen_syncookie_proto;
  6231. #endif
  6232. default:
  6233. return bpf_sk_base_func_proto(func_id);
  6234. }
  6235. }
  6236. const struct bpf_func_proto bpf_sock_map_update_proto __weak;
  6237. const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
  6238. static const struct bpf_func_proto *
  6239. sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6240. {
  6241. switch (func_id) {
  6242. case BPF_FUNC_setsockopt:
  6243. return &bpf_sock_ops_setsockopt_proto;
  6244. case BPF_FUNC_getsockopt:
  6245. return &bpf_sock_ops_getsockopt_proto;
  6246. case BPF_FUNC_sock_ops_cb_flags_set:
  6247. return &bpf_sock_ops_cb_flags_set_proto;
  6248. case BPF_FUNC_sock_map_update:
  6249. return &bpf_sock_map_update_proto;
  6250. case BPF_FUNC_sock_hash_update:
  6251. return &bpf_sock_hash_update_proto;
  6252. case BPF_FUNC_get_socket_cookie:
  6253. return &bpf_get_socket_cookie_sock_ops_proto;
  6254. case BPF_FUNC_get_local_storage:
  6255. return &bpf_get_local_storage_proto;
  6256. case BPF_FUNC_perf_event_output:
  6257. return &bpf_event_output_data_proto;
  6258. case BPF_FUNC_sk_storage_get:
  6259. return &bpf_sk_storage_get_proto;
  6260. case BPF_FUNC_sk_storage_delete:
  6261. return &bpf_sk_storage_delete_proto;
  6262. #ifdef CONFIG_INET
  6263. case BPF_FUNC_load_hdr_opt:
  6264. return &bpf_sock_ops_load_hdr_opt_proto;
  6265. case BPF_FUNC_store_hdr_opt:
  6266. return &bpf_sock_ops_store_hdr_opt_proto;
  6267. case BPF_FUNC_reserve_hdr_opt:
  6268. return &bpf_sock_ops_reserve_hdr_opt_proto;
  6269. case BPF_FUNC_tcp_sock:
  6270. return &bpf_tcp_sock_proto;
  6271. #endif /* CONFIG_INET */
  6272. default:
  6273. return bpf_sk_base_func_proto(func_id);
  6274. }
  6275. }
  6276. const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
  6277. const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
  6278. static const struct bpf_func_proto *
  6279. sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6280. {
  6281. switch (func_id) {
  6282. case BPF_FUNC_msg_redirect_map:
  6283. return &bpf_msg_redirect_map_proto;
  6284. case BPF_FUNC_msg_redirect_hash:
  6285. return &bpf_msg_redirect_hash_proto;
  6286. case BPF_FUNC_msg_apply_bytes:
  6287. return &bpf_msg_apply_bytes_proto;
  6288. case BPF_FUNC_msg_cork_bytes:
  6289. return &bpf_msg_cork_bytes_proto;
  6290. case BPF_FUNC_msg_pull_data:
  6291. return &bpf_msg_pull_data_proto;
  6292. case BPF_FUNC_msg_push_data:
  6293. return &bpf_msg_push_data_proto;
  6294. case BPF_FUNC_msg_pop_data:
  6295. return &bpf_msg_pop_data_proto;
  6296. case BPF_FUNC_perf_event_output:
  6297. return &bpf_event_output_data_proto;
  6298. case BPF_FUNC_get_current_uid_gid:
  6299. return &bpf_get_current_uid_gid_proto;
  6300. case BPF_FUNC_get_current_pid_tgid:
  6301. return &bpf_get_current_pid_tgid_proto;
  6302. case BPF_FUNC_sk_storage_get:
  6303. return &bpf_sk_storage_get_proto;
  6304. case BPF_FUNC_sk_storage_delete:
  6305. return &bpf_sk_storage_delete_proto;
  6306. #ifdef CONFIG_CGROUPS
  6307. case BPF_FUNC_get_current_cgroup_id:
  6308. return &bpf_get_current_cgroup_id_proto;
  6309. case BPF_FUNC_get_current_ancestor_cgroup_id:
  6310. return &bpf_get_current_ancestor_cgroup_id_proto;
  6311. #endif
  6312. #ifdef CONFIG_CGROUP_NET_CLASSID
  6313. case BPF_FUNC_get_cgroup_classid:
  6314. return &bpf_get_cgroup_classid_curr_proto;
  6315. #endif
  6316. default:
  6317. return bpf_sk_base_func_proto(func_id);
  6318. }
  6319. }
  6320. const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
  6321. const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
  6322. static const struct bpf_func_proto *
  6323. sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6324. {
  6325. switch (func_id) {
  6326. case BPF_FUNC_skb_store_bytes:
  6327. return &bpf_skb_store_bytes_proto;
  6328. case BPF_FUNC_skb_load_bytes:
  6329. return &bpf_skb_load_bytes_proto;
  6330. case BPF_FUNC_skb_pull_data:
  6331. return &sk_skb_pull_data_proto;
  6332. case BPF_FUNC_skb_change_tail:
  6333. return &sk_skb_change_tail_proto;
  6334. case BPF_FUNC_skb_change_head:
  6335. return &sk_skb_change_head_proto;
  6336. case BPF_FUNC_skb_adjust_room:
  6337. return &sk_skb_adjust_room_proto;
  6338. case BPF_FUNC_get_socket_cookie:
  6339. return &bpf_get_socket_cookie_proto;
  6340. case BPF_FUNC_get_socket_uid:
  6341. return &bpf_get_socket_uid_proto;
  6342. case BPF_FUNC_sk_redirect_map:
  6343. return &bpf_sk_redirect_map_proto;
  6344. case BPF_FUNC_sk_redirect_hash:
  6345. return &bpf_sk_redirect_hash_proto;
  6346. case BPF_FUNC_perf_event_output:
  6347. return &bpf_skb_event_output_proto;
  6348. #ifdef CONFIG_INET
  6349. case BPF_FUNC_sk_lookup_tcp:
  6350. return &bpf_sk_lookup_tcp_proto;
  6351. case BPF_FUNC_sk_lookup_udp:
  6352. return &bpf_sk_lookup_udp_proto;
  6353. case BPF_FUNC_sk_release:
  6354. return &bpf_sk_release_proto;
  6355. case BPF_FUNC_skc_lookup_tcp:
  6356. return &bpf_skc_lookup_tcp_proto;
  6357. #endif
  6358. default:
  6359. return bpf_sk_base_func_proto(func_id);
  6360. }
  6361. }
  6362. static const struct bpf_func_proto *
  6363. flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6364. {
  6365. switch (func_id) {
  6366. case BPF_FUNC_skb_load_bytes:
  6367. return &bpf_flow_dissector_load_bytes_proto;
  6368. default:
  6369. return bpf_sk_base_func_proto(func_id);
  6370. }
  6371. }
  6372. static const struct bpf_func_proto *
  6373. lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6374. {
  6375. switch (func_id) {
  6376. case BPF_FUNC_skb_load_bytes:
  6377. return &bpf_skb_load_bytes_proto;
  6378. case BPF_FUNC_skb_pull_data:
  6379. return &bpf_skb_pull_data_proto;
  6380. case BPF_FUNC_csum_diff:
  6381. return &bpf_csum_diff_proto;
  6382. case BPF_FUNC_get_cgroup_classid:
  6383. return &bpf_get_cgroup_classid_proto;
  6384. case BPF_FUNC_get_route_realm:
  6385. return &bpf_get_route_realm_proto;
  6386. case BPF_FUNC_get_hash_recalc:
  6387. return &bpf_get_hash_recalc_proto;
  6388. case BPF_FUNC_perf_event_output:
  6389. return &bpf_skb_event_output_proto;
  6390. case BPF_FUNC_get_smp_processor_id:
  6391. return &bpf_get_smp_processor_id_proto;
  6392. case BPF_FUNC_skb_under_cgroup:
  6393. return &bpf_skb_under_cgroup_proto;
  6394. default:
  6395. return bpf_sk_base_func_proto(func_id);
  6396. }
  6397. }
  6398. static const struct bpf_func_proto *
  6399. lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6400. {
  6401. switch (func_id) {
  6402. case BPF_FUNC_lwt_push_encap:
  6403. return &bpf_lwt_in_push_encap_proto;
  6404. default:
  6405. return lwt_out_func_proto(func_id, prog);
  6406. }
  6407. }
  6408. static const struct bpf_func_proto *
  6409. lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6410. {
  6411. switch (func_id) {
  6412. case BPF_FUNC_skb_get_tunnel_key:
  6413. return &bpf_skb_get_tunnel_key_proto;
  6414. case BPF_FUNC_skb_set_tunnel_key:
  6415. return bpf_get_skb_set_tunnel_proto(func_id);
  6416. case BPF_FUNC_skb_get_tunnel_opt:
  6417. return &bpf_skb_get_tunnel_opt_proto;
  6418. case BPF_FUNC_skb_set_tunnel_opt:
  6419. return bpf_get_skb_set_tunnel_proto(func_id);
  6420. case BPF_FUNC_redirect:
  6421. return &bpf_redirect_proto;
  6422. case BPF_FUNC_clone_redirect:
  6423. return &bpf_clone_redirect_proto;
  6424. case BPF_FUNC_skb_change_tail:
  6425. return &bpf_skb_change_tail_proto;
  6426. case BPF_FUNC_skb_change_head:
  6427. return &bpf_skb_change_head_proto;
  6428. case BPF_FUNC_skb_store_bytes:
  6429. return &bpf_skb_store_bytes_proto;
  6430. case BPF_FUNC_csum_update:
  6431. return &bpf_csum_update_proto;
  6432. case BPF_FUNC_csum_level:
  6433. return &bpf_csum_level_proto;
  6434. case BPF_FUNC_l3_csum_replace:
  6435. return &bpf_l3_csum_replace_proto;
  6436. case BPF_FUNC_l4_csum_replace:
  6437. return &bpf_l4_csum_replace_proto;
  6438. case BPF_FUNC_set_hash_invalid:
  6439. return &bpf_set_hash_invalid_proto;
  6440. case BPF_FUNC_lwt_push_encap:
  6441. return &bpf_lwt_xmit_push_encap_proto;
  6442. default:
  6443. return lwt_out_func_proto(func_id, prog);
  6444. }
  6445. }
  6446. static const struct bpf_func_proto *
  6447. lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  6448. {
  6449. switch (func_id) {
  6450. #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
  6451. case BPF_FUNC_lwt_seg6_store_bytes:
  6452. return &bpf_lwt_seg6_store_bytes_proto;
  6453. case BPF_FUNC_lwt_seg6_action:
  6454. return &bpf_lwt_seg6_action_proto;
  6455. case BPF_FUNC_lwt_seg6_adjust_srh:
  6456. return &bpf_lwt_seg6_adjust_srh_proto;
  6457. #endif
  6458. default:
  6459. return lwt_out_func_proto(func_id, prog);
  6460. }
  6461. }
  6462. static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
  6463. const struct bpf_prog *prog,
  6464. struct bpf_insn_access_aux *info)
  6465. {
  6466. const int size_default = sizeof(__u32);
  6467. if (off < 0 || off >= sizeof(struct __sk_buff))
  6468. return false;
  6469. /* The verifier guarantees that size > 0. */
  6470. if (off % size != 0)
  6471. return false;
  6472. switch (off) {
  6473. case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
  6474. if (off + size > offsetofend(struct __sk_buff, cb[4]))
  6475. return false;
  6476. break;
  6477. case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
  6478. case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
  6479. case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
  6480. case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
  6481. case bpf_ctx_range(struct __sk_buff, data):
  6482. case bpf_ctx_range(struct __sk_buff, data_meta):
  6483. case bpf_ctx_range(struct __sk_buff, data_end):
  6484. if (size != size_default)
  6485. return false;
  6486. break;
  6487. case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
  6488. return false;
  6489. case bpf_ctx_range(struct __sk_buff, tstamp):
  6490. if (size != sizeof(__u64))
  6491. return false;
  6492. break;
  6493. case offsetof(struct __sk_buff, sk):
  6494. if (type == BPF_WRITE || size != sizeof(__u64))
  6495. return false;
  6496. info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
  6497. break;
  6498. default:
  6499. /* Only narrow read access allowed for now. */
  6500. if (type == BPF_WRITE) {
  6501. if (size != size_default)
  6502. return false;
  6503. } else {
  6504. bpf_ctx_record_field_size(info, size_default);
  6505. if (!bpf_ctx_narrow_access_ok(off, size, size_default))
  6506. return false;
  6507. }
  6508. }
  6509. return true;
  6510. }
  6511. static bool sk_filter_is_valid_access(int off, int size,
  6512. enum bpf_access_type type,
  6513. const struct bpf_prog *prog,
  6514. struct bpf_insn_access_aux *info)
  6515. {
  6516. switch (off) {
  6517. case bpf_ctx_range(struct __sk_buff, tc_classid):
  6518. case bpf_ctx_range(struct __sk_buff, data):
  6519. case bpf_ctx_range(struct __sk_buff, data_meta):
  6520. case bpf_ctx_range(struct __sk_buff, data_end):
  6521. case bpf_ctx_range_till(struct __sk_buff, family, local_port):
  6522. case bpf_ctx_range(struct __sk_buff, tstamp):
  6523. case bpf_ctx_range(struct __sk_buff, wire_len):
  6524. return false;
  6525. }
  6526. if (type == BPF_WRITE) {
  6527. switch (off) {
  6528. case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
  6529. break;
  6530. default:
  6531. return false;
  6532. }
  6533. }
  6534. return bpf_skb_is_valid_access(off, size, type, prog, info);
  6535. }
  6536. static bool cg_skb_is_valid_access(int off, int size,
  6537. enum bpf_access_type type,
  6538. const struct bpf_prog *prog,
  6539. struct bpf_insn_access_aux *info)
  6540. {
  6541. switch (off) {
  6542. case bpf_ctx_range(struct __sk_buff, tc_classid):
  6543. case bpf_ctx_range(struct __sk_buff, data_meta):
  6544. case bpf_ctx_range(struct __sk_buff, wire_len):
  6545. return false;
  6546. case bpf_ctx_range(struct __sk_buff, data):
  6547. case bpf_ctx_range(struct __sk_buff, data_end):
  6548. if (!bpf_capable())
  6549. return false;
  6550. break;
  6551. }
  6552. if (type == BPF_WRITE) {
  6553. switch (off) {
  6554. case bpf_ctx_range(struct __sk_buff, mark):
  6555. case bpf_ctx_range(struct __sk_buff, priority):
  6556. case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
  6557. break;
  6558. case bpf_ctx_range(struct __sk_buff, tstamp):
  6559. if (!bpf_capable())
  6560. return false;
  6561. break;
  6562. default:
  6563. return false;
  6564. }
  6565. }
  6566. switch (off) {
  6567. case bpf_ctx_range(struct __sk_buff, data):
  6568. info->reg_type = PTR_TO_PACKET;
  6569. break;
  6570. case bpf_ctx_range(struct __sk_buff, data_end):
  6571. info->reg_type = PTR_TO_PACKET_END;
  6572. break;
  6573. }
  6574. return bpf_skb_is_valid_access(off, size, type, prog, info);
  6575. }
  6576. static bool lwt_is_valid_access(int off, int size,
  6577. enum bpf_access_type type,
  6578. const struct bpf_prog *prog,
  6579. struct bpf_insn_access_aux *info)
  6580. {
  6581. switch (off) {
  6582. case bpf_ctx_range(struct __sk_buff, tc_classid):
  6583. case bpf_ctx_range_till(struct __sk_buff, family, local_port):
  6584. case bpf_ctx_range(struct __sk_buff, data_meta):
  6585. case bpf_ctx_range(struct __sk_buff, tstamp):
  6586. case bpf_ctx_range(struct __sk_buff, wire_len):
  6587. return false;
  6588. }
  6589. if (type == BPF_WRITE) {
  6590. switch (off) {
  6591. case bpf_ctx_range(struct __sk_buff, mark):
  6592. case bpf_ctx_range(struct __sk_buff, priority):
  6593. case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
  6594. break;
  6595. default:
  6596. return false;
  6597. }
  6598. }
  6599. switch (off) {
  6600. case bpf_ctx_range(struct __sk_buff, data):
  6601. info->reg_type = PTR_TO_PACKET;
  6602. break;
  6603. case bpf_ctx_range(struct __sk_buff, data_end):
  6604. info->reg_type = PTR_TO_PACKET_END;
  6605. break;
  6606. }
  6607. return bpf_skb_is_valid_access(off, size, type, prog, info);
  6608. }
  6609. /* Attach type specific accesses */
  6610. static bool __sock_filter_check_attach_type(int off,
  6611. enum bpf_access_type access_type,
  6612. enum bpf_attach_type attach_type)
  6613. {
  6614. switch (off) {
  6615. case offsetof(struct bpf_sock, bound_dev_if):
  6616. case offsetof(struct bpf_sock, mark):
  6617. case offsetof(struct bpf_sock, priority):
  6618. switch (attach_type) {
  6619. case BPF_CGROUP_INET_SOCK_CREATE:
  6620. case BPF_CGROUP_INET_SOCK_RELEASE:
  6621. goto full_access;
  6622. default:
  6623. return false;
  6624. }
  6625. case bpf_ctx_range(struct bpf_sock, src_ip4):
  6626. switch (attach_type) {
  6627. case BPF_CGROUP_INET4_POST_BIND:
  6628. goto read_only;
  6629. default:
  6630. return false;
  6631. }
  6632. case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
  6633. switch (attach_type) {
  6634. case BPF_CGROUP_INET6_POST_BIND:
  6635. goto read_only;
  6636. default:
  6637. return false;
  6638. }
  6639. case bpf_ctx_range(struct bpf_sock, src_port):
  6640. switch (attach_type) {
  6641. case BPF_CGROUP_INET4_POST_BIND:
  6642. case BPF_CGROUP_INET6_POST_BIND:
  6643. goto read_only;
  6644. default:
  6645. return false;
  6646. }
  6647. }
  6648. read_only:
  6649. return access_type == BPF_READ;
  6650. full_access:
  6651. return true;
  6652. }
  6653. bool bpf_sock_common_is_valid_access(int off, int size,
  6654. enum bpf_access_type type,
  6655. struct bpf_insn_access_aux *info)
  6656. {
  6657. switch (off) {
  6658. case bpf_ctx_range_till(struct bpf_sock, type, priority):
  6659. return false;
  6660. default:
  6661. return bpf_sock_is_valid_access(off, size, type, info);
  6662. }
  6663. }
  6664. bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
  6665. struct bpf_insn_access_aux *info)
  6666. {
  6667. const int size_default = sizeof(__u32);
  6668. int field_size;
  6669. if (off < 0 || off >= sizeof(struct bpf_sock))
  6670. return false;
  6671. if (off % size != 0)
  6672. return false;
  6673. switch (off) {
  6674. case offsetof(struct bpf_sock, state):
  6675. case offsetof(struct bpf_sock, family):
  6676. case offsetof(struct bpf_sock, type):
  6677. case offsetof(struct bpf_sock, protocol):
  6678. case offsetof(struct bpf_sock, src_port):
  6679. case offsetof(struct bpf_sock, rx_queue_mapping):
  6680. case bpf_ctx_range(struct bpf_sock, src_ip4):
  6681. case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
  6682. case bpf_ctx_range(struct bpf_sock, dst_ip4):
  6683. case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
  6684. bpf_ctx_record_field_size(info, size_default);
  6685. return bpf_ctx_narrow_access_ok(off, size, size_default);
  6686. case bpf_ctx_range(struct bpf_sock, dst_port):
  6687. field_size = size == size_default ?
  6688. size_default : sizeof_field(struct bpf_sock, dst_port);
  6689. bpf_ctx_record_field_size(info, field_size);
  6690. return bpf_ctx_narrow_access_ok(off, size, field_size);
  6691. case offsetofend(struct bpf_sock, dst_port) ...
  6692. offsetof(struct bpf_sock, dst_ip4) - 1:
  6693. return false;
  6694. }
  6695. return size == size_default;
  6696. }
  6697. static bool sock_filter_is_valid_access(int off, int size,
  6698. enum bpf_access_type type,
  6699. const struct bpf_prog *prog,
  6700. struct bpf_insn_access_aux *info)
  6701. {
  6702. if (!bpf_sock_is_valid_access(off, size, type, info))
  6703. return false;
  6704. return __sock_filter_check_attach_type(off, type,
  6705. prog->expected_attach_type);
  6706. }
  6707. static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
  6708. const struct bpf_prog *prog)
  6709. {
  6710. /* Neither direct read nor direct write requires any preliminary
  6711. * action.
  6712. */
  6713. return 0;
  6714. }
  6715. static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
  6716. const struct bpf_prog *prog, int drop_verdict)
  6717. {
  6718. struct bpf_insn *insn = insn_buf;
  6719. if (!direct_write)
  6720. return 0;
	/* if (!skb->cloned)
	 *         goto start;
	 *
	 * (Fast path; otherwise we conservatively assume the skb might be
	 * a clone and let the bpf_skb_pull_data() helper do the rest.)
	 */
  6727. *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
  6728. *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
  6729. *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
  6730. /* ret = bpf_skb_pull_data(skb, 0); */
  6731. *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
  6732. *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
  6733. *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6734. BPF_FUNC_skb_pull_data);
  6735. /* if (!ret)
  6736. * goto restore;
  6737. * return TC_ACT_SHOT;
  6738. */
  6739. *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
  6740. *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
  6741. *insn++ = BPF_EXIT_INSN();
  6742. /* restore: */
  6743. *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
  6744. /* start: */
  6745. *insn++ = prog->insnsi[0];
  6746. return insn - insn_buf;
  6747. }
  6748. static int bpf_gen_ld_abs(const struct bpf_insn *orig,
  6749. struct bpf_insn *insn_buf)
  6750. {
  6751. bool indirect = BPF_MODE(orig->code) == BPF_IND;
  6752. struct bpf_insn *insn = insn_buf;
  6753. if (!indirect) {
  6754. *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
  6755. } else {
  6756. *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
  6757. if (orig->imm)
  6758. *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
  6759. }
  6760. /* We're guaranteed here that CTX is in R6. */
  6761. *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
  6762. switch (BPF_SIZE(orig->code)) {
  6763. case BPF_B:
  6764. *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
  6765. break;
  6766. case BPF_H:
  6767. *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
  6768. break;
  6769. case BPF_W:
  6770. *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
  6771. break;
  6772. }
  6773. *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
  6774. *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
  6775. *insn++ = BPF_EXIT_INSN();
  6776. return insn - insn_buf;
  6777. }
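/* For illustration, a classic BPF "ldh [12]" (BPF_LD | BPF_ABS | BPF_H,
 * imm = 12) is rewritten by the sequence above into roughly:
 *
 *	r2 = 12;				// absolute offset
 *	r1 = r6;				// skb, guaranteed to be in R6
 *	r0 = bpf_skb_load_helper_16_no_cache(skb, 12);
 *	if (r0 < 0) {				// load failed
 *		r0 = 0;
 *		return r0;			// terminate the program
 *	}
 *
 * For BPF_IND the offset in r2 is taken from src_reg plus the immediate.
 */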
  6778. static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
  6779. const struct bpf_prog *prog)
  6780. {
  6781. return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
  6782. }
  6783. static bool tc_cls_act_is_valid_access(int off, int size,
  6784. enum bpf_access_type type,
  6785. const struct bpf_prog *prog,
  6786. struct bpf_insn_access_aux *info)
  6787. {
  6788. if (type == BPF_WRITE) {
  6789. switch (off) {
  6790. case bpf_ctx_range(struct __sk_buff, mark):
  6791. case bpf_ctx_range(struct __sk_buff, tc_index):
  6792. case bpf_ctx_range(struct __sk_buff, priority):
  6793. case bpf_ctx_range(struct __sk_buff, tc_classid):
  6794. case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
  6795. case bpf_ctx_range(struct __sk_buff, tstamp):
  6796. case bpf_ctx_range(struct __sk_buff, queue_mapping):
  6797. break;
  6798. default:
  6799. return false;
  6800. }
  6801. }
  6802. switch (off) {
  6803. case bpf_ctx_range(struct __sk_buff, data):
  6804. info->reg_type = PTR_TO_PACKET;
  6805. break;
  6806. case bpf_ctx_range(struct __sk_buff, data_meta):
  6807. info->reg_type = PTR_TO_PACKET_META;
  6808. break;
  6809. case bpf_ctx_range(struct __sk_buff, data_end):
  6810. info->reg_type = PTR_TO_PACKET_END;
  6811. break;
  6812. case bpf_ctx_range_till(struct __sk_buff, family, local_port):
  6813. return false;
  6814. }
  6815. return bpf_skb_is_valid_access(off, size, type, prog, info);
  6816. }
  6817. static bool __is_valid_xdp_access(int off, int size)
  6818. {
  6819. if (off < 0 || off >= sizeof(struct xdp_md))
  6820. return false;
  6821. if (off % size != 0)
  6822. return false;
  6823. if (size != sizeof(__u32))
  6824. return false;
  6825. return true;
  6826. }
  6827. static bool xdp_is_valid_access(int off, int size,
  6828. enum bpf_access_type type,
  6829. const struct bpf_prog *prog,
  6830. struct bpf_insn_access_aux *info)
  6831. {
  6832. if (prog->expected_attach_type != BPF_XDP_DEVMAP) {
  6833. switch (off) {
  6834. case offsetof(struct xdp_md, egress_ifindex):
  6835. return false;
  6836. }
  6837. }
  6838. if (type == BPF_WRITE) {
  6839. if (bpf_prog_is_dev_bound(prog->aux)) {
  6840. switch (off) {
  6841. case offsetof(struct xdp_md, rx_queue_index):
  6842. return __is_valid_xdp_access(off, size);
  6843. }
  6844. }
  6845. return false;
  6846. }
  6847. switch (off) {
  6848. case offsetof(struct xdp_md, data):
  6849. info->reg_type = PTR_TO_PACKET;
  6850. break;
  6851. case offsetof(struct xdp_md, data_meta):
  6852. info->reg_type = PTR_TO_PACKET_META;
  6853. break;
  6854. case offsetof(struct xdp_md, data_end):
  6855. info->reg_type = PTR_TO_PACKET_END;
  6856. break;
  6857. }
  6858. return __is_valid_xdp_access(off, size);
  6859. }
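/* For illustration: with the rules above an XDP program may only issue
 * aligned 4-byte reads of xdp_md, egress_ifindex is readable solely by
 * BPF_XDP_DEVMAP programs, and the only field that may ever be written is
 * rx_queue_index, and then only by device-bound (offloaded) programs:
 *
 *	__u32 ifidx = ctx->ingress_ifindex;	// always allowed
 *	ctx->rx_queue_index = 0;		// offloaded programs only
 */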
  6860. void bpf_warn_invalid_xdp_action(u32 act)
  6861. {
  6862. const u32 act_max = XDP_REDIRECT;
  6863. pr_warn_once("%s XDP return value %u, expect packet loss!\n",
  6864. act > act_max ? "Illegal" : "Driver unsupported",
  6865. act);
  6866. }
  6867. EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
  6868. static bool sock_addr_is_valid_access(int off, int size,
  6869. enum bpf_access_type type,
  6870. const struct bpf_prog *prog,
  6871. struct bpf_insn_access_aux *info)
  6872. {
  6873. const int size_default = sizeof(__u32);
  6874. if (off < 0 || off >= sizeof(struct bpf_sock_addr))
  6875. return false;
  6876. if (off % size != 0)
  6877. return false;
6878. /* Disallow access to IPv6 fields from an IPv4 context and vice
  6879. * versa.
  6880. */
  6881. switch (off) {
  6882. case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
  6883. switch (prog->expected_attach_type) {
  6884. case BPF_CGROUP_INET4_BIND:
  6885. case BPF_CGROUP_INET4_CONNECT:
  6886. case BPF_CGROUP_INET4_GETPEERNAME:
  6887. case BPF_CGROUP_INET4_GETSOCKNAME:
  6888. case BPF_CGROUP_UDP4_SENDMSG:
  6889. case BPF_CGROUP_UDP4_RECVMSG:
  6890. break;
  6891. default:
  6892. return false;
  6893. }
  6894. break;
  6895. case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
  6896. switch (prog->expected_attach_type) {
  6897. case BPF_CGROUP_INET6_BIND:
  6898. case BPF_CGROUP_INET6_CONNECT:
  6899. case BPF_CGROUP_INET6_GETPEERNAME:
  6900. case BPF_CGROUP_INET6_GETSOCKNAME:
  6901. case BPF_CGROUP_UDP6_SENDMSG:
  6902. case BPF_CGROUP_UDP6_RECVMSG:
  6903. break;
  6904. default:
  6905. return false;
  6906. }
  6907. break;
  6908. case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
  6909. switch (prog->expected_attach_type) {
  6910. case BPF_CGROUP_UDP4_SENDMSG:
  6911. break;
  6912. default:
  6913. return false;
  6914. }
  6915. break;
  6916. case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
  6917. msg_src_ip6[3]):
  6918. switch (prog->expected_attach_type) {
  6919. case BPF_CGROUP_UDP6_SENDMSG:
  6920. break;
  6921. default:
  6922. return false;
  6923. }
  6924. break;
  6925. }
  6926. switch (off) {
  6927. case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
  6928. case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
  6929. case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
  6930. case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
  6931. msg_src_ip6[3]):
  6932. case bpf_ctx_range(struct bpf_sock_addr, user_port):
  6933. if (type == BPF_READ) {
  6934. bpf_ctx_record_field_size(info, size_default);
  6935. if (bpf_ctx_wide_access_ok(off, size,
  6936. struct bpf_sock_addr,
  6937. user_ip6))
  6938. return true;
  6939. if (bpf_ctx_wide_access_ok(off, size,
  6940. struct bpf_sock_addr,
  6941. msg_src_ip6))
  6942. return true;
  6943. if (!bpf_ctx_narrow_access_ok(off, size, size_default))
  6944. return false;
  6945. } else {
  6946. if (bpf_ctx_wide_access_ok(off, size,
  6947. struct bpf_sock_addr,
  6948. user_ip6))
  6949. return true;
  6950. if (bpf_ctx_wide_access_ok(off, size,
  6951. struct bpf_sock_addr,
  6952. msg_src_ip6))
  6953. return true;
  6954. if (size != size_default)
  6955. return false;
  6956. }
  6957. break;
  6958. case offsetof(struct bpf_sock_addr, sk):
  6959. if (type != BPF_READ)
  6960. return false;
  6961. if (size != sizeof(__u64))
  6962. return false;
  6963. info->reg_type = PTR_TO_SOCKET;
  6964. break;
  6965. default:
  6966. if (type == BPF_READ) {
  6967. if (size != size_default)
  6968. return false;
  6969. } else {
  6970. return false;
  6971. }
  6972. }
  6973. return true;
  6974. }
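/* For illustration: the attach-type filtering above means that, e.g., a
 * BPF_CGROUP_INET4_CONNECT program may access user_ip4 while any access to
 * user_ip6[] fails verification, and vice versa for the INET6 attach types.
 * The IPv6 arrays additionally permit 8-byte wide accesses via
 * bpf_ctx_wide_access_ok(), so an address can be copied with two u64 loads
 * or stores, roughly:
 *
 *	__u64 a = *(__u64 *)&ctx->user_ip6[0];
 *	__u64 b = *(__u64 *)&ctx->user_ip6[2];
 */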
  6975. static bool sock_ops_is_valid_access(int off, int size,
  6976. enum bpf_access_type type,
  6977. const struct bpf_prog *prog,
  6978. struct bpf_insn_access_aux *info)
  6979. {
  6980. const int size_default = sizeof(__u32);
  6981. if (off < 0 || off >= sizeof(struct bpf_sock_ops))
  6982. return false;
  6983. /* The verifier guarantees that size > 0. */
  6984. if (off % size != 0)
  6985. return false;
  6986. if (type == BPF_WRITE) {
  6987. switch (off) {
  6988. case offsetof(struct bpf_sock_ops, reply):
  6989. case offsetof(struct bpf_sock_ops, sk_txhash):
  6990. if (size != size_default)
  6991. return false;
  6992. break;
  6993. default:
  6994. return false;
  6995. }
  6996. } else {
  6997. switch (off) {
  6998. case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
  6999. bytes_acked):
  7000. if (size != sizeof(__u64))
  7001. return false;
  7002. break;
  7003. case offsetof(struct bpf_sock_ops, sk):
  7004. if (size != sizeof(__u64))
  7005. return false;
  7006. info->reg_type = PTR_TO_SOCKET_OR_NULL;
  7007. break;
  7008. case offsetof(struct bpf_sock_ops, skb_data):
  7009. if (size != sizeof(__u64))
  7010. return false;
  7011. info->reg_type = PTR_TO_PACKET;
  7012. break;
  7013. case offsetof(struct bpf_sock_ops, skb_data_end):
  7014. if (size != sizeof(__u64))
  7015. return false;
  7016. info->reg_type = PTR_TO_PACKET_END;
  7017. break;
  7018. case offsetof(struct bpf_sock_ops, skb_tcp_flags):
  7019. bpf_ctx_record_field_size(info, size_default);
  7020. return bpf_ctx_narrow_access_ok(off, size,
  7021. size_default);
  7022. default:
  7023. if (size != size_default)
  7024. return false;
  7025. break;
  7026. }
  7027. }
  7028. return true;
  7029. }
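/* For illustration: per the checks above, a sock_ops program must use full
 * 8-byte loads for bytes_received..bytes_acked and for the sk/skb_data
 * pointer fields, 4-byte accesses elsewhere (skb_tcp_flags also allowing
 * narrow loads), and may write only reply and sk_txhash, roughly:
 *
 *	__u64 acked = skops->bytes_acked;	// must be a u64 load
 *	skops->reply = 1;			// allowed 4-byte write
 */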
  7030. static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
  7031. const struct bpf_prog *prog)
  7032. {
  7033. return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
  7034. }
  7035. static bool sk_skb_is_valid_access(int off, int size,
  7036. enum bpf_access_type type,
  7037. const struct bpf_prog *prog,
  7038. struct bpf_insn_access_aux *info)
  7039. {
  7040. switch (off) {
  7041. case bpf_ctx_range(struct __sk_buff, tc_classid):
  7042. case bpf_ctx_range(struct __sk_buff, data_meta):
  7043. case bpf_ctx_range(struct __sk_buff, tstamp):
  7044. case bpf_ctx_range(struct __sk_buff, wire_len):
  7045. return false;
  7046. }
  7047. if (type == BPF_WRITE) {
  7048. switch (off) {
  7049. case bpf_ctx_range(struct __sk_buff, tc_index):
  7050. case bpf_ctx_range(struct __sk_buff, priority):
  7051. break;
  7052. default:
  7053. return false;
  7054. }
  7055. }
  7056. switch (off) {
  7057. case bpf_ctx_range(struct __sk_buff, mark):
  7058. return false;
  7059. case bpf_ctx_range(struct __sk_buff, data):
  7060. info->reg_type = PTR_TO_PACKET;
  7061. break;
  7062. case bpf_ctx_range(struct __sk_buff, data_end):
  7063. info->reg_type = PTR_TO_PACKET_END;
  7064. break;
  7065. }
  7066. return bpf_skb_is_valid_access(off, size, type, prog, info);
  7067. }
  7068. static bool sk_msg_is_valid_access(int off, int size,
  7069. enum bpf_access_type type,
  7070. const struct bpf_prog *prog,
  7071. struct bpf_insn_access_aux *info)
  7072. {
  7073. if (type == BPF_WRITE)
  7074. return false;
  7075. if (off % size != 0)
  7076. return false;
  7077. switch (off) {
  7078. case offsetof(struct sk_msg_md, data):
  7079. info->reg_type = PTR_TO_PACKET;
  7080. if (size != sizeof(__u64))
  7081. return false;
  7082. break;
  7083. case offsetof(struct sk_msg_md, data_end):
  7084. info->reg_type = PTR_TO_PACKET_END;
  7085. if (size != sizeof(__u64))
  7086. return false;
  7087. break;
  7088. case offsetof(struct sk_msg_md, sk):
  7089. if (size != sizeof(__u64))
  7090. return false;
  7091. info->reg_type = PTR_TO_SOCKET;
  7092. break;
  7093. case bpf_ctx_range(struct sk_msg_md, family):
  7094. case bpf_ctx_range(struct sk_msg_md, remote_ip4):
  7095. case bpf_ctx_range(struct sk_msg_md, local_ip4):
  7096. case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
  7097. case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
  7098. case bpf_ctx_range(struct sk_msg_md, remote_port):
  7099. case bpf_ctx_range(struct sk_msg_md, local_port):
  7100. case bpf_ctx_range(struct sk_msg_md, size):
  7101. if (size != sizeof(__u32))
  7102. return false;
  7103. break;
  7104. default:
  7105. return false;
  7106. }
  7107. return true;
  7108. }
  7109. static bool flow_dissector_is_valid_access(int off, int size,
  7110. enum bpf_access_type type,
  7111. const struct bpf_prog *prog,
  7112. struct bpf_insn_access_aux *info)
  7113. {
  7114. const int size_default = sizeof(__u32);
  7115. if (off < 0 || off >= sizeof(struct __sk_buff))
  7116. return false;
  7117. if (type == BPF_WRITE)
  7118. return false;
  7119. switch (off) {
  7120. case bpf_ctx_range(struct __sk_buff, data):
  7121. if (size != size_default)
  7122. return false;
  7123. info->reg_type = PTR_TO_PACKET;
  7124. return true;
  7125. case bpf_ctx_range(struct __sk_buff, data_end):
  7126. if (size != size_default)
  7127. return false;
  7128. info->reg_type = PTR_TO_PACKET_END;
  7129. return true;
  7130. case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
  7131. if (size != sizeof(__u64))
  7132. return false;
  7133. info->reg_type = PTR_TO_FLOW_KEYS;
  7134. return true;
  7135. default:
  7136. return false;
  7137. }
  7138. }
  7139. static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
  7140. const struct bpf_insn *si,
  7141. struct bpf_insn *insn_buf,
  7142. struct bpf_prog *prog,
  7143. u32 *target_size)
  7144. {
  7145. struct bpf_insn *insn = insn_buf;
  7146. switch (si->off) {
  7147. case offsetof(struct __sk_buff, data):
  7148. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
  7149. si->dst_reg, si->src_reg,
  7150. offsetof(struct bpf_flow_dissector, data));
  7151. break;
  7152. case offsetof(struct __sk_buff, data_end):
  7153. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
  7154. si->dst_reg, si->src_reg,
  7155. offsetof(struct bpf_flow_dissector, data_end));
  7156. break;
  7157. case offsetof(struct __sk_buff, flow_keys):
  7158. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
  7159. si->dst_reg, si->src_reg,
  7160. offsetof(struct bpf_flow_dissector, flow_keys));
  7161. break;
  7162. }
  7163. return insn - insn_buf;
  7164. }
  7165. static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si,
  7166. struct bpf_insn *insn)
  7167. {
  7168. /* si->dst_reg = skb_shinfo(SKB); */
  7169. #ifdef NET_SKBUFF_DATA_USES_OFFSET
  7170. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
  7171. BPF_REG_AX, si->src_reg,
  7172. offsetof(struct sk_buff, end));
  7173. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
  7174. si->dst_reg, si->src_reg,
  7175. offsetof(struct sk_buff, head));
  7176. *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
  7177. #else
  7178. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
  7179. si->dst_reg, si->src_reg,
  7180. offsetof(struct sk_buff, end));
  7181. #endif
  7182. return insn;
  7183. }
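/* For illustration, both variants above mirror skb_shinfo()/skb_end_pointer():
 * when NET_SKBUFF_DATA_USES_OFFSET is set, skb->end is an offset from
 * skb->head, so the emitted instructions compute
 *
 *	shinfo = (struct skb_shared_info *)(skb->head + skb->end);
 *
 * otherwise skb->end is already the pointer and a single load suffices.
 */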
  7184. static u32 bpf_convert_ctx_access(enum bpf_access_type type,
  7185. const struct bpf_insn *si,
  7186. struct bpf_insn *insn_buf,
  7187. struct bpf_prog *prog, u32 *target_size)
  7188. {
  7189. struct bpf_insn *insn = insn_buf;
  7190. int off;
  7191. switch (si->off) {
  7192. case offsetof(struct __sk_buff, len):
  7193. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7194. bpf_target_off(struct sk_buff, len, 4,
  7195. target_size));
  7196. break;
  7197. case offsetof(struct __sk_buff, protocol):
  7198. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
  7199. bpf_target_off(struct sk_buff, protocol, 2,
  7200. target_size));
  7201. break;
  7202. case offsetof(struct __sk_buff, vlan_proto):
  7203. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
  7204. bpf_target_off(struct sk_buff, vlan_proto, 2,
  7205. target_size));
  7206. break;
  7207. case offsetof(struct __sk_buff, priority):
  7208. if (type == BPF_WRITE)
  7209. *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7210. bpf_target_off(struct sk_buff, priority, 4,
  7211. target_size));
  7212. else
  7213. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7214. bpf_target_off(struct sk_buff, priority, 4,
  7215. target_size));
  7216. break;
  7217. case offsetof(struct __sk_buff, ingress_ifindex):
  7218. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7219. bpf_target_off(struct sk_buff, skb_iif, 4,
  7220. target_size));
  7221. break;
  7222. case offsetof(struct __sk_buff, ifindex):
  7223. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
  7224. si->dst_reg, si->src_reg,
  7225. offsetof(struct sk_buff, dev));
  7226. *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
  7227. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  7228. bpf_target_off(struct net_device, ifindex, 4,
  7229. target_size));
  7230. break;
  7231. case offsetof(struct __sk_buff, hash):
  7232. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7233. bpf_target_off(struct sk_buff, hash, 4,
  7234. target_size));
  7235. break;
  7236. case offsetof(struct __sk_buff, mark):
  7237. if (type == BPF_WRITE)
  7238. *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7239. bpf_target_off(struct sk_buff, mark, 4,
  7240. target_size));
  7241. else
  7242. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7243. bpf_target_off(struct sk_buff, mark, 4,
  7244. target_size));
  7245. break;
  7246. case offsetof(struct __sk_buff, pkt_type):
  7247. *target_size = 1;
  7248. *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
  7249. PKT_TYPE_OFFSET());
  7250. *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
  7251. #ifdef __BIG_ENDIAN_BITFIELD
  7252. *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
  7253. #endif
  7254. break;
  7255. case offsetof(struct __sk_buff, queue_mapping):
  7256. if (type == BPF_WRITE) {
  7257. *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
  7258. *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
  7259. bpf_target_off(struct sk_buff,
  7260. queue_mapping,
  7261. 2, target_size));
  7262. } else {
  7263. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
  7264. bpf_target_off(struct sk_buff,
  7265. queue_mapping,
  7266. 2, target_size));
  7267. }
  7268. break;
  7269. case offsetof(struct __sk_buff, vlan_present):
  7270. *target_size = 1;
  7271. *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
  7272. PKT_VLAN_PRESENT_OFFSET());
  7273. if (PKT_VLAN_PRESENT_BIT)
  7274. *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
  7275. if (PKT_VLAN_PRESENT_BIT < 7)
  7276. *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
  7277. break;
  7278. case offsetof(struct __sk_buff, vlan_tci):
  7279. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
  7280. bpf_target_off(struct sk_buff, vlan_tci, 2,
  7281. target_size));
  7282. break;
  7283. case offsetof(struct __sk_buff, cb[0]) ...
  7284. offsetofend(struct __sk_buff, cb[4]) - 1:
  7285. BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20);
  7286. BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
  7287. offsetof(struct qdisc_skb_cb, data)) %
  7288. sizeof(__u64));
  7289. prog->cb_access = 1;
  7290. off = si->off;
  7291. off -= offsetof(struct __sk_buff, cb[0]);
  7292. off += offsetof(struct sk_buff, cb);
  7293. off += offsetof(struct qdisc_skb_cb, data);
  7294. if (type == BPF_WRITE)
  7295. *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
  7296. si->src_reg, off);
  7297. else
  7298. *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
  7299. si->src_reg, off);
  7300. break;
  7301. case offsetof(struct __sk_buff, tc_classid):
  7302. BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2);
  7303. off = si->off;
  7304. off -= offsetof(struct __sk_buff, tc_classid);
  7305. off += offsetof(struct sk_buff, cb);
  7306. off += offsetof(struct qdisc_skb_cb, tc_classid);
  7307. *target_size = 2;
  7308. if (type == BPF_WRITE)
  7309. *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
  7310. si->src_reg, off);
  7311. else
  7312. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
  7313. si->src_reg, off);
  7314. break;
  7315. case offsetof(struct __sk_buff, data):
  7316. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
  7317. si->dst_reg, si->src_reg,
  7318. offsetof(struct sk_buff, data));
  7319. break;
  7320. case offsetof(struct __sk_buff, data_meta):
  7321. off = si->off;
  7322. off -= offsetof(struct __sk_buff, data_meta);
  7323. off += offsetof(struct sk_buff, cb);
  7324. off += offsetof(struct bpf_skb_data_end, data_meta);
  7325. *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
  7326. si->src_reg, off);
  7327. break;
  7328. case offsetof(struct __sk_buff, data_end):
  7329. off = si->off;
  7330. off -= offsetof(struct __sk_buff, data_end);
  7331. off += offsetof(struct sk_buff, cb);
  7332. off += offsetof(struct bpf_skb_data_end, data_end);
  7333. *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
  7334. si->src_reg, off);
  7335. break;
  7336. case offsetof(struct __sk_buff, tc_index):
  7337. #ifdef CONFIG_NET_SCHED
  7338. if (type == BPF_WRITE)
  7339. *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
  7340. bpf_target_off(struct sk_buff, tc_index, 2,
  7341. target_size));
  7342. else
  7343. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
  7344. bpf_target_off(struct sk_buff, tc_index, 2,
  7345. target_size));
  7346. #else
  7347. *target_size = 2;
  7348. if (type == BPF_WRITE)
  7349. *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
  7350. else
  7351. *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
  7352. #endif
  7353. break;
  7354. case offsetof(struct __sk_buff, napi_id):
  7355. #if defined(CONFIG_NET_RX_BUSY_POLL)
  7356. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7357. bpf_target_off(struct sk_buff, napi_id, 4,
  7358. target_size));
  7359. *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
  7360. *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
  7361. #else
  7362. *target_size = 4;
  7363. *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
  7364. #endif
  7365. break;
  7366. case offsetof(struct __sk_buff, family):
  7367. BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
  7368. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
  7369. si->dst_reg, si->src_reg,
  7370. offsetof(struct sk_buff, sk));
  7371. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
  7372. bpf_target_off(struct sock_common,
  7373. skc_family,
  7374. 2, target_size));
  7375. break;
  7376. case offsetof(struct __sk_buff, remote_ip4):
  7377. BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
  7378. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
  7379. si->dst_reg, si->src_reg,
  7380. offsetof(struct sk_buff, sk));
  7381. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  7382. bpf_target_off(struct sock_common,
  7383. skc_daddr,
  7384. 4, target_size));
  7385. break;
  7386. case offsetof(struct __sk_buff, local_ip4):
  7387. BUILD_BUG_ON(sizeof_field(struct sock_common,
  7388. skc_rcv_saddr) != 4);
  7389. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
  7390. si->dst_reg, si->src_reg,
  7391. offsetof(struct sk_buff, sk));
  7392. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  7393. bpf_target_off(struct sock_common,
  7394. skc_rcv_saddr,
  7395. 4, target_size));
  7396. break;
  7397. case offsetof(struct __sk_buff, remote_ip6[0]) ...
  7398. offsetof(struct __sk_buff, remote_ip6[3]):
  7399. #if IS_ENABLED(CONFIG_IPV6)
  7400. BUILD_BUG_ON(sizeof_field(struct sock_common,
  7401. skc_v6_daddr.s6_addr32[0]) != 4);
  7402. off = si->off;
  7403. off -= offsetof(struct __sk_buff, remote_ip6[0]);
  7404. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
  7405. si->dst_reg, si->src_reg,
  7406. offsetof(struct sk_buff, sk));
  7407. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  7408. offsetof(struct sock_common,
  7409. skc_v6_daddr.s6_addr32[0]) +
  7410. off);
  7411. #else
  7412. *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
  7413. #endif
  7414. break;
  7415. case offsetof(struct __sk_buff, local_ip6[0]) ...
  7416. offsetof(struct __sk_buff, local_ip6[3]):
  7417. #if IS_ENABLED(CONFIG_IPV6)
  7418. BUILD_BUG_ON(sizeof_field(struct sock_common,
  7419. skc_v6_rcv_saddr.s6_addr32[0]) != 4);
  7420. off = si->off;
  7421. off -= offsetof(struct __sk_buff, local_ip6[0]);
  7422. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
  7423. si->dst_reg, si->src_reg,
  7424. offsetof(struct sk_buff, sk));
  7425. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  7426. offsetof(struct sock_common,
  7427. skc_v6_rcv_saddr.s6_addr32[0]) +
  7428. off);
  7429. #else
  7430. *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
  7431. #endif
  7432. break;
  7433. case offsetof(struct __sk_buff, remote_port):
  7434. BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
  7435. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
  7436. si->dst_reg, si->src_reg,
  7437. offsetof(struct sk_buff, sk));
  7438. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
  7439. bpf_target_off(struct sock_common,
  7440. skc_dport,
  7441. 2, target_size));
  7442. #ifndef __BIG_ENDIAN_BITFIELD
  7443. *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
  7444. #endif
  7445. break;
  7446. case offsetof(struct __sk_buff, local_port):
  7447. BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
  7448. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
  7449. si->dst_reg, si->src_reg,
  7450. offsetof(struct sk_buff, sk));
  7451. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
  7452. bpf_target_off(struct sock_common,
  7453. skc_num, 2, target_size));
  7454. break;
  7455. case offsetof(struct __sk_buff, tstamp):
  7456. BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8);
  7457. if (type == BPF_WRITE)
  7458. *insn++ = BPF_STX_MEM(BPF_DW,
  7459. si->dst_reg, si->src_reg,
  7460. bpf_target_off(struct sk_buff,
  7461. tstamp, 8,
  7462. target_size));
  7463. else
  7464. *insn++ = BPF_LDX_MEM(BPF_DW,
  7465. si->dst_reg, si->src_reg,
  7466. bpf_target_off(struct sk_buff,
  7467. tstamp, 8,
  7468. target_size));
  7469. break;
  7470. case offsetof(struct __sk_buff, gso_segs):
  7471. insn = bpf_convert_shinfo_access(si, insn);
  7472. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
  7473. si->dst_reg, si->dst_reg,
  7474. bpf_target_off(struct skb_shared_info,
  7475. gso_segs, 2,
  7476. target_size));
  7477. break;
  7478. case offsetof(struct __sk_buff, gso_size):
  7479. insn = bpf_convert_shinfo_access(si, insn);
  7480. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_size),
  7481. si->dst_reg, si->dst_reg,
  7482. bpf_target_off(struct skb_shared_info,
  7483. gso_size, 2,
  7484. target_size));
  7485. break;
  7486. case offsetof(struct __sk_buff, wire_len):
  7487. BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4);
  7488. off = si->off;
  7489. off -= offsetof(struct __sk_buff, wire_len);
  7490. off += offsetof(struct sk_buff, cb);
  7491. off += offsetof(struct qdisc_skb_cb, pkt_len);
  7492. *target_size = 4;
  7493. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
  7494. break;
  7495. case offsetof(struct __sk_buff, sk):
  7496. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
  7497. si->dst_reg, si->src_reg,
  7498. offsetof(struct sk_buff, sk));
  7499. break;
  7500. }
  7501. return insn - insn_buf;
  7502. }
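/* For illustration, the conversion above turns a program's access to
 * __sk_buff into loads/stores on the real sk_buff, e.g.
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len));
 *
 * becomes roughly
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len));
 *
 * while cb[] accesses are redirected into qdisc_skb_cb::data within
 * skb->cb[], and data_end/data_meta are read from the bpf_skb_data_end
 * scratch area also kept in skb->cb[].
 */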
  7503. u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
  7504. const struct bpf_insn *si,
  7505. struct bpf_insn *insn_buf,
  7506. struct bpf_prog *prog, u32 *target_size)
  7507. {
  7508. struct bpf_insn *insn = insn_buf;
  7509. int off;
  7510. switch (si->off) {
  7511. case offsetof(struct bpf_sock, bound_dev_if):
  7512. BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);
  7513. if (type == BPF_WRITE)
  7514. *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7515. offsetof(struct sock, sk_bound_dev_if));
  7516. else
  7517. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7518. offsetof(struct sock, sk_bound_dev_if));
  7519. break;
  7520. case offsetof(struct bpf_sock, mark):
  7521. BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);
  7522. if (type == BPF_WRITE)
  7523. *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7524. offsetof(struct sock, sk_mark));
  7525. else
  7526. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7527. offsetof(struct sock, sk_mark));
  7528. break;
  7529. case offsetof(struct bpf_sock, priority):
  7530. BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);
  7531. if (type == BPF_WRITE)
  7532. *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7533. offsetof(struct sock, sk_priority));
  7534. else
  7535. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  7536. offsetof(struct sock, sk_priority));
  7537. break;
  7538. case offsetof(struct bpf_sock, family):
  7539. *insn++ = BPF_LDX_MEM(
  7540. BPF_FIELD_SIZEOF(struct sock_common, skc_family),
  7541. si->dst_reg, si->src_reg,
  7542. bpf_target_off(struct sock_common,
  7543. skc_family,
  7544. sizeof_field(struct sock_common,
  7545. skc_family),
  7546. target_size));
  7547. break;
  7548. case offsetof(struct bpf_sock, type):
  7549. *insn++ = BPF_LDX_MEM(
  7550. BPF_FIELD_SIZEOF(struct sock, sk_type),
  7551. si->dst_reg, si->src_reg,
  7552. bpf_target_off(struct sock, sk_type,
  7553. sizeof_field(struct sock, sk_type),
  7554. target_size));
  7555. break;
  7556. case offsetof(struct bpf_sock, protocol):
  7557. *insn++ = BPF_LDX_MEM(
  7558. BPF_FIELD_SIZEOF(struct sock, sk_protocol),
  7559. si->dst_reg, si->src_reg,
  7560. bpf_target_off(struct sock, sk_protocol,
  7561. sizeof_field(struct sock, sk_protocol),
  7562. target_size));
  7563. break;
  7564. case offsetof(struct bpf_sock, src_ip4):
  7565. *insn++ = BPF_LDX_MEM(
  7566. BPF_SIZE(si->code), si->dst_reg, si->src_reg,
  7567. bpf_target_off(struct sock_common, skc_rcv_saddr,
  7568. sizeof_field(struct sock_common,
  7569. skc_rcv_saddr),
  7570. target_size));
  7571. break;
  7572. case offsetof(struct bpf_sock, dst_ip4):
  7573. *insn++ = BPF_LDX_MEM(
  7574. BPF_SIZE(si->code), si->dst_reg, si->src_reg,
  7575. bpf_target_off(struct sock_common, skc_daddr,
  7576. sizeof_field(struct sock_common,
  7577. skc_daddr),
  7578. target_size));
  7579. break;
  7580. case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
  7581. #if IS_ENABLED(CONFIG_IPV6)
  7582. off = si->off;
  7583. off -= offsetof(struct bpf_sock, src_ip6[0]);
  7584. *insn++ = BPF_LDX_MEM(
  7585. BPF_SIZE(si->code), si->dst_reg, si->src_reg,
  7586. bpf_target_off(
  7587. struct sock_common,
  7588. skc_v6_rcv_saddr.s6_addr32[0],
  7589. sizeof_field(struct sock_common,
  7590. skc_v6_rcv_saddr.s6_addr32[0]),
  7591. target_size) + off);
  7592. #else
  7593. (void)off;
  7594. *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
  7595. #endif
  7596. break;
  7597. case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
  7598. #if IS_ENABLED(CONFIG_IPV6)
  7599. off = si->off;
  7600. off -= offsetof(struct bpf_sock, dst_ip6[0]);
  7601. *insn++ = BPF_LDX_MEM(
  7602. BPF_SIZE(si->code), si->dst_reg, si->src_reg,
  7603. bpf_target_off(struct sock_common,
  7604. skc_v6_daddr.s6_addr32[0],
  7605. sizeof_field(struct sock_common,
  7606. skc_v6_daddr.s6_addr32[0]),
  7607. target_size) + off);
  7608. #else
  7609. *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
  7610. *target_size = 4;
  7611. #endif
  7612. break;
  7613. case offsetof(struct bpf_sock, src_port):
  7614. *insn++ = BPF_LDX_MEM(
  7615. BPF_FIELD_SIZEOF(struct sock_common, skc_num),
  7616. si->dst_reg, si->src_reg,
  7617. bpf_target_off(struct sock_common, skc_num,
  7618. sizeof_field(struct sock_common,
  7619. skc_num),
  7620. target_size));
  7621. break;
  7622. case offsetof(struct bpf_sock, dst_port):
  7623. *insn++ = BPF_LDX_MEM(
  7624. BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
  7625. si->dst_reg, si->src_reg,
  7626. bpf_target_off(struct sock_common, skc_dport,
  7627. sizeof_field(struct sock_common,
  7628. skc_dport),
  7629. target_size));
  7630. break;
  7631. case offsetof(struct bpf_sock, state):
  7632. *insn++ = BPF_LDX_MEM(
  7633. BPF_FIELD_SIZEOF(struct sock_common, skc_state),
  7634. si->dst_reg, si->src_reg,
  7635. bpf_target_off(struct sock_common, skc_state,
  7636. sizeof_field(struct sock_common,
  7637. skc_state),
  7638. target_size));
  7639. break;
  7640. case offsetof(struct bpf_sock, rx_queue_mapping):
  7641. #ifdef CONFIG_XPS
  7642. *insn++ = BPF_LDX_MEM(
  7643. BPF_FIELD_SIZEOF(struct sock, sk_rx_queue_mapping),
  7644. si->dst_reg, si->src_reg,
  7645. bpf_target_off(struct sock, sk_rx_queue_mapping,
  7646. sizeof_field(struct sock,
  7647. sk_rx_queue_mapping),
  7648. target_size));
  7649. *insn++ = BPF_JMP_IMM(BPF_JNE, si->dst_reg, NO_QUEUE_MAPPING,
  7650. 1);
  7651. *insn++ = BPF_MOV64_IMM(si->dst_reg, -1);
  7652. #else
  7653. *insn++ = BPF_MOV64_IMM(si->dst_reg, -1);
  7654. *target_size = 2;
  7655. #endif
  7656. break;
  7657. }
  7658. return insn - insn_buf;
  7659. }
  7660. static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
  7661. const struct bpf_insn *si,
  7662. struct bpf_insn *insn_buf,
  7663. struct bpf_prog *prog, u32 *target_size)
  7664. {
  7665. struct bpf_insn *insn = insn_buf;
  7666. switch (si->off) {
  7667. case offsetof(struct __sk_buff, ifindex):
  7668. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
  7669. si->dst_reg, si->src_reg,
  7670. offsetof(struct sk_buff, dev));
  7671. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  7672. bpf_target_off(struct net_device, ifindex, 4,
  7673. target_size));
  7674. break;
  7675. default:
  7676. return bpf_convert_ctx_access(type, si, insn_buf, prog,
  7677. target_size);
  7678. }
  7679. return insn - insn_buf;
  7680. }
  7681. static u32 xdp_convert_ctx_access(enum bpf_access_type type,
  7682. const struct bpf_insn *si,
  7683. struct bpf_insn *insn_buf,
  7684. struct bpf_prog *prog, u32 *target_size)
  7685. {
  7686. struct bpf_insn *insn = insn_buf;
  7687. switch (si->off) {
  7688. case offsetof(struct xdp_md, data):
  7689. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
  7690. si->dst_reg, si->src_reg,
  7691. offsetof(struct xdp_buff, data));
  7692. break;
  7693. case offsetof(struct xdp_md, data_meta):
  7694. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
  7695. si->dst_reg, si->src_reg,
  7696. offsetof(struct xdp_buff, data_meta));
  7697. break;
  7698. case offsetof(struct xdp_md, data_end):
  7699. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
  7700. si->dst_reg, si->src_reg,
  7701. offsetof(struct xdp_buff, data_end));
  7702. break;
  7703. case offsetof(struct xdp_md, ingress_ifindex):
  7704. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
  7705. si->dst_reg, si->src_reg,
  7706. offsetof(struct xdp_buff, rxq));
  7707. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
  7708. si->dst_reg, si->dst_reg,
  7709. offsetof(struct xdp_rxq_info, dev));
  7710. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  7711. offsetof(struct net_device, ifindex));
  7712. break;
  7713. case offsetof(struct xdp_md, rx_queue_index):
  7714. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
  7715. si->dst_reg, si->src_reg,
  7716. offsetof(struct xdp_buff, rxq));
  7717. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  7718. offsetof(struct xdp_rxq_info,
  7719. queue_index));
  7720. break;
  7721. case offsetof(struct xdp_md, egress_ifindex):
  7722. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, txq),
  7723. si->dst_reg, si->src_reg,
  7724. offsetof(struct xdp_buff, txq));
  7725. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_txq_info, dev),
  7726. si->dst_reg, si->dst_reg,
  7727. offsetof(struct xdp_txq_info, dev));
  7728. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  7729. offsetof(struct net_device, ifindex));
  7730. break;
  7731. }
  7732. return insn - insn_buf;
  7733. }
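/* For illustration, the rewrites above resolve xdp_md fields against the
 * kernel's xdp_buff, e.g. reading xdp_md->ingress_ifindex expands into a
 * chain of three loads, roughly
 *
 *	ifindex = xdp->rxq->dev->ifindex;
 *
 * and xdp_md->data becomes a single load of xdp->data.
 */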
7734. /* SOCK_ADDR_LOAD_NESTED_FIELD() loads the nested field S.F.NF, where S is
7735. * the type of the context structure and F is the field in it that contains
7736. * a pointer to a nested structure of type NS that has the field NF.
7737. *
7738. * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller to
7739. * make sure that SIZE is not greater than the actual size of S.F.NF.
7740. *
7741. * If an offset OFF is provided, the load happens at that offset relative to
7742. * the offset of NF.
7743. */
  7744. #define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \
  7745. do { \
  7746. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
  7747. si->src_reg, offsetof(S, F)); \
  7748. *insn++ = BPF_LDX_MEM( \
  7749. SIZE, si->dst_reg, si->dst_reg, \
  7750. bpf_target_off(NS, NF, sizeof_field(NS, NF), \
  7751. target_size) \
  7752. + OFF); \
  7753. } while (0)
  7754. #define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \
  7755. SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \
  7756. BPF_FIELD_SIZEOF(NS, NF), 0)
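/* For illustration, the use of this macro for user_family further below,
 *
 *	SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
 *				    struct sockaddr, uaddr, sa_family);
 *
 * emits two loads: the uaddr pointer is loaded from the kernel context into
 * dst_reg, then sa_family is loaded through it, i.e. roughly
 * dst_reg = ((struct sockaddr *)ctx->uaddr)->sa_family.
 */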
7757. /* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
7758. * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for the store operation.
7759. *
7760. * In addition it uses the temporary field TF (a member of struct S) as a 3rd
7761. * "register", since the two registers available in convert_ctx_access are not
7762. * enough: we can override neither SRC, since it contains the value to store,
7763. * nor DST, since it contains the pointer to the context that may be used by
7764. * later instructions. But we still need a temporary place to save the pointer
7765. * to the nested structure whose field we want to store to.
7766. */
  7767. #define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) \
  7768. do { \
  7769. int tmp_reg = BPF_REG_9; \
  7770. if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
  7771. --tmp_reg; \
  7772. if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
  7773. --tmp_reg; \
  7774. *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \
  7775. offsetof(S, TF)); \
  7776. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \
  7777. si->dst_reg, offsetof(S, F)); \
  7778. *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \
  7779. bpf_target_off(NS, NF, sizeof_field(NS, NF), \
  7780. target_size) \
  7781. + OFF); \
  7782. *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \
  7783. offsetof(S, TF)); \
  7784. } while (0)
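/* For illustration, the store sequence above behaves roughly like:
 *
 *	*(u64 *)(ctx + offsetof(S, TF)) = tmp;		// spill scratch reg
 *	tmp = ctx->F;					// nested pointer
 *	*(SIZE *)((void *)tmp + offsetof(NS, NF) + OFF) = value;
 *	tmp = *(u64 *)(ctx + offsetof(S, TF));		// restore scratch reg
 *
 * with the scratch register chosen from BPF_REG_9 downwards so that it
 * collides with neither src_reg nor dst_reg.
 */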
  7785. #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
  7786. TF) \
  7787. do { \
  7788. if (type == BPF_WRITE) { \
  7789. SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \
  7790. OFF, TF); \
  7791. } else { \
  7792. SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \
  7793. S, NS, F, NF, SIZE, OFF); \
  7794. } \
  7795. } while (0)
  7796. #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \
  7797. SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \
  7798. S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
  7799. static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
  7800. const struct bpf_insn *si,
  7801. struct bpf_insn *insn_buf,
  7802. struct bpf_prog *prog, u32 *target_size)
  7803. {
  7804. int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port);
  7805. struct bpf_insn *insn = insn_buf;
  7806. switch (si->off) {
  7807. case offsetof(struct bpf_sock_addr, user_family):
  7808. SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
  7809. struct sockaddr, uaddr, sa_family);
  7810. break;
  7811. case offsetof(struct bpf_sock_addr, user_ip4):
  7812. SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
  7813. struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
  7814. sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
  7815. break;
  7816. case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
  7817. off = si->off;
  7818. off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
  7819. SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
  7820. struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
  7821. sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
  7822. tmp_reg);
  7823. break;
  7824. case offsetof(struct bpf_sock_addr, user_port):
7825. /* To get the port we would need to know sa_family first and then
7826. * treat sockaddr as either sockaddr_in or sockaddr_in6.
7827. * We can simplify, though, since the port field has the same offset
7828. * and size in both structures.
7829. * Check that invariant at build time below and then use just one of
7830. * the structures.
7831. */
  7832. BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
  7833. offsetof(struct sockaddr_in6, sin6_port));
  7834. BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) !=
  7835. sizeof_field(struct sockaddr_in6, sin6_port));
  7836. /* Account for sin6_port being smaller than user_port. */
  7837. port_size = min(port_size, BPF_LDST_BYTES(si));
  7838. SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
  7839. struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
  7840. sin6_port, bytes_to_bpf_size(port_size), 0, tmp_reg);
  7841. break;
  7842. case offsetof(struct bpf_sock_addr, family):
  7843. SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
  7844. struct sock, sk, sk_family);
  7845. break;
  7846. case offsetof(struct bpf_sock_addr, type):
  7847. SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
  7848. struct sock, sk, sk_type);
  7849. break;
  7850. case offsetof(struct bpf_sock_addr, protocol):
  7851. SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
  7852. struct sock, sk, sk_protocol);
  7853. break;
  7854. case offsetof(struct bpf_sock_addr, msg_src_ip4):
  7855. /* Treat t_ctx as struct in_addr for msg_src_ip4. */
  7856. SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
  7857. struct bpf_sock_addr_kern, struct in_addr, t_ctx,
  7858. s_addr, BPF_SIZE(si->code), 0, tmp_reg);
  7859. break;
  7860. case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
  7861. msg_src_ip6[3]):
  7862. off = si->off;
  7863. off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
  7864. /* Treat t_ctx as struct in6_addr for msg_src_ip6. */
  7865. SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
  7866. struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
  7867. s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
  7868. break;
  7869. case offsetof(struct bpf_sock_addr, sk):
  7870. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
  7871. si->dst_reg, si->src_reg,
  7872. offsetof(struct bpf_sock_addr_kern, sk));
  7873. break;
  7874. }
  7875. return insn - insn_buf;
  7876. }
  7877. static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
  7878. const struct bpf_insn *si,
  7879. struct bpf_insn *insn_buf,
  7880. struct bpf_prog *prog,
  7881. u32 *target_size)
  7882. {
  7883. struct bpf_insn *insn = insn_buf;
  7884. int off;
  7885. /* Helper macro for adding read access to tcp_sock or sock fields. */
  7886. #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
  7887. do { \
  7888. int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
  7889. BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
  7890. sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
  7891. if (si->dst_reg == reg || si->src_reg == reg) \
  7892. reg--; \
  7893. if (si->dst_reg == reg || si->src_reg == reg) \
  7894. reg--; \
  7895. if (si->dst_reg == si->src_reg) { \
  7896. *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
  7897. offsetof(struct bpf_sock_ops_kern, \
  7898. temp)); \
  7899. fullsock_reg = reg; \
  7900. jmp += 2; \
  7901. } \
  7902. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
  7903. struct bpf_sock_ops_kern, \
  7904. is_fullsock), \
  7905. fullsock_reg, si->src_reg, \
  7906. offsetof(struct bpf_sock_ops_kern, \
  7907. is_fullsock)); \
  7908. *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
  7909. if (si->dst_reg == si->src_reg) \
  7910. *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
  7911. offsetof(struct bpf_sock_ops_kern, \
  7912. temp)); \
  7913. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
  7914. struct bpf_sock_ops_kern, sk),\
  7915. si->dst_reg, si->src_reg, \
  7916. offsetof(struct bpf_sock_ops_kern, sk));\
  7917. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
  7918. OBJ_FIELD), \
  7919. si->dst_reg, si->dst_reg, \
  7920. offsetof(OBJ, OBJ_FIELD)); \
  7921. if (si->dst_reg == si->src_reg) { \
  7922. *insn++ = BPF_JMP_A(1); \
  7923. *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
  7924. offsetof(struct bpf_sock_ops_kern, \
  7925. temp)); \
  7926. } \
  7927. } while (0)
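/* For illustration, SOCK_OPS_GET_FIELD() guards the field load with
 * is_fullsock so that request/timewait sockets never dereference a struct
 * that is not a full struct sock; in the common case the program then simply
 * observes 0.  Roughly:
 *
 *	if (!skops->is_fullsock)
 *		result = 0;
 *	else
 *		result = ((OBJ *)skops->sk)->OBJ_FIELD;
 */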
  7928. #define SOCK_OPS_GET_SK() \
  7929. do { \
  7930. int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
  7931. if (si->dst_reg == reg || si->src_reg == reg) \
  7932. reg--; \
  7933. if (si->dst_reg == reg || si->src_reg == reg) \
  7934. reg--; \
  7935. if (si->dst_reg == si->src_reg) { \
  7936. *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
  7937. offsetof(struct bpf_sock_ops_kern, \
  7938. temp)); \
  7939. fullsock_reg = reg; \
  7940. jmp += 2; \
  7941. } \
  7942. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
  7943. struct bpf_sock_ops_kern, \
  7944. is_fullsock), \
  7945. fullsock_reg, si->src_reg, \
  7946. offsetof(struct bpf_sock_ops_kern, \
  7947. is_fullsock)); \
  7948. *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
  7949. if (si->dst_reg == si->src_reg) \
  7950. *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
  7951. offsetof(struct bpf_sock_ops_kern, \
  7952. temp)); \
  7953. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
  7954. struct bpf_sock_ops_kern, sk),\
  7955. si->dst_reg, si->src_reg, \
  7956. offsetof(struct bpf_sock_ops_kern, sk));\
  7957. if (si->dst_reg == si->src_reg) { \
  7958. *insn++ = BPF_JMP_A(1); \
  7959. *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
  7960. offsetof(struct bpf_sock_ops_kern, \
  7961. temp)); \
  7962. } \
  7963. } while (0)
  7964. #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
  7965. SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
  7966. /* Helper macro for adding write access to tcp_sock or sock fields.
  7967. * The macro is called with two registers, dst_reg which contains a pointer
  7968. * to ctx (context) and src_reg which contains the value that should be
  7969. * stored. However, we need an additional register since we cannot overwrite
  7970. * dst_reg because it may be used later in the program.
7971. * Instead we "borrow" one of the other registers. We first save its value
  7972. * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
  7973. * it at the end of the macro.
  7974. */
  7975. #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
  7976. do { \
  7977. int reg = BPF_REG_9; \
  7978. BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
  7979. sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
  7980. if (si->dst_reg == reg || si->src_reg == reg) \
  7981. reg--; \
  7982. if (si->dst_reg == reg || si->src_reg == reg) \
  7983. reg--; \
  7984. *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
  7985. offsetof(struct bpf_sock_ops_kern, \
  7986. temp)); \
  7987. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
  7988. struct bpf_sock_ops_kern, \
  7989. is_fullsock), \
  7990. reg, si->dst_reg, \
  7991. offsetof(struct bpf_sock_ops_kern, \
  7992. is_fullsock)); \
  7993. *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
  7994. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
  7995. struct bpf_sock_ops_kern, sk),\
  7996. reg, si->dst_reg, \
  7997. offsetof(struct bpf_sock_ops_kern, sk));\
  7998. *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
  7999. reg, si->src_reg, \
  8000. offsetof(OBJ, OBJ_FIELD)); \
  8001. *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
  8002. offsetof(struct bpf_sock_ops_kern, \
  8003. temp)); \
  8004. } while (0)
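/* For illustration, the write path mirrors the read path in reverse and
 * behaves roughly like
 *
 *	if (skops->is_fullsock)
 *		((OBJ *)skops->sk)->OBJ_FIELD = value;
 *
 * with the borrowed register spilled to and restored from the temp field of
 * bpf_sock_ops_kern around the sequence.
 */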
  8005. #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
  8006. do { \
  8007. if (TYPE == BPF_WRITE) \
  8008. SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
  8009. else \
  8010. SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
  8011. } while (0)
  8012. if (insn > insn_buf)
  8013. return insn - insn_buf;
  8014. switch (si->off) {
  8015. case offsetof(struct bpf_sock_ops, op):
  8016. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
  8017. op),
  8018. si->dst_reg, si->src_reg,
  8019. offsetof(struct bpf_sock_ops_kern, op));
  8020. break;
  8021. case offsetof(struct bpf_sock_ops, replylong[0]) ...
  8022. offsetof(struct bpf_sock_ops, replylong[3]):
  8023. BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) !=
  8024. sizeof_field(struct bpf_sock_ops_kern, reply));
  8025. BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) !=
  8026. sizeof_field(struct bpf_sock_ops_kern, replylong));
  8027. off = si->off;
  8028. off -= offsetof(struct bpf_sock_ops, replylong[0]);
  8029. off += offsetof(struct bpf_sock_ops_kern, replylong[0]);
  8030. if (type == BPF_WRITE)
  8031. *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
  8032. off);
  8033. else
  8034. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
  8035. off);
  8036. break;
  8037. case offsetof(struct bpf_sock_ops, family):
  8038. BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
  8039. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
  8040. struct bpf_sock_ops_kern, sk),
  8041. si->dst_reg, si->src_reg,
  8042. offsetof(struct bpf_sock_ops_kern, sk));
  8043. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
  8044. offsetof(struct sock_common, skc_family));
  8045. break;
  8046. case offsetof(struct bpf_sock_ops, remote_ip4):
  8047. BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
  8048. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
  8049. struct bpf_sock_ops_kern, sk),
  8050. si->dst_reg, si->src_reg,
  8051. offsetof(struct bpf_sock_ops_kern, sk));
  8052. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  8053. offsetof(struct sock_common, skc_daddr));
  8054. break;
  8055. case offsetof(struct bpf_sock_ops, local_ip4):
  8056. BUILD_BUG_ON(sizeof_field(struct sock_common,
  8057. skc_rcv_saddr) != 4);
  8058. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
  8059. struct bpf_sock_ops_kern, sk),
  8060. si->dst_reg, si->src_reg,
  8061. offsetof(struct bpf_sock_ops_kern, sk));
  8062. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  8063. offsetof(struct sock_common,
  8064. skc_rcv_saddr));
  8065. break;
  8066. case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
  8067. offsetof(struct bpf_sock_ops, remote_ip6[3]):
  8068. #if IS_ENABLED(CONFIG_IPV6)
  8069. BUILD_BUG_ON(sizeof_field(struct sock_common,
  8070. skc_v6_daddr.s6_addr32[0]) != 4);
  8071. off = si->off;
  8072. off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
  8073. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
  8074. struct bpf_sock_ops_kern, sk),
  8075. si->dst_reg, si->src_reg,
  8076. offsetof(struct bpf_sock_ops_kern, sk));
  8077. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  8078. offsetof(struct sock_common,
  8079. skc_v6_daddr.s6_addr32[0]) +
  8080. off);
  8081. #else
  8082. *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
  8083. #endif
  8084. break;
  8085. case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
  8086. offsetof(struct bpf_sock_ops, local_ip6[3]):
  8087. #if IS_ENABLED(CONFIG_IPV6)
  8088. BUILD_BUG_ON(sizeof_field(struct sock_common,
  8089. skc_v6_rcv_saddr.s6_addr32[0]) != 4);
  8090. off = si->off;
  8091. off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
  8092. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
  8093. struct bpf_sock_ops_kern, sk),
  8094. si->dst_reg, si->src_reg,
  8095. offsetof(struct bpf_sock_ops_kern, sk));
  8096. *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
  8097. offsetof(struct sock_common,
  8098. skc_v6_rcv_saddr.s6_addr32[0]) +
  8099. off);
  8100. #else
  8101. *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
  8102. #endif
  8103. break;
  8104. case offsetof(struct bpf_sock_ops, remote_port):
  8105. BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
  8106. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
  8107. struct bpf_sock_ops_kern, sk),
  8108. si->dst_reg, si->src_reg,
  8109. offsetof(struct bpf_sock_ops_kern, sk));
  8110. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
  8111. offsetof(struct sock_common, skc_dport));
  8112. #ifndef __BIG_ENDIAN_BITFIELD
  8113. *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
  8114. #endif
  8115. break;
  8116. case offsetof(struct bpf_sock_ops, local_port):
  8117. BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
  8118. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
  8119. struct bpf_sock_ops_kern, sk),
  8120. si->dst_reg, si->src_reg,
  8121. offsetof(struct bpf_sock_ops_kern, sk));
  8122. *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
  8123. offsetof(struct sock_common, skc_num));
  8124. break;
  8125. case offsetof(struct bpf_sock_ops, is_fullsock):
  8126. *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
  8127. struct bpf_sock_ops_kern,
  8128. is_fullsock),
  8129. si->dst_reg, si->src_reg,
  8130. offsetof(struct bpf_sock_ops_kern,
  8131. is_fullsock));
  8132. break;
  8133. case offsetof(struct bpf_sock_ops, state):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_state));
		break;
	case offsetof(struct bpf_sock_ops, rtt_min):
		BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
			     sizeof(struct minmax));
		BUILD_BUG_ON(sizeof(struct minmax) <
			     sizeof(struct minmax_sample));

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct tcp_sock, rtt_min) +
				      sizeof_field(struct minmax_sample, t));
		break;
	case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
		SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
				   struct tcp_sock);
		break;
	case offsetof(struct bpf_sock_ops, sk_txhash):
		SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
					  struct sock, type);
		break;
	case offsetof(struct bpf_sock_ops, snd_cwnd):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd);
		break;
	case offsetof(struct bpf_sock_ops, srtt_us):
		SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us);
		break;
	case offsetof(struct bpf_sock_ops, snd_ssthresh):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh);
		break;
	case offsetof(struct bpf_sock_ops, rcv_nxt):
		SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt);
		break;
	case offsetof(struct bpf_sock_ops, snd_nxt):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt);
		break;
	case offsetof(struct bpf_sock_ops, snd_una):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una);
		break;
	case offsetof(struct bpf_sock_ops, mss_cache):
		SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache);
		break;
	case offsetof(struct bpf_sock_ops, ecn_flags):
		SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags);
		break;
	case offsetof(struct bpf_sock_ops, rate_delivered):
		SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered);
		break;
	case offsetof(struct bpf_sock_ops, rate_interval_us):
		SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us);
		break;
	case offsetof(struct bpf_sock_ops, packets_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out);
		break;
	case offsetof(struct bpf_sock_ops, retrans_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out);
		break;
	case offsetof(struct bpf_sock_ops, total_retrans):
		SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans);
		break;
	case offsetof(struct bpf_sock_ops, segs_in):
		SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in);
		break;
	case offsetof(struct bpf_sock_ops, data_segs_in):
		SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in);
		break;
	case offsetof(struct bpf_sock_ops, segs_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out);
		break;
	case offsetof(struct bpf_sock_ops, data_segs_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out);
		break;
	case offsetof(struct bpf_sock_ops, lost_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out);
		break;
	case offsetof(struct bpf_sock_ops, sacked_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out);
		break;
	case offsetof(struct bpf_sock_ops, bytes_received):
		SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received);
		break;
	case offsetof(struct bpf_sock_ops, bytes_acked):
		SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
		break;
	case offsetof(struct bpf_sock_ops, sk):
		SOCK_OPS_GET_SK();
		break;
	case offsetof(struct bpf_sock_ops, skb_data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb_data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb_data_end));
		break;
	case offsetof(struct bpf_sock_ops, skb_data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct sk_buff, data));
		break;
	case offsetof(struct bpf_sock_ops, skb_len):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct sk_buff, len));
		break;
	case offsetof(struct bpf_sock_ops, skb_tcp_flags):
		off = offsetof(struct sk_buff, cb);
		off += offsetof(struct tcp_skb_cb, tcp_flags);
		*target_size = sizeof_field(struct tcp_skb_cb, tcp_flags);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb,
						       tcp_flags),
				      si->dst_reg, si->dst_reg, off);
		break;
	}
	return insn - insn_buf;
}
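
/* Rewrite __sk_buff context accesses for SK_SKB (sockmap/sockhash) programs:
 * data_end is read from the bpf scratch area in tcp_skb_cb, cb[0..4] from
 * sk_skb_cb, and every other field falls back to bpf_convert_ctx_access().
 */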
static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, data_end):
		off = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct tcp_skb_cb, bpf.data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;
	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetofend(struct __sk_buff, cb[4]) - 1:
		BUILD_BUG_ON(sizeof_field(struct sk_skb_cb, data) < 20);
		BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
			      offsetof(struct sk_skb_cb, data)) %
			     sizeof(__u64));

		prog->cb_access = 1;
		off = si->off;
		off -= offsetof(struct __sk_buff, cb[0]);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct sk_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}
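
/* Rewrite sk_msg_md context accesses for SK_MSG programs into loads from
 * struct sk_msg and the socket it references.
 */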
static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
#if IS_ENABLED(CONFIG_IPV6)
	int off;
#endif

	/* convert ctx uses the fact sg element is first in struct */
	BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);

	switch (si->off) {
	case offsetof(struct sk_msg_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data));
		break;
	case offsetof(struct sk_msg_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data_end));
		break;
	case offsetof(struct sk_msg_md, family):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;
	case offsetof(struct sk_msg_md, remote_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;
	case offsetof(struct sk_msg_md, local_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;
	case offsetof(struct sk_msg_md, remote_ip6[0]) ...
	     offsetof(struct sk_msg_md, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;
	case offsetof(struct sk_msg_md, local_ip6[0]) ...
	     offsetof(struct sk_msg_md, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;
	case offsetof(struct sk_msg_md, remote_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;
	case offsetof(struct sk_msg_md, local_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;
	case offsetof(struct sk_msg_md, size):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg_sg, size));
		break;
	case offsetof(struct sk_msg_md, sk):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		break;
	}

	return insn - insn_buf;
}
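
/* Verifier and runtime ops for each BPF program type handled in this file. */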
const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= cg_skb_func_proto,
	.is_valid_access	= cg_skb_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_in_verifier_ops = {
	.get_func_proto		= lwt_in_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_in_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_out_verifier_ops = {
	.get_func_proto		= lwt_out_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_out_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
	.get_func_proto		= lwt_seg6local_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_seg6local_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= bpf_sock_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};

const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
	.get_func_proto		= sock_addr_func_proto,
	.is_valid_access	= sock_addr_is_valid_access,
	.convert_ctx_access	= sock_addr_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_addr_prog_ops = {
};

const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};

const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};

const struct bpf_verifier_ops sk_msg_verifier_ops = {
	.get_func_proto		= sk_msg_func_proto,
	.is_valid_access	= sk_msg_is_valid_access,
	.convert_ctx_access	= sk_msg_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops sk_msg_prog_ops = {
};

const struct bpf_verifier_ops flow_dissector_verifier_ops = {
	.get_func_proto		= flow_dissector_func_proto,
	.is_valid_access	= flow_dissector_is_valid_access,
	.convert_ctx_access	= flow_dissector_convert_ctx_access,
};

const struct bpf_prog_ops flow_dissector_prog_ops = {
	.test_run		= bpf_prog_test_run_flow_dissector,
};
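
/* Detach the classic/eBPF socket filter attached to @sk, if any. The socket
 * lock must be held; returns -EPERM if SOCK_FILTER_LOCKED is set and
 * -ENOENT if no filter is attached.
 */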
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
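
/* Dump the classic BPF program originally attached to @sk into @ubuf.
 * eBPF-only filters have no original program and cannot be dumped (-EACCES);
 * with len == 0 only the number of filter blocks is reported.
 */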
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}

#ifdef CONFIG_INET
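/* Populate the sk_reuseport_kern context seen by SK_REUSEPORT programs. */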
static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
				    struct sock_reuseport *reuse,
				    struct sock *sk, struct sk_buff *skb,
				    u32 hash)
{
	reuse_kern->skb = skb;
	reuse_kern->sk = sk;
	reuse_kern->selected_sk = NULL;
	reuse_kern->data_end = skb->data + skb_headlen(skb);
	reuse_kern->hash = hash;
	reuse_kern->reuseport_id = reuse->reuseport_id;
	reuse_kern->bind_inany = reuse->bind_inany;
}
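
/* Run @prog for an incoming @skb on reuseport group @reuse. On SK_PASS the
 * socket selected by the program is returned (possibly NULL if none was
 * selected); otherwise ERR_PTR(-ECONNREFUSED).
 */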
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  u32 hash)
{
	struct sk_reuseport_kern reuse_kern;
	enum sk_action action;

	bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
	action = BPF_PROG_RUN(prog, &reuse_kern);

	if (action == SK_PASS)
		return reuse_kern.selected_sk;
	else
		return ERR_PTR(-ECONNREFUSED);
}
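
/* bpf_sk_select_reuseport() helper: pick the socket stored at @key in @map as
 * the reuseport lookup result, after verifying it belongs to a compatible
 * reuseport group.
 */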
BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
	   struct bpf_map *, map, void *, key, u32, flags)
{
	bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
	struct sock_reuseport *reuse;
	struct sock *selected_sk;

	selected_sk = map->ops->map_lookup_elem(map, key);
	if (!selected_sk)
		return -ENOENT;

	reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
	if (!reuse) {
		/* Lookup in sock_map can return TCP ESTABLISHED sockets. */
		if (sk_is_refcounted(selected_sk))
			sock_put(selected_sk);

		/* reuseport_array has only sk with non NULL sk_reuseport_cb.
		 * The only (!reuse) case here is - the sk has already been
		 * unhashed (e.g. by close()), so treat it as -ENOENT.
		 *
		 * Other maps (e.g. sock_map) do not provide this guarantee and
		 * the sk may never be in the reuseport group to begin with.
		 */
		return is_sockarray ? -ENOENT : -EINVAL;
	}

	if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
		struct sock *sk = reuse_kern->sk;

		if (sk->sk_protocol != selected_sk->sk_protocol)
			return -EPROTOTYPE;
		else if (sk->sk_family != selected_sk->sk_family)
			return -EAFNOSUPPORT;

		/* Catch all. Likely bound to a different sockaddr. */
		return -EBADFD;
	}

	reuse_kern->selected_sk = selected_sk;

	return 0;
}

static const struct bpf_func_proto sk_select_reuseport_proto = {
	.func		= sk_select_reuseport,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};
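
/* skb_load_bytes()/skb_load_bytes_relative() wrappers that operate on the
 * skb carried in the sk_reuseport_kern context.
 */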
BPF_CALL_4(sk_reuseport_load_bytes,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len)
{
	return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
	.func		= sk_reuseport_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(sk_reuseport_load_bytes_relative,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len, u32, start_header)
{
	return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
					       len, start_header);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
	.func		= sk_reuseport_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
sk_reuseport_func_proto(enum bpf_func_id func_id,
			const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sk_select_reuseport:
		return &sk_select_reuseport_proto;
	case BPF_FUNC_skb_load_bytes:
		return &sk_reuseport_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &sk_reuseport_load_bytes_relative_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
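
/* Validate read-only accesses to struct sk_reuseport_md for the verifier. */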
static bool
sk_reuseport_is_valid_access(int off, int size,
			     enum bpf_access_type type,
			     const struct bpf_prog *prog,
			     struct bpf_insn_access_aux *info)
{
	const u32 size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
	    off % size || type != BPF_READ)
		return false;

	switch (off) {
	case offsetof(struct sk_reuseport_md, data):
		info->reg_type = PTR_TO_PACKET;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, hash):
		return size == size_default;

	/* Fields that allow narrowing */
	case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
		if (size < sizeof_field(struct sk_buff, protocol))
			return false;
		fallthrough;
	case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
	case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
	case bpf_ctx_range(struct sk_reuseport_md, len):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);

	default:
		return false;
	}
}
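
/* Emit a load of a sk_reuseport_kern field, or of a field nested behind its
 * skb/sk pointers via the SOCK_ADDR_LOAD_NESTED_FIELD machinery.
 */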
#define SK_REUSEPORT_LOAD_FIELD(F) ({ \
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
			      si->dst_reg, si->src_reg, \
			      bpf_target_off(struct sk_reuseport_kern, F, \
					     sizeof_field(struct sk_reuseport_kern, F), \
					     target_size)); \
})

#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) \
	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \
				    struct sk_buff, \
				    skb, \
				    SKB_FIELD)

#define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD) \
	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \
				    struct sock, \
				    sk, \
				    SK_FIELD)

static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
					   const struct bpf_insn *si,
					   struct bpf_insn *insn_buf,
					   struct bpf_prog *prog,
					   u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct sk_reuseport_md, data):
		SK_REUSEPORT_LOAD_SKB_FIELD(data);
		break;

	case offsetof(struct sk_reuseport_md, len):
		SK_REUSEPORT_LOAD_SKB_FIELD(len);
		break;

	case offsetof(struct sk_reuseport_md, eth_protocol):
		SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
		break;

	case offsetof(struct sk_reuseport_md, ip_protocol):
		SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol);
		break;

	case offsetof(struct sk_reuseport_md, data_end):
		SK_REUSEPORT_LOAD_FIELD(data_end);
		break;

	case offsetof(struct sk_reuseport_md, hash):
		SK_REUSEPORT_LOAD_FIELD(hash);
		break;

	case offsetof(struct sk_reuseport_md, bind_inany):
		SK_REUSEPORT_LOAD_FIELD(bind_inany);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
	.get_func_proto		= sk_reuseport_func_proto,
	.is_valid_access	= sk_reuseport_is_valid_access,
	.convert_ctx_access	= sk_reuseport_convert_ctx_access,
};

const struct bpf_prog_ops sk_reuseport_prog_ops = {
};
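
/* BPF_PROG_TYPE_SK_LOOKUP: lets a BPF program choose the socket that an
 * inbound packet's socket lookup resolves to.
 */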
DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled);
EXPORT_SYMBOL(bpf_sk_lookup_enabled);

BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx,
	   struct sock *, sk, u64, flags)
{
	if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE |
			       BPF_SK_LOOKUP_F_NO_REUSEPORT)))
		return -EINVAL;
	if (unlikely(sk && sk_is_refcounted(sk)))
		return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */
	if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED))
		return -ESOCKTNOSUPPORT; /* reject connected sockets */

	/* Check if socket is suitable for packet L3/L4 protocol */
	if (sk && sk->sk_protocol != ctx->protocol)
		return -EPROTOTYPE;
	if (sk && sk->sk_family != ctx->family &&
	    (sk->sk_family == AF_INET || ipv6_only_sock(sk)))
		return -EAFNOSUPPORT;

	if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE))
		return -EEXIST;

	/* Select socket as lookup result */
	ctx->selected_sk = sk;
	ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT;

	return 0;
}

static const struct bpf_func_proto bpf_sk_lookup_assign_proto = {
	.func		= bpf_sk_lookup_assign,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_SOCKET_OR_NULL,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	case BPF_FUNC_sk_assign:
		return &bpf_sk_lookup_assign_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

static bool sk_lookup_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct bpf_sk_lookup))
		return false;
	if (off % size != 0)
		return false;
	if (type != BPF_READ)
		return false;

	switch (off) {
	case offsetof(struct bpf_sk_lookup, sk):
		info->reg_type = PTR_TO_SOCKET_OR_NULL;
		return size == sizeof(__u64);

	case bpf_ctx_range(struct bpf_sk_lookup, family):
	case bpf_ctx_range(struct bpf_sk_lookup, protocol):
	case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4):
	case bpf_ctx_range(struct bpf_sk_lookup, local_ip4):
	case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]):
	case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
	case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
	case bpf_ctx_range(struct bpf_sk_lookup, local_port):
		bpf_ctx_record_field_size(info, sizeof(__u32));
		return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));

	default:
		return false;
	}
}
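
/* Rewrite bpf_sk_lookup context accesses into loads from bpf_sk_lookup_kern. */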
static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
					const struct bpf_insn *si,
					struct bpf_insn *insn_buf,
					struct bpf_prog *prog,
					u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sk_lookup, sk):
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sk_lookup_kern, selected_sk));
		break;

	case offsetof(struct bpf_sk_lookup, family):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     family, 2, target_size));
		break;

	case offsetof(struct bpf_sk_lookup, protocol):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     protocol, 2, target_size));
		break;

	case offsetof(struct bpf_sk_lookup, remote_ip4):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     v4.saddr, 4, target_size));
		break;

	case offsetof(struct bpf_sk_lookup, local_ip4):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     v4.daddr, 4, target_size));
		break;

	case bpf_ctx_range_till(struct bpf_sk_lookup,
				remote_ip6[0], remote_ip6[3]): {
#if IS_ENABLED(CONFIG_IPV6)
		int off = si->off;

		off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]);
		off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sk_lookup_kern, v6.saddr));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;
	}
	case bpf_ctx_range_till(struct bpf_sk_lookup,
				local_ip6[0], local_ip6[3]): {
#if IS_ENABLED(CONFIG_IPV6)
		int off = si->off;

		off -= offsetof(struct bpf_sk_lookup, local_ip6[0]);
		off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sk_lookup_kern, v6.daddr));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;
	}
	case offsetof(struct bpf_sk_lookup, remote_port):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     sport, 2, target_size));
		break;

	case offsetof(struct bpf_sk_lookup, local_port):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     dport, 2, target_size));
		break;
	}

	return insn - insn_buf;
}

const struct bpf_prog_ops sk_lookup_prog_ops = {
};

const struct bpf_verifier_ops sk_lookup_verifier_ops = {
	.get_func_proto		= sk_lookup_func_proto,
	.is_valid_access	= sk_lookup_is_valid_access,
	.convert_ctx_access	= sk_lookup_convert_ctx_access,
};

#endif /* CONFIG_INET */
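
/* XDP dispatcher: bpf_prog_change_xdp() updates the dispatcher trampoline
 * whenever the installed XDP program changes, so XDP dispatch can avoid an
 * indirect call.
 */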
DEFINE_BPF_DISPATCHER(xdp)

void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
{
	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
}

#ifdef CONFIG_DEBUG_INFO_BTF
BTF_ID_LIST_GLOBAL(btf_sock_ids)
#define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type)
BTF_SOCK_TYPE_xxx
#undef BTF_SOCK_TYPE
#else
u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
#endif
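
/* BTF-typed cast helpers: return @sk as a more specific socket type when it
 * matches the requested protocol/family/state, otherwise NULL.
 */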
BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{
	/* tcp6_sock type is not generated in dwarf and hence btf,
	 * trigger an explicit type generation here.
	 */
	BTF_TYPE_EMIT(struct tcp6_sock);
	if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP &&
	    sk->sk_family == AF_INET6)
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
	.func			= bpf_skc_to_tcp6_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
};

BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk)
{
	if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
	.func			= bpf_skc_to_tcp_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_TCP],
};

BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk)
{
	/* BTF types for tcp_timewait_sock and inet_timewait_sock are not
	 * generated if CONFIG_INET=n. Trigger an explicit generation here.
	 */
	BTF_TYPE_EMIT(struct inet_timewait_sock);
	BTF_TYPE_EMIT(struct tcp_timewait_sock);

#ifdef CONFIG_INET
	if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT)
		return (unsigned long)sk;
#endif

#if IS_BUILTIN(CONFIG_IPV6)
	if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT)
		return (unsigned long)sk;
#endif

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
	.func			= bpf_skc_to_tcp_timewait_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
};

BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk)
{
#ifdef CONFIG_INET
	if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV)
		return (unsigned long)sk;
#endif

#if IS_BUILTIN(CONFIG_IPV6)
	if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV)
		return (unsigned long)sk;
#endif

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
	.func			= bpf_skc_to_tcp_request_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
};

BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk)
{
	/* udp6_sock type is not generated in dwarf and hence btf,
	 * trigger an explicit type generation here.
	 */
	BTF_TYPE_EMIT(struct udp6_sock);
	if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP &&
	    sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6)
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
	.func			= bpf_skc_to_udp6_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
};
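
/* Common func_proto lookup for the skc_to_*() casts: they are only handed
 * out to callers that pass perfmon_capable(); everything else falls back to
 * bpf_base_func_proto().
 */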
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id)
{
	const struct bpf_func_proto *func;

	switch (func_id) {
	case BPF_FUNC_skc_to_tcp6_sock:
		func = &bpf_skc_to_tcp6_sock_proto;
		break;
	case BPF_FUNC_skc_to_tcp_sock:
		func = &bpf_skc_to_tcp_sock_proto;
		break;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		func = &bpf_skc_to_tcp_timewait_sock_proto;
		break;
	case BPF_FUNC_skc_to_tcp_request_sock:
		func = &bpf_skc_to_tcp_request_sock_proto;
		break;
	case BPF_FUNC_skc_to_udp6_sock:
		func = &bpf_skc_to_udp6_sock_proto;
		break;
	default:
		return bpf_base_func_proto(func_id);
	}

	if (!perfmon_capable())
		return NULL;

	return func;
}