syscall.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <trace/hooks/syscall_check.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr = uaddr + expected_size;
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	res = check_zeroed_user(addr, actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}
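
/* map->writecnt tracks syscall-side writers: writable memory mappings and
 * in-flight update/delete(-batch) commands increment it and decrement it when
 * they are done, so bpf_map_write_active() can tell whether any such writer
 * currently exists.
 */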
static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}
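
/* Syscall-side element update: dispatch to the map-type specific helper. For
 * the common case the update runs under rcu_read_lock() with
 * bpf_disable_instrumentation() holding off instrumentation-attached BPF
 * programs on this CPU (see bpf_prog_active above) while the map is modified.
 */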
static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock, since value wasn't zero inited */
			check_and_init_map_lock(map, value);
		}
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, lets clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_charge_memlock(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
		atomic_long_sub(pages, &user->locked_vm);
		return -EPERM;
	}
	return 0;
}

static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
{
	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
	struct user_struct *user;
	int ret;

	if (size >= U32_MAX - PAGE_SIZE)
		return -E2BIG;

	user = get_current_user();
	ret = bpf_charge_memlock(user, pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	mem->pages = pages;
	mem->user = user;

	return 0;
}

void bpf_map_charge_finish(struct bpf_map_memory *mem)
{
	bpf_uncharge_memlock(mem->user, mem->pages);
	free_uid(mem->user);
}

void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src)
{
	*dst = *src;

	/* Make sure src will not be used for the redundant uncharging. */
	memset(src, 0, sizeof(struct bpf_map_memory));
}

int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
{
	int ret;

	ret = bpf_charge_memlock(map->memory.user, pages);
	if (ret)
		return ret;
	map->memory.pages += pages;
	return ret;
}

void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
	bpf_uncharge_memlock(map->memory.user, pages);
	map->memory.pages -= pages;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct bpf_map_memory mem;

	bpf_map_charge_move(&mem, &map->memory);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	bpf_map_charge_finish(&mem);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 type = 0, jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		spin_lock(&array->aux->owner.lock);
		type = array->aux->owner.type;
		jited = array->aux->owner.jited;
		spin_unlock(&array->aux->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->memory.pages * 1ULL << PAGE_SHIFT,
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};
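
/* Memory-map a map's value area. Writable mappings are refused for frozen
 * maps and for maps created with BPF_F_RDONLY_PROG; a mapping that keeps
 * VM_MAYWRITE set is accounted in map->writecnt via the vm_ops above.
 */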
static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allows user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
	.mmap = bpf_map_mmap,
	.poll = bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
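
/* CHECK_ATTR(CMD) scans every byte of 'attr' that lies past CMD_LAST_FIELD
 * and evaluates to true if any of them is non-zero, i.e. userspace set a
 * field this command does not use. For example, CHECK_ATTR(BPF_MAP_CREATE)
 * inspects everything after btf_vmlinux_value_type_id, BPF_MAP_CREATE's last
 * field as defined below.
 */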

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem;
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);

	map->spin_lock_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct, the bpf_prog.o
	     * must have BTF to begin with to figure out the corresponding
	     * kernel counterpart. Thus, attr->btf_fd also has to be valid.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	bpf_map_charge_move(&mem, &map->memory);
	map->ops->map_free(map);
	bpf_map_charge_finish(&mem);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return memdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		maybe_wait_bpf_programs(map);
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kfree(key);
	return err;
}
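
/* Batched update: copy one key/value pair at a time from userspace and feed
 * it to bpf_map_update_value(); uattr->batch.count reports how many elements
 * were actually processed, even when an element fails part-way through.
 */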
int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->batch.map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kfree(key);
		return -ENOMEM;
	}

	f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kfree(value);
	kfree(key);
	fdput(f);
	return err;
}

#define MAP_LOOKUP_RETRIES 3
  1170. int generic_map_lookup_batch(struct bpf_map *map,
  1171. const union bpf_attr *attr,
  1172. union bpf_attr __user *uattr)
  1173. {
  1174. void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
  1175. void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
  1176. void __user *values = u64_to_user_ptr(attr->batch.values);
  1177. void __user *keys = u64_to_user_ptr(attr->batch.keys);
  1178. void *buf, *buf_prevkey, *prev_key, *key, *value;
  1179. int err, retry = MAP_LOOKUP_RETRIES;
  1180. u32 value_size, cp, max_count;
  1181. if (attr->batch.elem_flags & ~BPF_F_LOCK)
  1182. return -EINVAL;
  1183. if ((attr->batch.elem_flags & BPF_F_LOCK) &&
  1184. !map_value_has_spin_lock(map))
  1185. return -EINVAL;
  1186. value_size = bpf_map_value_size(map);
  1187. max_count = attr->batch.count;
  1188. if (!max_count)
  1189. return 0;
  1190. if (put_user(0, &uattr->batch.count))
  1191. return -EFAULT;
  1192. buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
  1193. if (!buf_prevkey)
  1194. return -ENOMEM;
  1195. buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
  1196. if (!buf) {
  1197. kfree(buf_prevkey);
  1198. return -ENOMEM;
  1199. }
  1200. err = -EFAULT;
  1201. prev_key = NULL;
  1202. if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
  1203. goto free_buf;
  1204. key = buf;
  1205. value = key + map->key_size;
  1206. if (ubatch)
  1207. prev_key = buf_prevkey;
  1208. for (cp = 0; cp < max_count;) {
  1209. rcu_read_lock();
  1210. err = map->ops->map_get_next_key(map, prev_key, key);
  1211. rcu_read_unlock();
  1212. if (err)
  1213. break;
  1214. err = bpf_map_copy_value(map, key, value,
  1215. attr->batch.elem_flags);
  1216. if (err == -ENOENT) {
  1217. if (retry) {
  1218. retry--;
  1219. continue;
  1220. }
  1221. err = -EINTR;
  1222. break;
  1223. }
  1224. if (err)
  1225. goto free_buf;
  1226. if (copy_to_user(keys + cp * map->key_size, key,
  1227. map->key_size)) {
  1228. err = -EFAULT;
  1229. goto free_buf;
  1230. }
  1231. if (copy_to_user(values + cp * value_size, value, value_size)) {
  1232. err = -EFAULT;
  1233. goto free_buf;
  1234. }
  1235. if (!prev_key)
  1236. prev_key = buf_prevkey;
  1237. swap(prev_key, key);
  1238. retry = MAP_LOOKUP_RETRIES;
  1239. cp++;
  1240. cond_resched();
  1241. }
  1242. if (err == -EFAULT)
  1243. goto free_buf;
  1244. if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
  1245. (cp && copy_to_user(uobatch, prev_key, map->key_size))))
  1246. err = -EFAULT;
  1247. free_buf:
  1248. kfree(buf_prevkey);
  1249. kfree(buf);
  1250. return err;
  1251. }
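/* A minimal user-space sketch of driving the batch interface implemented
 * above via libbpf's bpf_map_lookup_batch() wrapper (hedged: BATCH_SIZE,
 * out_buf and process() are placeholders, and exact error reporting differs
 * across libbpf versions). in_batch is NULL on the first call, the returned
 * out_batch token feeds the next call, and iteration stops once ENOENT is
 * reported; the token buffer must be at least as large as the map key for
 * the generic implementation above:
 *
 *	__u32 count;
 *	void *in = NULL, *out = out_buf;
 *	int err;
 *
 *	do {
 *		count = BATCH_SIZE;
 *		err = bpf_map_lookup_batch(map_fd, in, out,
 *					   keys, values, &count, NULL);
 *		process(keys, values, count);
 *		in = out;
 *	} while (!err);
 */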
  1252. #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
  1253. static int map_lookup_and_delete_elem(union bpf_attr *attr)
  1254. {
  1255. void __user *ukey = u64_to_user_ptr(attr->key);
  1256. void __user *uvalue = u64_to_user_ptr(attr->value);
  1257. int ufd = attr->map_fd;
  1258. struct bpf_map *map;
  1259. void *key, *value;
  1260. u32 value_size;
  1261. struct fd f;
  1262. int err;
  1263. if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
  1264. return -EINVAL;
  1265. f = fdget(ufd);
  1266. map = __bpf_map_get(f);
  1267. if (IS_ERR(map))
  1268. return PTR_ERR(map);
  1269. bpf_map_write_active_inc(map);
  1270. if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
  1271. !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
  1272. err = -EPERM;
  1273. goto err_put;
  1274. }
  1275. key = __bpf_copy_key(ukey, map->key_size);
  1276. if (IS_ERR(key)) {
  1277. err = PTR_ERR(key);
  1278. goto err_put;
  1279. }
  1280. value_size = map->value_size;
  1281. err = -ENOMEM;
  1282. value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
  1283. if (!value)
  1284. goto free_key;
  1285. if (map->map_type == BPF_MAP_TYPE_QUEUE ||
  1286. map->map_type == BPF_MAP_TYPE_STACK) {
  1287. err = map->ops->map_pop_elem(map, value);
  1288. } else {
  1289. err = -ENOTSUPP;
  1290. }
  1291. if (err)
  1292. goto free_value;
  1293. if (copy_to_user(uvalue, value, value_size) != 0) {
  1294. err = -EFAULT;
  1295. goto free_value;
  1296. }
  1297. err = 0;
  1298. free_value:
  1299. kfree(value);
  1300. free_key:
  1301. kfree(key);
  1302. err_put:
  1303. bpf_map_write_active_dec(map);
  1304. fdput(f);
  1305. return err;
  1306. }
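/* Sketch of the user-space side of the queue/stack pop path above
 * (illustrative: map_fd is assumed to refer to a BPF_MAP_TYPE_QUEUE with a
 * 4-byte value; bpf_map_lookup_and_delete_elem() is the libbpf wrapper for
 * this command, and the key is unused for queues and stacks, so NULL is
 * passed):
 *
 *	__u32 val;
 *	int err;
 *
 *	err = bpf_map_lookup_and_delete_elem(map_fd, NULL, &val);
 *
 * The call fails with ENOENT once the queue is empty.
 */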
  1307. #define BPF_MAP_FREEZE_LAST_FIELD map_fd
  1308. static int map_freeze(const union bpf_attr *attr)
  1309. {
  1310. int err = 0, ufd = attr->map_fd;
  1311. struct bpf_map *map;
  1312. struct fd f;
  1313. if (CHECK_ATTR(BPF_MAP_FREEZE))
  1314. return -EINVAL;
  1315. f = fdget(ufd);
  1316. map = __bpf_map_get(f);
  1317. if (IS_ERR(map))
  1318. return PTR_ERR(map);
  1319. if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
  1320. fdput(f);
  1321. return -ENOTSUPP;
  1322. }
  1323. mutex_lock(&map->freeze_mutex);
  1324. if (bpf_map_write_active(map)) {
  1325. err = -EBUSY;
  1326. goto err_put;
  1327. }
  1328. if (READ_ONCE(map->frozen)) {
  1329. err = -EBUSY;
  1330. goto err_put;
  1331. }
  1332. if (!bpf_capable()) {
  1333. err = -EPERM;
  1334. goto err_put;
  1335. }
  1336. WRITE_ONCE(map->frozen, true);
  1337. err_put:
  1338. mutex_unlock(&map->freeze_mutex);
  1339. fdput(f);
  1340. return err;
  1341. }
  1342. static const struct bpf_prog_ops * const bpf_prog_types[] = {
  1343. #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
  1344. [_id] = & _name ## _prog_ops,
  1345. #define BPF_MAP_TYPE(_id, _ops)
  1346. #define BPF_LINK_TYPE(_id, _name)
  1347. #include <linux/bpf_types.h>
  1348. #undef BPF_PROG_TYPE
  1349. #undef BPF_MAP_TYPE
  1350. #undef BPF_LINK_TYPE
  1351. };
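/* For orientation: each BPF_PROG_TYPE() entry in <linux/bpf_types.h> expands
 * to one initializer of the table above. For example (illustrative expansion
 * of one entry):
 *
 *	BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter, ...)
 * becomes
 *	[BPF_PROG_TYPE_SOCKET_FILTER] = &sk_filter_prog_ops,
 */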
  1352. static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
  1353. {
  1354. const struct bpf_prog_ops *ops;
  1355. if (type >= ARRAY_SIZE(bpf_prog_types))
  1356. return -EINVAL;
  1357. type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
  1358. ops = bpf_prog_types[type];
  1359. if (!ops)
  1360. return -EINVAL;
  1361. if (!bpf_prog_is_dev_bound(prog->aux))
  1362. prog->aux->ops = ops;
  1363. else
  1364. prog->aux->ops = &bpf_offload_prog_ops;
  1365. prog->type = type;
  1366. return 0;
  1367. }
  1368. enum bpf_audit {
  1369. BPF_AUDIT_LOAD,
  1370. BPF_AUDIT_UNLOAD,
  1371. BPF_AUDIT_MAX,
  1372. };
  1373. static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
  1374. [BPF_AUDIT_LOAD] = "LOAD",
  1375. [BPF_AUDIT_UNLOAD] = "UNLOAD",
  1376. };
  1377. static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
  1378. {
  1379. struct audit_context *ctx = NULL;
  1380. struct audit_buffer *ab;
  1381. if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
  1382. return;
  1383. if (audit_enabled == AUDIT_OFF)
  1384. return;
  1385. if (op == BPF_AUDIT_LOAD)
  1386. ctx = audit_context();
  1387. ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
  1388. if (unlikely(!ab))
  1389. return;
  1390. audit_log_format(ab, "prog-id=%u op=%s",
  1391. prog->aux->id, bpf_audit_str[op]);
  1392. audit_log_end(ab);
  1393. }
  1394. int __bpf_prog_charge(struct user_struct *user, u32 pages)
  1395. {
  1396. unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  1397. unsigned long user_bufs;
  1398. if (user) {
  1399. user_bufs = atomic_long_add_return(pages, &user->locked_vm);
  1400. if (user_bufs > memlock_limit) {
  1401. atomic_long_sub(pages, &user->locked_vm);
  1402. return -EPERM;
  1403. }
  1404. }
  1405. return 0;
  1406. }
  1407. void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
  1408. {
  1409. if (user)
  1410. atomic_long_sub(pages, &user->locked_vm);
  1411. }
  1412. static int bpf_prog_charge_memlock(struct bpf_prog *prog)
  1413. {
  1414. struct user_struct *user = get_current_user();
  1415. int ret;
  1416. ret = __bpf_prog_charge(user, prog->pages);
  1417. if (ret) {
  1418. free_uid(user);
  1419. return ret;
  1420. }
  1421. prog->aux->user = user;
  1422. return 0;
  1423. }
  1424. static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
  1425. {
  1426. struct user_struct *user = prog->aux->user;
  1427. __bpf_prog_uncharge(user, prog->pages);
  1428. free_uid(user);
  1429. }
  1430. static int bpf_prog_alloc_id(struct bpf_prog *prog)
  1431. {
  1432. int id;
  1433. idr_preload(GFP_KERNEL);
  1434. spin_lock_bh(&prog_idr_lock);
  1435. id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
  1436. if (id > 0)
  1437. prog->aux->id = id;
  1438. spin_unlock_bh(&prog_idr_lock);
  1439. idr_preload_end();
  1440. /* id is in [1, INT_MAX) */
  1441. if (WARN_ON_ONCE(!id))
  1442. return -ENOSPC;
  1443. return id > 0 ? 0 : id;
  1444. }
  1445. void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
  1446. {
  1447. /* cBPF to eBPF migrations are currently not in the idr store.
  1448. * Offloaded programs are removed from the store when their device
  1449. * disappears - even if someone grabs an fd to them they are unusable,
  1450. * simply waiting for refcnt to drop to be freed.
  1451. */
  1452. if (!prog->aux->id)
  1453. return;
  1454. if (do_idr_lock)
  1455. spin_lock_bh(&prog_idr_lock);
  1456. else
  1457. __acquire(&prog_idr_lock);
  1458. idr_remove(&prog_idr, prog->aux->id);
  1459. prog->aux->id = 0;
  1460. if (do_idr_lock)
  1461. spin_unlock_bh(&prog_idr_lock);
  1462. else
  1463. __release(&prog_idr_lock);
  1464. }
  1465. static void __bpf_prog_put_rcu(struct rcu_head *rcu)
  1466. {
  1467. struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
  1468. kvfree(aux->func_info);
  1469. kfree(aux->func_info_aux);
  1470. bpf_prog_uncharge_memlock(aux->prog);
  1471. security_bpf_prog_free(aux);
  1472. bpf_prog_free(aux->prog);
  1473. }
  1474. static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
  1475. {
  1476. bpf_prog_kallsyms_del_all(prog);
  1477. btf_put(prog->aux->btf);
  1478. bpf_prog_free_linfo(prog);
  1479. if (deferred) {
  1480. if (prog->aux->sleepable)
  1481. call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
  1482. else
  1483. call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
  1484. } else {
  1485. __bpf_prog_put_rcu(&prog->aux->rcu);
  1486. }
  1487. }
  1488. static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
  1489. {
  1490. if (atomic64_dec_and_test(&prog->aux->refcnt)) {
  1491. perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
  1492. bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
  1493. /* bpf_prog_free_id() must be called first */
  1494. bpf_prog_free_id(prog, do_idr_lock);
  1495. __bpf_prog_put_noref(prog, true);
  1496. }
  1497. }
  1498. void bpf_prog_put(struct bpf_prog *prog)
  1499. {
  1500. __bpf_prog_put(prog, true);
  1501. }
  1502. EXPORT_SYMBOL_GPL(bpf_prog_put);
  1503. static int bpf_prog_release(struct inode *inode, struct file *filp)
  1504. {
  1505. struct bpf_prog *prog = filp->private_data;
  1506. bpf_prog_put(prog);
  1507. return 0;
  1508. }
  1509. static void bpf_prog_get_stats(const struct bpf_prog *prog,
  1510. struct bpf_prog_stats *stats)
  1511. {
  1512. u64 nsecs = 0, cnt = 0;
  1513. int cpu;
  1514. for_each_possible_cpu(cpu) {
  1515. const struct bpf_prog_stats *st;
  1516. unsigned int start;
  1517. u64 tnsecs, tcnt;
  1518. st = per_cpu_ptr(prog->aux->stats, cpu);
  1519. do {
  1520. start = u64_stats_fetch_begin_irq(&st->syncp);
  1521. tnsecs = st->nsecs;
  1522. tcnt = st->cnt;
  1523. } while (u64_stats_fetch_retry_irq(&st->syncp, start));
  1524. nsecs += tnsecs;
  1525. cnt += tcnt;
  1526. }
  1527. stats->nsecs = nsecs;
  1528. stats->cnt = cnt;
  1529. }
  1530. #ifdef CONFIG_PROC_FS
  1531. static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
  1532. {
  1533. const struct bpf_prog *prog = filp->private_data;
  1534. char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
  1535. struct bpf_prog_stats stats;
  1536. bpf_prog_get_stats(prog, &stats);
  1537. bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
  1538. seq_printf(m,
  1539. "prog_type:\t%u\n"
  1540. "prog_jited:\t%u\n"
  1541. "prog_tag:\t%s\n"
  1542. "memlock:\t%llu\n"
  1543. "prog_id:\t%u\n"
  1544. "run_time_ns:\t%llu\n"
  1545. "run_cnt:\t%llu\n",
  1546. prog->type,
  1547. prog->jited,
  1548. prog_tag,
  1549. prog->pages * 1ULL << PAGE_SHIFT,
  1550. prog->aux->id,
  1551. stats.nsecs,
  1552. stats.cnt);
  1553. }
  1554. #endif
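/* Illustrative fdinfo output produced by the seq_printf() above (the values
 * shown are made-up examples, not real measurements):
 *
 *	prog_type:	1
 *	prog_jited:	1
 *	prog_tag:	2a9d2f1c4b8e0d37
 *	memlock:	4096
 *	prog_id:	42
 *	run_time_ns:	927300
 *	run_cnt:	118
 */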
  1555. const struct file_operations bpf_prog_fops = {
  1556. #ifdef CONFIG_PROC_FS
  1557. .show_fdinfo = bpf_prog_show_fdinfo,
  1558. #endif
  1559. .release = bpf_prog_release,
  1560. .read = bpf_dummy_read,
  1561. .write = bpf_dummy_write,
  1562. };
  1563. int bpf_prog_new_fd(struct bpf_prog *prog)
  1564. {
  1565. int ret;
  1566. ret = security_bpf_prog(prog);
  1567. if (ret < 0)
  1568. return ret;
  1569. return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
  1570. O_RDWR | O_CLOEXEC);
  1571. }
  1572. static struct bpf_prog *____bpf_prog_get(struct fd f)
  1573. {
  1574. if (!f.file)
  1575. return ERR_PTR(-EBADF);
  1576. if (f.file->f_op != &bpf_prog_fops) {
  1577. fdput(f);
  1578. return ERR_PTR(-EINVAL);
  1579. }
  1580. return f.file->private_data;
  1581. }
  1582. void bpf_prog_add(struct bpf_prog *prog, int i)
  1583. {
  1584. atomic64_add(i, &prog->aux->refcnt);
  1585. }
  1586. EXPORT_SYMBOL_GPL(bpf_prog_add);
  1587. void bpf_prog_sub(struct bpf_prog *prog, int i)
  1588. {
  1589. /* Only to be used for undoing previous bpf_prog_add() in some
  1590. * error path. We still know that another entity in our call
  1591. * path holds a reference to the program, thus atomic_sub() can
  1592. * be safely used in such cases!
  1593. */
  1594. WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
  1595. }
  1596. EXPORT_SYMBOL_GPL(bpf_prog_sub);
  1597. void bpf_prog_inc(struct bpf_prog *prog)
  1598. {
  1599. atomic64_inc(&prog->aux->refcnt);
  1600. }
  1601. EXPORT_SYMBOL_GPL(bpf_prog_inc);
  1602. /* prog_idr_lock should have been held */
  1603. struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
  1604. {
  1605. int refold;
  1606. refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
  1607. if (!refold)
  1608. return ERR_PTR(-ENOENT);
  1609. return prog;
  1610. }
  1611. EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
  1612. bool bpf_prog_get_ok(struct bpf_prog *prog,
  1613. enum bpf_prog_type *attach_type, bool attach_drv)
  1614. {
  1615. /* not an attachment, just a refcount inc, always allow */
  1616. if (!attach_type)
  1617. return true;
  1618. if (prog->type != *attach_type)
  1619. return false;
  1620. if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
  1621. return false;
  1622. return true;
  1623. }
  1624. static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
  1625. bool attach_drv)
  1626. {
  1627. struct fd f = fdget(ufd);
  1628. struct bpf_prog *prog;
  1629. prog = ____bpf_prog_get(f);
  1630. if (IS_ERR(prog))
  1631. return prog;
  1632. if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
  1633. prog = ERR_PTR(-EINVAL);
  1634. goto out;
  1635. }
  1636. bpf_prog_inc(prog);
  1637. out:
  1638. fdput(f);
  1639. return prog;
  1640. }
  1641. struct bpf_prog *bpf_prog_get(u32 ufd)
  1642. {
  1643. return __bpf_prog_get(ufd, NULL, false);
  1644. }
  1645. struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
  1646. bool attach_drv)
  1647. {
  1648. return __bpf_prog_get(ufd, &type, attach_drv);
  1649. }
  1650. EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
  1651. /* Initially all BPF programs could be loaded w/o specifying
  1652. * expected_attach_type. Later for some of them specifying expected_attach_type
  1653. * at load time became required so that program could be validated properly.
  1654. * Programs of types that are allowed to be loaded both w/ and w/o (for
  1655. * backward compatibility) expected_attach_type, should have the default attach
  1656. * type assigned to expected_attach_type for the latter case, so that it can be
  1657. * validated later at attach time.
  1658. *
  1659. * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
  1660. * prog type requires it but has some attach types that have to be backward
  1661. * compatible.
  1662. */
  1663. static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
  1664. {
  1665. switch (attr->prog_type) {
  1666. case BPF_PROG_TYPE_CGROUP_SOCK:
  1667. /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
  1668. * exist so checking for non-zero is the way to go here.
  1669. */
  1670. if (!attr->expected_attach_type)
  1671. attr->expected_attach_type =
  1672. BPF_CGROUP_INET_SOCK_CREATE;
  1673. break;
  1674. }
  1675. }
  1676. static int
  1677. bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
  1678. enum bpf_attach_type expected_attach_type,
  1679. u32 btf_id, u32 prog_fd)
  1680. {
  1681. if (btf_id) {
  1682. if (btf_id > BTF_MAX_TYPE)
  1683. return -EINVAL;
  1684. switch (prog_type) {
  1685. case BPF_PROG_TYPE_TRACING:
  1686. case BPF_PROG_TYPE_LSM:
  1687. case BPF_PROG_TYPE_STRUCT_OPS:
  1688. case BPF_PROG_TYPE_EXT:
  1689. break;
  1690. default:
  1691. return -EINVAL;
  1692. }
  1693. }
  1694. if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING &&
  1695. prog_type != BPF_PROG_TYPE_EXT)
  1696. return -EINVAL;
  1697. switch (prog_type) {
  1698. case BPF_PROG_TYPE_CGROUP_SOCK:
  1699. switch (expected_attach_type) {
  1700. case BPF_CGROUP_INET_SOCK_CREATE:
  1701. case BPF_CGROUP_INET_SOCK_RELEASE:
  1702. case BPF_CGROUP_INET4_POST_BIND:
  1703. case BPF_CGROUP_INET6_POST_BIND:
  1704. return 0;
  1705. default:
  1706. return -EINVAL;
  1707. }
  1708. case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
  1709. switch (expected_attach_type) {
  1710. case BPF_CGROUP_INET4_BIND:
  1711. case BPF_CGROUP_INET6_BIND:
  1712. case BPF_CGROUP_INET4_CONNECT:
  1713. case BPF_CGROUP_INET6_CONNECT:
  1714. case BPF_CGROUP_INET4_GETPEERNAME:
  1715. case BPF_CGROUP_INET6_GETPEERNAME:
  1716. case BPF_CGROUP_INET4_GETSOCKNAME:
  1717. case BPF_CGROUP_INET6_GETSOCKNAME:
  1718. case BPF_CGROUP_UDP4_SENDMSG:
  1719. case BPF_CGROUP_UDP6_SENDMSG:
  1720. case BPF_CGROUP_UDP4_RECVMSG:
  1721. case BPF_CGROUP_UDP6_RECVMSG:
  1722. return 0;
  1723. default:
  1724. return -EINVAL;
  1725. }
  1726. case BPF_PROG_TYPE_CGROUP_SKB:
  1727. switch (expected_attach_type) {
  1728. case BPF_CGROUP_INET_INGRESS:
  1729. case BPF_CGROUP_INET_EGRESS:
  1730. return 0;
  1731. default:
  1732. return -EINVAL;
  1733. }
  1734. case BPF_PROG_TYPE_CGROUP_SOCKOPT:
  1735. switch (expected_attach_type) {
  1736. case BPF_CGROUP_SETSOCKOPT:
  1737. case BPF_CGROUP_GETSOCKOPT:
  1738. return 0;
  1739. default:
  1740. return -EINVAL;
  1741. }
  1742. case BPF_PROG_TYPE_SK_LOOKUP:
  1743. if (expected_attach_type == BPF_SK_LOOKUP)
  1744. return 0;
  1745. return -EINVAL;
  1746. case BPF_PROG_TYPE_EXT:
  1747. if (expected_attach_type)
  1748. return -EINVAL;
  1749. fallthrough;
  1750. default:
  1751. return 0;
  1752. }
  1753. }
  1754. static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
  1755. {
  1756. switch (prog_type) {
  1757. case BPF_PROG_TYPE_SCHED_CLS:
  1758. case BPF_PROG_TYPE_SCHED_ACT:
  1759. case BPF_PROG_TYPE_XDP:
  1760. case BPF_PROG_TYPE_LWT_IN:
  1761. case BPF_PROG_TYPE_LWT_OUT:
  1762. case BPF_PROG_TYPE_LWT_XMIT:
  1763. case BPF_PROG_TYPE_LWT_SEG6LOCAL:
  1764. case BPF_PROG_TYPE_SK_SKB:
  1765. case BPF_PROG_TYPE_SK_MSG:
  1766. case BPF_PROG_TYPE_LIRC_MODE2:
  1767. case BPF_PROG_TYPE_FLOW_DISSECTOR:
  1768. case BPF_PROG_TYPE_CGROUP_DEVICE:
  1769. case BPF_PROG_TYPE_CGROUP_SOCK:
  1770. case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
  1771. case BPF_PROG_TYPE_CGROUP_SOCKOPT:
  1772. case BPF_PROG_TYPE_CGROUP_SYSCTL:
  1773. case BPF_PROG_TYPE_SOCK_OPS:
  1774. case BPF_PROG_TYPE_EXT: /* extends any prog */
  1775. return true;
  1776. case BPF_PROG_TYPE_CGROUP_SKB:
  1777. /* always unpriv */
  1778. case BPF_PROG_TYPE_SK_REUSEPORT:
  1779. /* equivalent to SOCKET_FILTER. need CAP_BPF only */
  1780. default:
  1781. return false;
  1782. }
  1783. }
  1784. static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
  1785. {
  1786. switch (prog_type) {
  1787. case BPF_PROG_TYPE_KPROBE:
  1788. case BPF_PROG_TYPE_TRACEPOINT:
  1789. case BPF_PROG_TYPE_PERF_EVENT:
  1790. case BPF_PROG_TYPE_RAW_TRACEPOINT:
  1791. case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
  1792. case BPF_PROG_TYPE_TRACING:
  1793. case BPF_PROG_TYPE_LSM:
  1794. case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
  1795. case BPF_PROG_TYPE_EXT: /* extends any prog */
  1796. return true;
  1797. default:
  1798. return false;
  1799. }
  1800. }
  1801. /* last field in 'union bpf_attr' used by this command */
  1802. #define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
  1803. static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
  1804. {
  1805. enum bpf_prog_type type = attr->prog_type;
  1806. struct bpf_prog *prog;
  1807. int err;
  1808. char license[128];
  1809. bool is_gpl;
  1810. if (CHECK_ATTR(BPF_PROG_LOAD))
  1811. return -EINVAL;
  1812. if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
  1813. BPF_F_ANY_ALIGNMENT |
  1814. BPF_F_TEST_STATE_FREQ |
  1815. BPF_F_SLEEPABLE |
  1816. BPF_F_TEST_RND_HI32))
  1817. return -EINVAL;
  1818. if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
  1819. (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
  1820. !bpf_capable())
  1821. return -EPERM;
  1822. /* copy eBPF program license from user space */
  1823. if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
  1824. sizeof(license) - 1) < 0)
  1825. return -EFAULT;
  1826. license[sizeof(license) - 1] = 0;
  1827. /* eBPF programs must be GPL compatible to use GPL-ed functions */
  1828. is_gpl = license_is_gpl_compatible(license);
  1829. if (attr->insn_cnt == 0 ||
  1830. attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
  1831. return -E2BIG;
  1832. if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
  1833. type != BPF_PROG_TYPE_CGROUP_SKB &&
  1834. !bpf_capable())
  1835. return -EPERM;
  1836. if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
  1837. return -EPERM;
  1838. if (is_perfmon_prog_type(type) && !perfmon_capable())
  1839. return -EPERM;
  1840. bpf_prog_load_fixup_attach_type(attr);
  1841. if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
  1842. attr->attach_btf_id,
  1843. attr->attach_prog_fd))
  1844. return -EINVAL;
  1845. /* plain bpf_prog allocation */
  1846. prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
  1847. if (!prog)
  1848. return -ENOMEM;
  1849. prog->expected_attach_type = attr->expected_attach_type;
  1850. prog->aux->attach_btf_id = attr->attach_btf_id;
  1851. if (attr->attach_prog_fd) {
  1852. struct bpf_prog *dst_prog;
  1853. dst_prog = bpf_prog_get(attr->attach_prog_fd);
  1854. if (IS_ERR(dst_prog)) {
  1855. err = PTR_ERR(dst_prog);
  1856. goto free_prog_nouncharge;
  1857. }
  1858. prog->aux->dst_prog = dst_prog;
  1859. }
  1860. prog->aux->offload_requested = !!attr->prog_ifindex;
  1861. prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
  1862. err = security_bpf_prog_alloc(prog->aux);
  1863. if (err)
  1864. goto free_prog_nouncharge;
  1865. err = bpf_prog_charge_memlock(prog);
  1866. if (err)
  1867. goto free_prog_sec;
  1868. prog->len = attr->insn_cnt;
  1869. err = -EFAULT;
  1870. if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
  1871. bpf_prog_insn_size(prog)) != 0)
  1872. goto free_prog;
  1873. prog->orig_prog = NULL;
  1874. prog->jited = 0;
  1875. atomic64_set(&prog->aux->refcnt, 1);
  1876. prog->gpl_compatible = is_gpl ? 1 : 0;
  1877. if (bpf_prog_is_dev_bound(prog->aux)) {
  1878. err = bpf_prog_offload_init(prog, attr);
  1879. if (err)
  1880. goto free_prog;
  1881. }
  1882. /* find program type: socket_filter vs tracing_filter */
  1883. err = find_prog_type(type, prog);
  1884. if (err < 0)
  1885. goto free_prog;
  1886. prog->aux->load_time = ktime_get_boottime_ns();
  1887. err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
  1888. sizeof(attr->prog_name));
  1889. if (err < 0)
  1890. goto free_prog;
  1891. /* run eBPF verifier */
  1892. err = bpf_check(&prog, attr, uattr);
  1893. if (err < 0)
  1894. goto free_used_maps;
  1895. prog = bpf_prog_select_runtime(prog, &err);
  1896. if (err < 0)
  1897. goto free_used_maps;
  1898. err = bpf_prog_alloc_id(prog);
  1899. if (err)
  1900. goto free_used_maps;
  1901. /* Upon success of bpf_prog_alloc_id(), the BPF prog is
  1902. * effectively publicly exposed. However, retrieving via
  1903. * bpf_prog_get_fd_by_id() will take another reference,
  1904. * therefore it cannot be gone underneath us.
  1905. *
  1906. * Only for the time /after/ successful bpf_prog_new_fd()
  1907. * and before returning to userspace, we might just hold
  1908. * one reference and any parallel close on that fd could
  1909. * rip everything out. Hence, below notifications must
  1910. * happen before bpf_prog_new_fd().
  1911. *
  1912. * Also, any failure handling from this point onwards must
  1913. * be using bpf_prog_put() given the program is exposed.
  1914. */
  1915. bpf_prog_kallsyms_add(prog);
  1916. perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
  1917. bpf_audit_prog(prog, BPF_AUDIT_LOAD);
  1918. err = bpf_prog_new_fd(prog);
  1919. if (err < 0)
  1920. bpf_prog_put(prog);
  1921. return err;
  1922. free_used_maps:
  1923. /* In case we have subprogs, we need to wait for a grace
  1924. * period before we can tear down JIT memory since symbols
  1925. * are already exposed under kallsyms.
  1926. */
  1927. __bpf_prog_put_noref(prog, prog->aux->func_cnt);
  1928. return err;
  1929. free_prog:
  1930. bpf_prog_uncharge_memlock(prog);
  1931. free_prog_sec:
  1932. security_bpf_prog_free(prog->aux);
  1933. free_prog_nouncharge:
  1934. bpf_prog_free(prog);
  1935. return err;
  1936. }
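/* A minimal user-space sketch of exercising BPF_PROG_LOAD as implemented
 * above (hypothetical example; error handling and the optional log buffer
 * are omitted). The program is just "r0 = 0; exit":
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = sizeof(insns) / sizeof(insns[0]),
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */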
  1937. #define BPF_OBJ_LAST_FIELD file_flags
  1938. static int bpf_obj_pin(const union bpf_attr *attr)
  1939. {
  1940. if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
  1941. return -EINVAL;
  1942. return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
  1943. }
  1944. static int bpf_obj_get(const union bpf_attr *attr)
  1945. {
  1946. if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
  1947. attr->file_flags & ~BPF_OBJ_FLAG_MASK)
  1948. return -EINVAL;
  1949. return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
  1950. attr->file_flags);
  1951. }
  1952. void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
  1953. const struct bpf_link_ops *ops, struct bpf_prog *prog)
  1954. {
  1955. atomic64_set(&link->refcnt, 1);
  1956. link->type = type;
  1957. link->id = 0;
  1958. link->ops = ops;
  1959. link->prog = prog;
  1960. }
  1961. static void bpf_link_free_id(int id)
  1962. {
  1963. if (!id)
  1964. return;
  1965. spin_lock_bh(&link_idr_lock);
  1966. idr_remove(&link_idr, id);
  1967. spin_unlock_bh(&link_idr_lock);
  1968. }
  1969. /* Clean up bpf_link and corresponding anon_inode file and FD. After
  1970. * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
1971. anon_inode's release() call. This helper marks bpf_link as
1972. defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
1973. is not decremented, it's the responsibility of the calling code that failed
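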
  1974. * to complete bpf_link initialization.
  1975. */
  1976. void bpf_link_cleanup(struct bpf_link_primer *primer)
  1977. {
  1978. primer->link->prog = NULL;
  1979. bpf_link_free_id(primer->id);
  1980. fput(primer->file);
  1981. put_unused_fd(primer->fd);
  1982. }
  1983. void bpf_link_inc(struct bpf_link *link)
  1984. {
  1985. atomic64_inc(&link->refcnt);
  1986. }
  1987. /* bpf_link_free is guaranteed to be called from process context */
  1988. static void bpf_link_free(struct bpf_link *link)
  1989. {
  1990. bpf_link_free_id(link->id);
  1991. if (link->prog) {
  1992. /* detach BPF program, clean up used resources */
  1993. link->ops->release(link);
  1994. bpf_prog_put(link->prog);
  1995. }
  1996. /* free bpf_link and its containing memory */
  1997. link->ops->dealloc(link);
  1998. }
  1999. static void bpf_link_put_deferred(struct work_struct *work)
  2000. {
  2001. struct bpf_link *link = container_of(work, struct bpf_link, work);
  2002. bpf_link_free(link);
  2003. }
  2004. /* bpf_link_put can be called from atomic context, but ensures that resources
  2005. * are freed from process context
  2006. */
  2007. void bpf_link_put(struct bpf_link *link)
  2008. {
  2009. if (!atomic64_dec_and_test(&link->refcnt))
  2010. return;
  2011. if (in_atomic()) {
  2012. INIT_WORK(&link->work, bpf_link_put_deferred);
  2013. schedule_work(&link->work);
  2014. } else {
  2015. bpf_link_free(link);
  2016. }
  2017. }
  2018. static int bpf_link_release(struct inode *inode, struct file *filp)
  2019. {
  2020. struct bpf_link *link = filp->private_data;
  2021. bpf_link_put(link);
  2022. return 0;
  2023. }
  2024. #ifdef CONFIG_PROC_FS
  2025. #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
  2026. #define BPF_MAP_TYPE(_id, _ops)
  2027. #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
  2028. static const char *bpf_link_type_strs[] = {
  2029. [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
  2030. #include <linux/bpf_types.h>
  2031. };
  2032. #undef BPF_PROG_TYPE
  2033. #undef BPF_MAP_TYPE
  2034. #undef BPF_LINK_TYPE
  2035. static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
  2036. {
  2037. const struct bpf_link *link = filp->private_data;
  2038. const struct bpf_prog *prog = link->prog;
  2039. char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
  2040. bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
  2041. seq_printf(m,
  2042. "link_type:\t%s\n"
  2043. "link_id:\t%u\n"
  2044. "prog_tag:\t%s\n"
  2045. "prog_id:\t%u\n",
  2046. bpf_link_type_strs[link->type],
  2047. link->id,
  2048. prog_tag,
  2049. prog->aux->id);
  2050. if (link->ops->show_fdinfo)
  2051. link->ops->show_fdinfo(link, m);
  2052. }
  2053. #endif
  2054. static const struct file_operations bpf_link_fops = {
  2055. #ifdef CONFIG_PROC_FS
  2056. .show_fdinfo = bpf_link_show_fdinfo,
  2057. #endif
  2058. .release = bpf_link_release,
  2059. .read = bpf_dummy_read,
  2060. .write = bpf_dummy_write,
  2061. };
  2062. static int bpf_link_alloc_id(struct bpf_link *link)
  2063. {
  2064. int id;
  2065. idr_preload(GFP_KERNEL);
  2066. spin_lock_bh(&link_idr_lock);
  2067. id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
  2068. spin_unlock_bh(&link_idr_lock);
  2069. idr_preload_end();
  2070. return id;
  2071. }
  2072. /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
  2073. * reserving unused FD and allocating ID from link_idr. This is to be paired
  2074. * with bpf_link_settle() to install FD and ID and expose bpf_link to
  2075. * user-space, if bpf_link is successfully attached. If not, bpf_link and
2076. pre-allocated resources are to be freed with bpf_link_cleanup() call. All the
2077. transient state is passed around in struct bpf_link_primer.
2078. This is the preferred way to create and initialize bpf_link, especially when
2079. there are complicated and expensive operations in between creating bpf_link
  2080. * itself and attaching it to BPF hook. By using bpf_link_prime() and
  2081. * bpf_link_settle() kernel code using bpf_link doesn't have to perform
  2082. * expensive (and potentially failing) roll back operations in a rare case
  2083. * that file, FD, or ID can't be allocated.
  2084. */
  2085. int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
  2086. {
  2087. struct file *file;
  2088. int fd, id;
  2089. fd = get_unused_fd_flags(O_CLOEXEC);
  2090. if (fd < 0)
  2091. return fd;
  2092. id = bpf_link_alloc_id(link);
  2093. if (id < 0) {
  2094. put_unused_fd(fd);
  2095. return id;
  2096. }
  2097. file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
  2098. if (IS_ERR(file)) {
  2099. bpf_link_free_id(id);
  2100. put_unused_fd(fd);
  2101. return PTR_ERR(file);
  2102. }
  2103. primer->link = link;
  2104. primer->file = file;
  2105. primer->fd = fd;
  2106. primer->id = id;
  2107. return 0;
  2108. }
  2109. int bpf_link_settle(struct bpf_link_primer *primer)
  2110. {
  2111. /* make bpf_link fetchable by ID */
  2112. spin_lock_bh(&link_idr_lock);
  2113. primer->link->id = primer->id;
  2114. spin_unlock_bh(&link_idr_lock);
  2115. /* make bpf_link fetchable by FD */
  2116. fd_install(primer->fd, primer->file);
  2117. /* pass through installed FD */
  2118. return primer->fd;
  2119. }
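/* Condensed sketch of the intended prime/attach/settle sequence for a link
 * implementation (this mirrors bpf_raw_tracepoint_open() further below;
 * 'link' and my_attach() are placeholders):
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = my_attach(link);
 *	if (err) {
 *		bpf_link_cleanup(&primer);
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);
 *
 * bpf_link_cleanup() releases the FD, ID and file without dropping the prog
 * reference; bpf_link_settle() installs the FD and publishes the ID.
 */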
  2120. int bpf_link_new_fd(struct bpf_link *link)
  2121. {
  2122. return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
  2123. }
  2124. struct bpf_link *bpf_link_get_from_fd(u32 ufd)
  2125. {
  2126. struct fd f = fdget(ufd);
  2127. struct bpf_link *link;
  2128. if (!f.file)
  2129. return ERR_PTR(-EBADF);
  2130. if (f.file->f_op != &bpf_link_fops) {
  2131. fdput(f);
  2132. return ERR_PTR(-EINVAL);
  2133. }
  2134. link = f.file->private_data;
  2135. bpf_link_inc(link);
  2136. fdput(f);
  2137. return link;
  2138. }
  2139. struct bpf_tracing_link {
  2140. struct bpf_link link;
  2141. enum bpf_attach_type attach_type;
  2142. struct bpf_trampoline *trampoline;
  2143. struct bpf_prog *tgt_prog;
  2144. };
  2145. static void bpf_tracing_link_release(struct bpf_link *link)
  2146. {
  2147. struct bpf_tracing_link *tr_link =
  2148. container_of(link, struct bpf_tracing_link, link);
  2149. WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
  2150. tr_link->trampoline));
  2151. bpf_trampoline_put(tr_link->trampoline);
  2152. /* tgt_prog is NULL if target is a kernel function */
  2153. if (tr_link->tgt_prog)
  2154. bpf_prog_put(tr_link->tgt_prog);
  2155. }
  2156. static void bpf_tracing_link_dealloc(struct bpf_link *link)
  2157. {
  2158. struct bpf_tracing_link *tr_link =
  2159. container_of(link, struct bpf_tracing_link, link);
  2160. kfree(tr_link);
  2161. }
  2162. static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
  2163. struct seq_file *seq)
  2164. {
  2165. struct bpf_tracing_link *tr_link =
  2166. container_of(link, struct bpf_tracing_link, link);
  2167. seq_printf(seq,
  2168. "attach_type:\t%d\n",
  2169. tr_link->attach_type);
  2170. }
  2171. static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
  2172. struct bpf_link_info *info)
  2173. {
  2174. struct bpf_tracing_link *tr_link =
  2175. container_of(link, struct bpf_tracing_link, link);
  2176. info->tracing.attach_type = tr_link->attach_type;
  2177. return 0;
  2178. }
  2179. static const struct bpf_link_ops bpf_tracing_link_lops = {
  2180. .release = bpf_tracing_link_release,
  2181. .dealloc = bpf_tracing_link_dealloc,
  2182. .show_fdinfo = bpf_tracing_link_show_fdinfo,
  2183. .fill_link_info = bpf_tracing_link_fill_link_info,
  2184. };
  2185. static int bpf_tracing_prog_attach(struct bpf_prog *prog,
  2186. int tgt_prog_fd,
  2187. u32 btf_id)
  2188. {
  2189. struct bpf_link_primer link_primer;
  2190. struct bpf_prog *tgt_prog = NULL;
  2191. struct bpf_trampoline *tr = NULL;
  2192. struct bpf_tracing_link *link;
  2193. u64 key = 0;
  2194. int err;
  2195. switch (prog->type) {
  2196. case BPF_PROG_TYPE_TRACING:
  2197. if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
  2198. prog->expected_attach_type != BPF_TRACE_FEXIT &&
  2199. prog->expected_attach_type != BPF_MODIFY_RETURN) {
  2200. err = -EINVAL;
  2201. goto out_put_prog;
  2202. }
  2203. break;
  2204. case BPF_PROG_TYPE_EXT:
  2205. if (prog->expected_attach_type != 0) {
  2206. err = -EINVAL;
  2207. goto out_put_prog;
  2208. }
  2209. break;
  2210. case BPF_PROG_TYPE_LSM:
  2211. if (prog->expected_attach_type != BPF_LSM_MAC) {
  2212. err = -EINVAL;
  2213. goto out_put_prog;
  2214. }
  2215. break;
  2216. default:
  2217. err = -EINVAL;
  2218. goto out_put_prog;
  2219. }
  2220. if (!!tgt_prog_fd != !!btf_id) {
  2221. err = -EINVAL;
  2222. goto out_put_prog;
  2223. }
  2224. if (tgt_prog_fd) {
  2225. /* For now we only allow new targets for BPF_PROG_TYPE_EXT */
  2226. if (prog->type != BPF_PROG_TYPE_EXT) {
  2227. err = -EINVAL;
  2228. goto out_put_prog;
  2229. }
  2230. tgt_prog = bpf_prog_get(tgt_prog_fd);
  2231. if (IS_ERR(tgt_prog)) {
  2232. err = PTR_ERR(tgt_prog);
  2233. tgt_prog = NULL;
  2234. goto out_put_prog;
  2235. }
  2236. key = bpf_trampoline_compute_key(tgt_prog, btf_id);
  2237. }
  2238. link = kzalloc(sizeof(*link), GFP_USER);
  2239. if (!link) {
  2240. err = -ENOMEM;
  2241. goto out_put_prog;
  2242. }
  2243. bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
  2244. &bpf_tracing_link_lops, prog);
  2245. link->attach_type = prog->expected_attach_type;
  2246. mutex_lock(&prog->aux->dst_mutex);
  2247. /* There are a few possible cases here:
  2248. *
  2249. * - if prog->aux->dst_trampoline is set, the program was just loaded
  2250. * and not yet attached to anything, so we can use the values stored
  2251. * in prog->aux
  2252. *
  2253. * - if prog->aux->dst_trampoline is NULL, the program has already been
  2254. * attached to a target and its initial target was cleared (below)
  2255. *
  2256. * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
  2257. * target_btf_id using the link_create API.
  2258. *
2259. * - if tgt_prog == NULL, this function was called using the old
2260. * raw_tracepoint_open API, and we need a target from prog->aux
  2261. *
  2262. * The combination of no saved target in prog->aux, and no target
  2263. * specified on load is illegal, and we reject that here.
  2264. */
  2265. if (!prog->aux->dst_trampoline && !tgt_prog) {
  2266. err = -ENOENT;
  2267. goto out_unlock;
  2268. }
  2269. if (!prog->aux->dst_trampoline ||
  2270. (key && key != prog->aux->dst_trampoline->key)) {
  2271. /* If there is no saved target, or the specified target is
  2272. * different from the destination specified at load time, we
  2273. * need a new trampoline and a check for compatibility
  2274. */
  2275. struct bpf_attach_target_info tgt_info = {};
  2276. err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
  2277. &tgt_info);
  2278. if (err)
  2279. goto out_unlock;
  2280. tr = bpf_trampoline_get(key, &tgt_info);
  2281. if (!tr) {
  2282. err = -ENOMEM;
  2283. goto out_unlock;
  2284. }
  2285. } else {
  2286. /* The caller didn't specify a target, or the target was the
  2287. * same as the destination supplied during program load. This
  2288. * means we can reuse the trampoline and reference from program
  2289. * load time, and there is no need to allocate a new one. This
  2290. * can only happen once for any program, as the saved values in
  2291. * prog->aux are cleared below.
  2292. */
  2293. tr = prog->aux->dst_trampoline;
  2294. tgt_prog = prog->aux->dst_prog;
  2295. }
  2296. err = bpf_link_prime(&link->link, &link_primer);
  2297. if (err)
  2298. goto out_unlock;
  2299. err = bpf_trampoline_link_prog(prog, tr);
  2300. if (err) {
  2301. bpf_link_cleanup(&link_primer);
  2302. link = NULL;
  2303. goto out_unlock;
  2304. }
  2305. link->tgt_prog = tgt_prog;
  2306. link->trampoline = tr;
  2307. /* Always clear the trampoline and target prog from prog->aux to make
  2308. * sure the original attach destination is not kept alive after a
  2309. * program is (re-)attached to another target.
  2310. */
  2311. if (prog->aux->dst_prog &&
  2312. (tgt_prog_fd || tr != prog->aux->dst_trampoline))
  2313. /* got extra prog ref from syscall, or attaching to different prog */
  2314. bpf_prog_put(prog->aux->dst_prog);
  2315. if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
  2316. /* we allocated a new trampoline, so free the old one */
  2317. bpf_trampoline_put(prog->aux->dst_trampoline);
  2318. prog->aux->dst_prog = NULL;
  2319. prog->aux->dst_trampoline = NULL;
  2320. mutex_unlock(&prog->aux->dst_mutex);
  2321. return bpf_link_settle(&link_primer);
  2322. out_unlock:
  2323. if (tr && tr != prog->aux->dst_trampoline)
  2324. bpf_trampoline_put(tr);
  2325. mutex_unlock(&prog->aux->dst_mutex);
  2326. kfree(link);
  2327. out_put_prog:
  2328. if (tgt_prog_fd && tgt_prog)
  2329. bpf_prog_put(tgt_prog);
  2330. return err;
  2331. }
  2332. struct bpf_raw_tp_link {
  2333. struct bpf_link link;
  2334. struct bpf_raw_event_map *btp;
  2335. };
  2336. static void bpf_raw_tp_link_release(struct bpf_link *link)
  2337. {
  2338. struct bpf_raw_tp_link *raw_tp =
  2339. container_of(link, struct bpf_raw_tp_link, link);
  2340. bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
  2341. bpf_put_raw_tracepoint(raw_tp->btp);
  2342. }
  2343. static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
  2344. {
  2345. struct bpf_raw_tp_link *raw_tp =
  2346. container_of(link, struct bpf_raw_tp_link, link);
  2347. kfree(raw_tp);
  2348. }
  2349. static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
  2350. struct seq_file *seq)
  2351. {
  2352. struct bpf_raw_tp_link *raw_tp_link =
  2353. container_of(link, struct bpf_raw_tp_link, link);
  2354. seq_printf(seq,
  2355. "tp_name:\t%s\n",
  2356. raw_tp_link->btp->tp->name);
  2357. }
  2358. static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
  2359. struct bpf_link_info *info)
  2360. {
  2361. struct bpf_raw_tp_link *raw_tp_link =
  2362. container_of(link, struct bpf_raw_tp_link, link);
  2363. char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
  2364. const char *tp_name = raw_tp_link->btp->tp->name;
  2365. u32 ulen = info->raw_tracepoint.tp_name_len;
  2366. size_t tp_len = strlen(tp_name);
  2367. if (!ulen ^ !ubuf)
  2368. return -EINVAL;
  2369. info->raw_tracepoint.tp_name_len = tp_len + 1;
  2370. if (!ubuf)
  2371. return 0;
  2372. if (ulen >= tp_len + 1) {
  2373. if (copy_to_user(ubuf, tp_name, tp_len + 1))
  2374. return -EFAULT;
  2375. } else {
  2376. char zero = '\0';
  2377. if (copy_to_user(ubuf, tp_name, ulen - 1))
  2378. return -EFAULT;
  2379. if (put_user(zero, ubuf + ulen - 1))
  2380. return -EFAULT;
  2381. return -ENOSPC;
  2382. }
  2383. return 0;
  2384. }
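/* The ulen/ubuf handling above supports a size-probing call followed by a
 * copy (sketch, using libbpf's bpf_obj_get_info_by_fd()): a first call with
 * tp_name == NULL and tp_name_len == 0 only reports the required length; a
 * call with a buffer copies the name, or returns -ENOSPC together with a
 * truncated, NUL-terminated string:
 *
 *	struct bpf_link_info info = {};
 *	__u32 len = sizeof(info);
 *	char name[128];
 *	int err;
 *
 *	info.raw_tracepoint.tp_name = (__u64)(unsigned long)name;
 *	info.raw_tracepoint.tp_name_len = sizeof(name);
 *	err = bpf_obj_get_info_by_fd(link_fd, &info, &len);
 */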
  2385. static const struct bpf_link_ops bpf_raw_tp_link_lops = {
  2386. .release = bpf_raw_tp_link_release,
  2387. .dealloc = bpf_raw_tp_link_dealloc,
  2388. .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
  2389. .fill_link_info = bpf_raw_tp_link_fill_link_info,
  2390. };
  2391. #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
  2392. static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
  2393. {
  2394. struct bpf_link_primer link_primer;
  2395. struct bpf_raw_tp_link *link;
  2396. struct bpf_raw_event_map *btp;
  2397. struct bpf_prog *prog;
  2398. const char *tp_name;
  2399. char buf[128];
  2400. int err;
  2401. if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
  2402. return -EINVAL;
  2403. prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
  2404. if (IS_ERR(prog))
  2405. return PTR_ERR(prog);
  2406. switch (prog->type) {
  2407. case BPF_PROG_TYPE_TRACING:
  2408. case BPF_PROG_TYPE_EXT:
  2409. case BPF_PROG_TYPE_LSM:
  2410. if (attr->raw_tracepoint.name) {
  2411. /* The attach point for this category of programs
  2412. * should be specified via btf_id during program load.
  2413. */
  2414. err = -EINVAL;
  2415. goto out_put_prog;
  2416. }
  2417. if (prog->type == BPF_PROG_TYPE_TRACING &&
  2418. prog->expected_attach_type == BPF_TRACE_RAW_TP) {
  2419. tp_name = prog->aux->attach_func_name;
  2420. break;
  2421. }
  2422. err = bpf_tracing_prog_attach(prog, 0, 0);
  2423. if (err >= 0)
  2424. return err;
  2425. goto out_put_prog;
  2426. case BPF_PROG_TYPE_RAW_TRACEPOINT:
  2427. case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
  2428. if (strncpy_from_user(buf,
  2429. u64_to_user_ptr(attr->raw_tracepoint.name),
  2430. sizeof(buf) - 1) < 0) {
  2431. err = -EFAULT;
  2432. goto out_put_prog;
  2433. }
  2434. buf[sizeof(buf) - 1] = 0;
  2435. tp_name = buf;
  2436. break;
  2437. default:
  2438. err = -EINVAL;
  2439. goto out_put_prog;
  2440. }
  2441. btp = bpf_get_raw_tracepoint(tp_name);
  2442. if (!btp) {
  2443. err = -ENOENT;
  2444. goto out_put_prog;
  2445. }
  2446. link = kzalloc(sizeof(*link), GFP_USER);
  2447. if (!link) {
  2448. err = -ENOMEM;
  2449. goto out_put_btp;
  2450. }
  2451. bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
  2452. &bpf_raw_tp_link_lops, prog);
  2453. link->btp = btp;
  2454. err = bpf_link_prime(&link->link, &link_primer);
  2455. if (err) {
  2456. kfree(link);
  2457. goto out_put_btp;
  2458. }
  2459. err = bpf_probe_register(link->btp, prog);
  2460. if (err) {
  2461. bpf_link_cleanup(&link_primer);
  2462. goto out_put_btp;
  2463. }
  2464. return bpf_link_settle(&link_primer);
  2465. out_put_btp:
  2466. bpf_put_raw_tracepoint(btp);
  2467. out_put_prog:
  2468. bpf_prog_put(prog);
  2469. return err;
  2470. }
  2471. static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
  2472. enum bpf_attach_type attach_type)
  2473. {
  2474. switch (prog->type) {
  2475. case BPF_PROG_TYPE_CGROUP_SOCK:
  2476. case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
  2477. case BPF_PROG_TYPE_CGROUP_SOCKOPT:
  2478. case BPF_PROG_TYPE_SK_LOOKUP:
  2479. return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
  2480. case BPF_PROG_TYPE_CGROUP_SKB:
  2481. if (!capable(CAP_NET_ADMIN))
  2482. /* cg-skb progs can be loaded by unpriv user.
  2483. * check permissions at attach time.
  2484. */
  2485. return -EPERM;
  2486. return prog->enforce_expected_attach_type &&
  2487. prog->expected_attach_type != attach_type ?
  2488. -EINVAL : 0;
  2489. default:
  2490. return 0;
  2491. }
  2492. }
  2493. static enum bpf_prog_type
  2494. attach_type_to_prog_type(enum bpf_attach_type attach_type)
  2495. {
  2496. switch (attach_type) {
  2497. case BPF_CGROUP_INET_INGRESS:
  2498. case BPF_CGROUP_INET_EGRESS:
  2499. return BPF_PROG_TYPE_CGROUP_SKB;
  2500. case BPF_CGROUP_INET_SOCK_CREATE:
  2501. case BPF_CGROUP_INET_SOCK_RELEASE:
  2502. case BPF_CGROUP_INET4_POST_BIND:
  2503. case BPF_CGROUP_INET6_POST_BIND:
  2504. return BPF_PROG_TYPE_CGROUP_SOCK;
  2505. case BPF_CGROUP_INET4_BIND:
  2506. case BPF_CGROUP_INET6_BIND:
  2507. case BPF_CGROUP_INET4_CONNECT:
  2508. case BPF_CGROUP_INET6_CONNECT:
  2509. case BPF_CGROUP_INET4_GETPEERNAME:
  2510. case BPF_CGROUP_INET6_GETPEERNAME:
  2511. case BPF_CGROUP_INET4_GETSOCKNAME:
  2512. case BPF_CGROUP_INET6_GETSOCKNAME:
  2513. case BPF_CGROUP_UDP4_SENDMSG:
  2514. case BPF_CGROUP_UDP6_SENDMSG:
  2515. case BPF_CGROUP_UDP4_RECVMSG:
  2516. case BPF_CGROUP_UDP6_RECVMSG:
  2517. return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
  2518. case BPF_CGROUP_SOCK_OPS:
  2519. return BPF_PROG_TYPE_SOCK_OPS;
  2520. case BPF_CGROUP_DEVICE:
  2521. return BPF_PROG_TYPE_CGROUP_DEVICE;
  2522. case BPF_SK_MSG_VERDICT:
  2523. return BPF_PROG_TYPE_SK_MSG;
  2524. case BPF_SK_SKB_STREAM_PARSER:
  2525. case BPF_SK_SKB_STREAM_VERDICT:
  2526. return BPF_PROG_TYPE_SK_SKB;
  2527. case BPF_LIRC_MODE2:
  2528. return BPF_PROG_TYPE_LIRC_MODE2;
  2529. case BPF_FLOW_DISSECTOR:
  2530. return BPF_PROG_TYPE_FLOW_DISSECTOR;
  2531. case BPF_CGROUP_SYSCTL:
  2532. return BPF_PROG_TYPE_CGROUP_SYSCTL;
  2533. case BPF_CGROUP_GETSOCKOPT:
  2534. case BPF_CGROUP_SETSOCKOPT:
  2535. return BPF_PROG_TYPE_CGROUP_SOCKOPT;
  2536. case BPF_TRACE_ITER:
  2537. return BPF_PROG_TYPE_TRACING;
  2538. case BPF_SK_LOOKUP:
  2539. return BPF_PROG_TYPE_SK_LOOKUP;
  2540. case BPF_XDP:
  2541. return BPF_PROG_TYPE_XDP;
  2542. default:
  2543. return BPF_PROG_TYPE_UNSPEC;
  2544. }
  2545. }
  2546. #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
  2547. #define BPF_F_ATTACH_MASK \
  2548. (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
  2549. static int bpf_prog_attach(const union bpf_attr *attr)
  2550. {
  2551. enum bpf_prog_type ptype;
  2552. struct bpf_prog *prog;
  2553. int ret;
  2554. if (CHECK_ATTR(BPF_PROG_ATTACH))
  2555. return -EINVAL;
  2556. if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
  2557. return -EINVAL;
  2558. ptype = attach_type_to_prog_type(attr->attach_type);
  2559. if (ptype == BPF_PROG_TYPE_UNSPEC)
  2560. return -EINVAL;
  2561. prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
  2562. if (IS_ERR(prog))
  2563. return PTR_ERR(prog);
  2564. if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
  2565. bpf_prog_put(prog);
  2566. return -EINVAL;
  2567. }
  2568. switch (ptype) {
  2569. case BPF_PROG_TYPE_SK_SKB:
  2570. case BPF_PROG_TYPE_SK_MSG:
  2571. ret = sock_map_get_from_fd(attr, prog);
  2572. break;
  2573. case BPF_PROG_TYPE_LIRC_MODE2:
  2574. ret = lirc_prog_attach(attr, prog);
  2575. break;
  2576. case BPF_PROG_TYPE_FLOW_DISSECTOR:
  2577. ret = netns_bpf_prog_attach(attr, prog);
  2578. break;
  2579. case BPF_PROG_TYPE_CGROUP_DEVICE:
  2580. case BPF_PROG_TYPE_CGROUP_SKB:
  2581. case BPF_PROG_TYPE_CGROUP_SOCK:
  2582. case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
  2583. case BPF_PROG_TYPE_CGROUP_SOCKOPT:
  2584. case BPF_PROG_TYPE_CGROUP_SYSCTL:
  2585. case BPF_PROG_TYPE_SOCK_OPS:
  2586. ret = cgroup_bpf_prog_attach(attr, ptype, prog);
  2587. break;
  2588. default:
  2589. ret = -EINVAL;
  2590. }
  2591. if (ret)
  2592. bpf_prog_put(prog);
  2593. return ret;
  2594. }
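/* User-space sketch of the cgroup branch above (illustrative cgroup path;
 * bpf_prog_attach() is the libbpf wrapper around this command):
 *
 *	int cg_fd = open("/sys/fs/cgroup/unified/my_cgroup", O_RDONLY);
 *	int err;
 *
 *	err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS,
 *			      BPF_F_ALLOW_MULTI);
 */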
  2595. #define BPF_PROG_DETACH_LAST_FIELD attach_type
  2596. static int bpf_prog_detach(const union bpf_attr *attr)
  2597. {
  2598. enum bpf_prog_type ptype;
  2599. if (CHECK_ATTR(BPF_PROG_DETACH))
  2600. return -EINVAL;
  2601. ptype = attach_type_to_prog_type(attr->attach_type);
  2602. switch (ptype) {
  2603. case BPF_PROG_TYPE_SK_MSG:
  2604. case BPF_PROG_TYPE_SK_SKB:
  2605. return sock_map_prog_detach(attr, ptype);
  2606. case BPF_PROG_TYPE_LIRC_MODE2:
  2607. return lirc_prog_detach(attr);
  2608. case BPF_PROG_TYPE_FLOW_DISSECTOR:
  2609. return netns_bpf_prog_detach(attr, ptype);
  2610. case BPF_PROG_TYPE_CGROUP_DEVICE:
  2611. case BPF_PROG_TYPE_CGROUP_SKB:
  2612. case BPF_PROG_TYPE_CGROUP_SOCK:
  2613. case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
  2614. case BPF_PROG_TYPE_CGROUP_SOCKOPT:
  2615. case BPF_PROG_TYPE_CGROUP_SYSCTL:
  2616. case BPF_PROG_TYPE_SOCK_OPS:
  2617. return cgroup_bpf_prog_detach(attr, ptype);
  2618. default:
  2619. return -EINVAL;
  2620. }
  2621. }
  2622. #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
  2623. static int bpf_prog_query(const union bpf_attr *attr,
  2624. union bpf_attr __user *uattr)
  2625. {
  2626. if (!capable(CAP_NET_ADMIN))
  2627. return -EPERM;
  2628. if (CHECK_ATTR(BPF_PROG_QUERY))
  2629. return -EINVAL;
  2630. if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
  2631. return -EINVAL;
  2632. switch (attr->query.attach_type) {
  2633. case BPF_CGROUP_INET_INGRESS:
  2634. case BPF_CGROUP_INET_EGRESS:
  2635. case BPF_CGROUP_INET_SOCK_CREATE:
  2636. case BPF_CGROUP_INET_SOCK_RELEASE:
  2637. case BPF_CGROUP_INET4_BIND:
  2638. case BPF_CGROUP_INET6_BIND:
  2639. case BPF_CGROUP_INET4_POST_BIND:
  2640. case BPF_CGROUP_INET6_POST_BIND:
  2641. case BPF_CGROUP_INET4_CONNECT:
  2642. case BPF_CGROUP_INET6_CONNECT:
  2643. case BPF_CGROUP_INET4_GETPEERNAME:
  2644. case BPF_CGROUP_INET6_GETPEERNAME:
  2645. case BPF_CGROUP_INET4_GETSOCKNAME:
  2646. case BPF_CGROUP_INET6_GETSOCKNAME:
  2647. case BPF_CGROUP_UDP4_SENDMSG:
  2648. case BPF_CGROUP_UDP6_SENDMSG:
  2649. case BPF_CGROUP_UDP4_RECVMSG:
  2650. case BPF_CGROUP_UDP6_RECVMSG:
  2651. case BPF_CGROUP_SOCK_OPS:
  2652. case BPF_CGROUP_DEVICE:
  2653. case BPF_CGROUP_SYSCTL:
  2654. case BPF_CGROUP_GETSOCKOPT:
  2655. case BPF_CGROUP_SETSOCKOPT:
  2656. return cgroup_bpf_prog_query(attr, uattr);
  2657. case BPF_LIRC_MODE2:
  2658. return lirc_prog_query(attr, uattr);
  2659. case BPF_FLOW_DISSECTOR:
  2660. case BPF_SK_LOOKUP:
  2661. return netns_bpf_prog_query(attr, uattr);
  2662. default:
  2663. return -EINVAL;
  2664. }
  2665. }
  2666. #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
  2667. static int bpf_prog_test_run(const union bpf_attr *attr,
  2668. union bpf_attr __user *uattr)
  2669. {
  2670. struct bpf_prog *prog;
  2671. int ret = -ENOTSUPP;
  2672. if (CHECK_ATTR(BPF_PROG_TEST_RUN))
  2673. return -EINVAL;
  2674. if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
  2675. (!attr->test.ctx_size_in && attr->test.ctx_in))
  2676. return -EINVAL;
  2677. if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
  2678. (!attr->test.ctx_size_out && attr->test.ctx_out))
  2679. return -EINVAL;
  2680. prog = bpf_prog_get(attr->test.prog_fd);
  2681. if (IS_ERR(prog))
  2682. return PTR_ERR(prog);
  2683. if (prog->aux->ops->test_run)
  2684. ret = prog->aux->ops->test_run(prog, attr, uattr);
  2685. bpf_prog_put(prog);
  2686. return ret;
  2687. }
  2688. #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
  2689. static int bpf_obj_get_next_id(const union bpf_attr *attr,
  2690. union bpf_attr __user *uattr,
  2691. struct idr *idr,
  2692. spinlock_t *lock)
  2693. {
  2694. u32 next_id = attr->start_id;
  2695. int err = 0;
  2696. if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
  2697. return -EINVAL;
  2698. if (!capable(CAP_SYS_ADMIN))
  2699. return -EPERM;
  2700. next_id++;
  2701. spin_lock_bh(lock);
  2702. if (!idr_get_next(idr, &next_id))
  2703. err = -ENOENT;
  2704. spin_unlock_bh(lock);
  2705. if (!err)
  2706. err = put_user(next_id, &uattr->next_id);
  2707. return err;
  2708. }
  2709. struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
  2710. {
  2711. struct bpf_map *map;
  2712. spin_lock_bh(&map_idr_lock);
  2713. again:
  2714. map = idr_get_next(&map_idr, id);
  2715. if (map) {
  2716. map = __bpf_map_inc_not_zero(map, false);
  2717. if (IS_ERR(map)) {
  2718. (*id)++;
  2719. goto again;
  2720. }
  2721. }
  2722. spin_unlock_bh(&map_idr_lock);
  2723. return map;
  2724. }
  2725. struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
  2726. {
  2727. struct bpf_prog *prog;
  2728. spin_lock_bh(&prog_idr_lock);
  2729. again:
  2730. prog = idr_get_next(&prog_idr, id);
  2731. if (prog) {
  2732. prog = bpf_prog_inc_not_zero(prog);
  2733. if (IS_ERR(prog)) {
  2734. (*id)++;
  2735. goto again;
  2736. }
  2737. }
  2738. spin_unlock_bh(&prog_idr_lock);
  2739. return prog;
  2740. }
  2741. #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
  2742. struct bpf_prog *bpf_prog_by_id(u32 id)
  2743. {
  2744. struct bpf_prog *prog;
  2745. if (!id)
  2746. return ERR_PTR(-ENOENT);
  2747. spin_lock_bh(&prog_idr_lock);
  2748. prog = idr_find(&prog_idr, id);
  2749. if (prog)
  2750. prog = bpf_prog_inc_not_zero(prog);
  2751. else
  2752. prog = ERR_PTR(-ENOENT);
  2753. spin_unlock_bh(&prog_idr_lock);
  2754. return prog;
  2755. }
  2756. static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
  2757. {
  2758. struct bpf_prog *prog;
  2759. u32 id = attr->prog_id;
  2760. int fd;
  2761. if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
  2762. return -EINVAL;
  2763. if (!capable(CAP_SYS_ADMIN))
  2764. return -EPERM;
  2765. prog = bpf_prog_by_id(id);
  2766. if (IS_ERR(prog))
  2767. return PTR_ERR(prog);
  2768. fd = bpf_prog_new_fd(prog);
  2769. if (fd < 0)
  2770. bpf_prog_put(prog);
  2771. return fd;
  2772. }
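/* Together with bpf_obj_get_next_id() above, this is how tools such as
 * bpftool enumerate all loaded programs (sketch; both helpers are libbpf
 * wrappers and CAP_SYS_ADMIN is required):
 *
 *	__u32 id = 0;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		int fd = bpf_prog_get_fd_by_id(id);
 *
 *		if (fd < 0)
 *			continue;
 *		...inspect via BPF_OBJ_GET_INFO_BY_FD, then close(fd)...
 *	}
 *
 * A negative fd is skipped because the program may have been unloaded
 * between the two calls.
 */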
  2773. #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
  2774. static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
  2775. {
  2776. struct bpf_map *map;
  2777. u32 id = attr->map_id;
  2778. int f_flags;
  2779. int fd;
  2780. if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
  2781. attr->open_flags & ~BPF_OBJ_FLAG_MASK)
  2782. return -EINVAL;
  2783. if (!capable(CAP_SYS_ADMIN))
  2784. return -EPERM;
  2785. f_flags = bpf_get_file_flag(attr->open_flags);
  2786. if (f_flags < 0)
  2787. return f_flags;
  2788. spin_lock_bh(&map_idr_lock);
  2789. map = idr_find(&map_idr, id);
  2790. if (map)
  2791. map = __bpf_map_inc_not_zero(map, true);
  2792. else
  2793. map = ERR_PTR(-ENOENT);
  2794. spin_unlock_bh(&map_idr_lock);
  2795. if (IS_ERR(map))
  2796. return PTR_ERR(map);
  2797. fd = bpf_map_new_fd(map, f_flags);
  2798. if (fd < 0)
  2799. bpf_map_put_with_uref(map);
  2800. return fd;
  2801. }
static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr, u32 *off,
					      u32 *type)
{
	const struct bpf_map *map;
	int i;

	mutex_lock(&prog->aux->used_maps_mutex);
	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (map == (void *)addr) {
			*type = BPF_PSEUDO_MAP_FD;
			goto out;
		}
		if (!map->ops->map_direct_value_meta)
			continue;
		if (!map->ops->map_direct_value_meta(map, addr, off)) {
			*type = BPF_PSEUDO_MAP_VALUE;
			goto out;
		}
	}
	map = NULL;

out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return map;
}

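/* Make a sanitized copy of the xlated instructions for dumping: rewrite tail
 * calls back to the bpf_tail_call() helper call they were generated from,
 * zero out call targets unless the caller may see raw addresses, convert
 * PROBE_MEM loads back to plain BPF_MEM loads, and replace map pointers in
 * BPF_LD_IMM64 with map IDs.
 */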
static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
					      const struct cred *f_cred)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u32 off, type;
	u64 imm;
	u8 code;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		code = insns[i].code;

		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (code == (BPF_JMP | BPF_CALL) ||
		    code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok(f_cred))
				insns[i].imm = 0;
			continue;
		}
		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
			continue;
		}

		if (code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm, &off, &type);
		if (map) {
			insns[i].src_reg = type;
			insns[i].imm = map->id;
			insns[i + 1].imm = off;
			continue;
		}
	}

	return insns;
}

static int set_info_rec_size(struct bpf_prog_info *info)
{
	/*
	 * Ensure info.*_rec_size is the same as kernel expected size
	 *
	 * or
	 *
	 * Only allow zero *_rec_size if both _rec_size and _cnt are
	 * zero. In this case, the kernel will set the expected
	 * _rec_size back to the info.
	 */
	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);

	return 0;
}

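/* Fill struct bpf_prog_info for BPF_OBJ_GET_INFO_BY_FD on a program fd.
 * Sensitive fields (jited/xlated images, ksym addresses, jited line info) are
 * omitted or zeroed for callers without bpf_capable() / bpf_dump_raw_ok(),
 * and device-bound programs are handled by bpf_prog_offload_info_fill().
 */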
static int bpf_prog_get_info_by_fd(struct file *file,
				   struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info;
	u32 info_len = attr->info.info_len;
	struct bpf_prog_stats stats;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	mutex_lock(&prog->aux->used_maps_mutex);
	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i])) {
				mutex_unlock(&prog->aux->used_maps_mutex);
				return -EFAULT;
			}
	}
	mutex_unlock(&prog->aux->used_maps_mutex);

	err = set_info_rec_size(&info);
	if (err)
		return err;

	bpf_prog_get_stats(prog, &stats);
	info.run_time_ns = stats.nsecs;
	info.run_cnt = stats.cnt;

	if (!bpf_capable()) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		info.nr_jited_ksyms = 0;
		info.nr_jited_func_lens = 0;
		info.nr_func_info = 0;
		info.nr_line_info = 0;
		info.nr_jited_line_info = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	if (prog->aux->func_cnt) {
		u32 i;

		info.jited_prog_len = 0;
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}

	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);

			/* for multi-function programs, copy the JITed
			 * instructions for all the functions
			 */
			if (prog->aux->func_cnt) {
				u32 len, free, i;
				u8 *img;

				free = ulen;
				for (i = 0; i < prog->aux->func_cnt; i++) {
					len = prog->aux->func[i]->jited_len;
					len = min_t(u32, len, free);
					img = (u8 *) prog->aux->func[i]->bpf_func;
					if (copy_to_user(uinsns, img, len))
						return -EFAULT;
					uinsns += len;
					free -= len;
					if (!free)
						break;
				}
			} else {
				if (copy_to_user(uinsns, prog->bpf_func, ulen))
					return -EFAULT;
			}
		} else {
			info.jited_prog_insns = 0;
		}
	}

	ulen = info.nr_jited_ksyms;
	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long ksym_addr;
			u64 __user *user_ksyms;
			u32 i;

			/* copy the address of the kernel symbol
			 * corresponding to each function
			 */
			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					ksym_addr = (unsigned long)
						prog->aux->func[i]->bpf_func;
					if (put_user((u64) ksym_addr,
						     &user_ksyms[i]))
						return -EFAULT;
				}
			} else {
				ksym_addr = (unsigned long) prog->bpf_func;
				if (put_user((u64) ksym_addr, &user_ksyms[0]))
					return -EFAULT;
			}
		} else {
			info.jited_ksyms = 0;
		}
	}

	ulen = info.nr_jited_func_lens;
	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			u32 __user *user_lens;
			u32 func_len, i;

			/* copy the JITed image lengths for each function */
			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
			user_lens = u64_to_user_ptr(info.jited_func_lens);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					func_len =
						prog->aux->func[i]->jited_len;
					if (put_user(func_len, &user_lens[i]))
						return -EFAULT;
				}
			} else {
				func_len = prog->jited_len;
				if (put_user(func_len, &user_lens[0]))
					return -EFAULT;
			}
		} else {
			info.jited_func_lens = 0;
		}
	}

	if (prog->aux->btf)
		info.btf_id = btf_id(prog->aux->btf);

	ulen = info.nr_func_info;
	info.nr_func_info = prog->aux->func_info_cnt;
	if (info.nr_func_info && ulen) {
		char __user *user_finfo;

		user_finfo = u64_to_user_ptr(info.func_info);
		ulen = min_t(u32, info.nr_func_info, ulen);
		if (copy_to_user(user_finfo, prog->aux->func_info,
				 info.func_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_line_info;
	info.nr_line_info = prog->aux->nr_linfo;
	if (info.nr_line_info && ulen) {
		__u8 __user *user_linfo;

		user_linfo = u64_to_user_ptr(info.line_info);
		ulen = min_t(u32, info.nr_line_info, ulen);
		if (copy_to_user(user_linfo, prog->aux->linfo,
				 info.line_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_jited_line_info;
	if (prog->aux->jited_linfo)
		info.nr_jited_line_info = prog->aux->nr_linfo;
	else
		info.nr_jited_line_info = 0;
	if (info.nr_jited_line_info && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			__u64 __user *user_linfo;
			u32 i;

			user_linfo = u64_to_user_ptr(info.jited_line_info);
			ulen = min_t(u32, info.nr_jited_line_info, ulen);
			for (i = 0; i < ulen; i++) {
				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
					     &user_linfo[i]))
					return -EFAULT;
			}
		} else {
			info.jited_line_info = 0;
		}
	}

	ulen = info.nr_prog_tags;
	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
	if (ulen) {
		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
		u32 i;

		user_prog_tags = u64_to_user_ptr(info.prog_tags);
		ulen = min_t(u32, info.nr_prog_tags, ulen);
		if (prog->aux->func_cnt) {
			for (i = 0; i < ulen; i++) {
				if (copy_to_user(user_prog_tags[i],
						 prog->aux->func[i]->tag,
						 BPF_TAG_SIZE))
					return -EFAULT;
			}
		} else {
			if (copy_to_user(user_prog_tags[0],
					 prog->tag, BPF_TAG_SIZE))
				return -EFAULT;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

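/* Fill struct bpf_map_info for BPF_OBJ_GET_INFO_BY_FD on a map fd; offloaded
 * maps additionally get their device info filled in by
 * bpf_map_offload_info_fill().
 */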
static int bpf_map_get_info_by_fd(struct file *file,
				  struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}
	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_btf_get_info_by_fd(struct file *file,
				  struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
	if (err)
		return err;

	return btf_get_info_by_fd(btf, attr, uattr);
}

static int bpf_link_get_info_by_fd(struct file *file,
				   struct bpf_link *link,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_link_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = link->type;
	info.id = link->id;
	info.prog_id = link->prog->aux->id;

	if (link->ops->fill_link_info) {
		err = link->ops->fill_link_info(link, &info);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

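/* Dispatch BPF_OBJ_GET_INFO_BY_FD on the type of BPF object behind the fd:
 * program, map, BTF or link.
 */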
#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
	else if (f.file->f_op == &bpf_link_fops)
		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
					      attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

#define BPF_BTF_LOAD_LAST_FIELD btf_log_level

static int bpf_btf_load(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	return btf_new_fd(attr);
}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}

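/* Copy the BPF_TASK_FD_QUERY result back to userspace.  The probe name is
 * truncated (and -ENOSPC returned) when the user buffer is too small, but the
 * remaining fields are still filled in.
 */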
static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

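/* BPF_TASK_FD_QUERY: given a pid and an fd in that task, report which BPF
 * program sits behind it - either a raw tracepoint link or a perf_event-based
 * probe.
 */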
#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct files_struct *files;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	if (!task)
		return -ENOENT;

	files = get_files_struct(task);
	put_task_struct(task);
	if (!files)
		return -ENOENT;

	err = 0;
	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (!file)
		err = -EBADF;
	else
		get_file(file);
	spin_unlock(&files->file_lock);
	put_files_struct(files);

	if (err)
		goto out;

	if (file->f_op == &bpf_link_fops) {
		struct bpf_link *link = file->private_data;

		if (link->ops == &bpf_raw_tp_link_lops) {
			struct bpf_raw_tp_link *raw_tp =
				container_of(link, struct bpf_raw_tp_link, link);
			struct bpf_raw_event_map *btp = raw_tp->btp;

			err = bpf_task_fd_query_copy(attr, uattr,
						     raw_tp->link.prog->aux->id,
						     BPF_FD_TYPE_RAW_TRACEPOINT,
						     btp->tp->name, 0, 0);
			goto put_file;
		}
		goto out_not_supp;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

out_not_supp:
	err = -ENOTSUPP;
put_file:
	fput(file);
out:
	return err;
}

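/* Common entry for the four map batch commands.  Write access is tracked via
 * bpf_map_write_active_{inc,dec}() around the operation, and read/write
 * permission on the map fd is checked before dispatching to the map's ops.
 */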
#define BPF_MAP_BATCH_LAST_FIELD batch.flags

#define BPF_DO_BATCH(fn)			\
	do {					\
		if (!fn) {			\
			err = -ENOTSUPP;	\
			goto err_put;		\
		}				\
		err = fn(map, attr, uattr);	\
	} while (0)

static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
{
	bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
			cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
	struct bpf_map *map;
	int err, ufd;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_BATCH))
		return -EINVAL;

	ufd = attr->batch.map_fd;
	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (has_write)
		bpf_map_write_active_inc(map);
	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}
	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (cmd == BPF_MAP_LOOKUP_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_batch);
	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
	else if (cmd == BPF_MAP_UPDATE_BATCH)
		BPF_DO_BATCH(map->ops->map_update_batch);
	else
		BPF_DO_BATCH(map->ops->map_delete_batch);

err_put:
	if (has_write)
		bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

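/* BPF_LINK_CREATE: attach a program via a bpf_link.  The attach type must
 * match the program's expected attach type, and the link flavour is chosen
 * from the program type (cgroup, tracing, netns, XDP, ...).
 */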
static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	if (attr->link_create.attach_type != prog->expected_attach_type)
		return -EINVAL;

	if (prog->expected_attach_type == BPF_TRACE_ITER)
		return bpf_iter_link_attach(attr, prog);
	else if (prog->type == BPF_PROG_TYPE_EXT)
		return bpf_tracing_prog_attach(prog,
					       attr->link_create.target_fd,
					       attr->link_create.target_btf_id);
	return -EINVAL;
}

#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
static int link_create(union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_LINK_CREATE))
		return -EINVAL;

	prog = bpf_prog_get(attr->link_create.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	ret = bpf_prog_attach_check_attach_type(prog,
						attr->link_create.attach_type);
	if (ret)
		goto out;

	if (prog->type == BPF_PROG_TYPE_EXT) {
		ret = tracing_bpf_link_attach(attr, prog);
		goto out;
	}

	ptype = attach_type_to_prog_type(attr->link_create.attach_type);
	if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
		ret = -EINVAL;
		goto out;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		ret = cgroup_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_TRACING:
		ret = tracing_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_SK_LOOKUP:
		ret = netns_bpf_link_create(attr, prog);
		break;
#ifdef CONFIG_NET
	case BPF_PROG_TYPE_XDP:
		ret = bpf_xdp_link_attach(attr, prog);
		break;
#endif
	default:
		ret = -EINVAL;
	}

out:
	if (ret < 0)
		bpf_prog_put(prog);
	return ret;
}

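/* BPF_LINK_UPDATE: replace the program behind an existing link.  With
 * BPF_F_REPLACE the caller must also pass the fd of the program being
 * replaced; otherwise old_prog_fd must be zero.
 */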
#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd

static int link_update(union bpf_attr *attr)
{
	struct bpf_prog *old_prog = NULL, *new_prog;
	struct bpf_link *link;
	u32 flags;
	int ret;

	if (CHECK_ATTR(BPF_LINK_UPDATE))
		return -EINVAL;

	flags = attr->link_update.flags;
	if (flags & ~BPF_F_REPLACE)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_update.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
	if (IS_ERR(new_prog)) {
		ret = PTR_ERR(new_prog);
		goto out_put_link;
	}

	if (flags & BPF_F_REPLACE) {
		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
		if (IS_ERR(old_prog)) {
			ret = PTR_ERR(old_prog);
			old_prog = NULL;
			goto out_put_progs;
		}
	} else if (attr->link_update.old_prog_fd) {
		ret = -EINVAL;
		goto out_put_progs;
	}

	if (link->ops->update_prog)
		ret = link->ops->update_prog(link, new_prog, old_prog);
	else
		ret = -EINVAL;

out_put_progs:
	if (old_prog)
		bpf_prog_put(old_prog);
	if (ret)
		bpf_prog_put(new_prog);
out_put_link:
	bpf_link_put(link);
	return ret;
}

#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd

static int link_detach(union bpf_attr *attr)
{
	struct bpf_link *link;
	int ret;

	if (CHECK_ATTR(BPF_LINK_DETACH))
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->detach)
		ret = link->ops->detach(link);
	else
		ret = -EOPNOTSUPP;

	bpf_link_put(link);
	return ret;
}

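/* Link ID lookup used by BPF_LINK_GET_FD_BY_ID: take a reference only if the
 * link is fully initialized (non-zero ID) and its refcount has not already
 * dropped to zero.
 */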
static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}

struct bpf_link *bpf_link_by_id(u32 id)
{
	struct bpf_link *link;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&link_idr_lock);
	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
	link = idr_find(&link_idr, id);
	if (link) {
		if (link->id)
			link = bpf_link_inc_not_zero(link);
		else
			link = ERR_PTR(-EAGAIN);
	} else {
		link = ERR_PTR(-ENOENT);
	}
	spin_unlock_bh(&link_idr_lock);
	return link;
}

#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_link *link;
	u32 id = attr->link_id;
	int fd;

	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	link = bpf_link_by_id(id);
	if (IS_ERR(link))
		return PTR_ERR(link);

	fd = bpf_link_new_fd(link);
	if (fd < 0)
		bpf_link_put(link);

	return fd;
}

DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
	mutex_lock(&bpf_stats_enabled_mutex);
	static_key_slow_dec(&bpf_stats_enabled_key.key);
	mutex_unlock(&bpf_stats_enabled_mutex);
	return 0;
}

static const struct file_operations bpf_stats_fops = {
	.release = bpf_stats_release,
};

static int bpf_enable_runtime_stats(void)
{
	int fd;

	mutex_lock(&bpf_stats_enabled_mutex);

	/* Set a very high limit to avoid overflow */
	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
		mutex_unlock(&bpf_stats_enabled_mutex);
		return -EBUSY;
	}

	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
	if (fd >= 0)
		static_key_slow_inc(&bpf_stats_enabled_key.key);

	mutex_unlock(&bpf_stats_enabled_mutex);
	return fd;
}

#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_ENABLE_STATS))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (attr->enable_stats.type) {
	case BPF_STATS_RUN_TIME:
		return bpf_enable_runtime_stats();
	default:
		break;
	}
	return -EINVAL;
}

#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put(link);

	return err;
}

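/* BPF_PROG_BIND_MAP: record an extra map in the program's used_maps[] so the
 * map stays alive for as long as the program does, even if the program never
 * references it from its instructions.  Binding an already-used map is a
 * no-op.
 */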
#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags

static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			bpf_map_put(map);
			goto out_unlock;
		}

	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;
	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}

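/* Single entry point for all BPF commands.  The attr union is copied in, the
 * vendor hook and LSM hook run, and the command is dispatched below.
 * Illustrative userspace usage (not from this file), assuming a raw syscall
 * wrapper:
 *
 *	union bpf_attr attr = { .map_id = id };
 *	int fd = syscall(__NR_bpf, BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
 */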
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr;
	int err;

	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	trace_android_vh_check_bpf_syscall(cmd, &attr, size);

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	case BPF_LINK_DETACH:
		err = link_detach(&attr);
		break;
	case BPF_PROG_BIND_MAP:
		err = bpf_prog_bind_map(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}