cgroup.c

/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Notifications support
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "cgroup-internal.h"

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/magic.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/hashtable.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
#include <linux/proc_ns.h>
#include <linux/nsproxy.h>
#include <linux/file.h>
#include <linux/fs_parser.h>
#include <linux/sched/cputime.h>
#include <linux/psi.h>
#include <net/sock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h>
#undef CREATE_TRACE_POINTS

#include <trace/hooks/cgroup.h>

#define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \
                              MAX_CFTYPE_NAME + 2)

/* let's not notify more than 100 times per second */
#define CGROUP_FILE_NOTIFY_MIN_INTV DIV_ROUND_UP(HZ, 100)
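/*
 * For example, with HZ=1000 the interval above evaluates to
 * DIV_ROUND_UP(1000, 100) = 10 jiffies, i.e. successive notifications on the
 * same cgroup file are spaced at least roughly 10ms apart.
 */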
/*
 * cgroup_mutex is the master lock. Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_lock protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);

#ifdef CONFIG_PROVE_RCU
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#endif

DEFINE_SPINLOCK(trace_cgroup_path_lock);
char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
bool cgroup_debug __read_mostly;

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_file->kn for !self csses. It synchronizes notifications
 * against file removal/re-creation across css hiding.
 */
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);

#define cgroup_assert_mutex_or_rcu_locked() \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
                         !lockdep_is_held(&cgroup_mutex), \
                         "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions. Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
#define SUBSYS(_x) \
        DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key); \
        DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key); \
        EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key); \
        EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true *cgroup_subsys_enabled_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS
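/*
 * Example of the SUBSYS() x-macro pattern above, assuming CONFIG_CPUSETS=y
 * (so that <linux/cgroup_subsys.h> emits SUBSYS(cpuset)): cgroup_subsys[]
 * gains the entry [cpuset_cgrp_id] = &cpuset_cgrp_subsys, cgroup_subsys_name[]
 * gains [cpuset_cgrp_id] = "cpuset", and the DEFINE_STATIC_KEY_TRUE() block
 * defines cpuset_cgrp_subsys_enabled_key and cpuset_cgrp_subsys_on_dfl_key.
 */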
static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);

/* the default hierarchy */
struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time. This is for backward compatibility.
 */
static bool cgrp_dfl_visible;

/* some controllers are not supported in the default hierarchy */
static u16 cgrp_dfl_inhibit_ss_mask;

/* some controllers are implicitly enabled on the default hierarchy */
static u16 cgrp_dfl_implicit_ss_mask;

/* some controllers can be threaded on the default hierarchy */
static u16 cgrp_dfl_threaded_ss_mask;

/* The list of hierarchy roots */
LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses. It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list. Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmasks identify subsystems with specific features to avoid
 * having to do iterative checks repeatedly.
 */
static u16 have_fork_callback __read_mostly;
static u16 have_exit_callback __read_mostly;
static u16 have_release_callback __read_mostly;
static u16 have_canfork_callback __read_mostly;

/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
        .count = REFCOUNT_INIT(2),
        .user_ns = &init_user_ns,
        .ns.ops = &cgroupns_operations,
        .ns.inum = PROC_CGROUP_INIT_INO,
        .root_cset = &init_css_set,
};

static struct file_system_type cgroup2_fs_type;
static struct cftype cgroup_base_files[];

/* cgroup optional features */
enum cgroup_opt_features {
#ifdef CONFIG_PSI
        OPT_FEATURE_PRESSURE,
#endif
        OPT_FEATURE_COUNT
};

static const char *cgroup_opt_feature_names[OPT_FEATURE_COUNT] = {
#ifdef CONFIG_PSI
        "pressure",
#endif
};

static u16 cgroup_feature_disable_mask __read_mostly;

static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
static void css_task_iter_skip(struct css_task_iter *it,
                               struct task_struct *task);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
                                              struct cgroup_subsys *ss);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
                              struct cgroup *cgrp, struct cftype cfts[],
                              bool is_add);
/**
 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
 * @ssid: subsys ID of interest
 *
 * cgroup_subsys_enabled() can only be used with literal subsys names which
 * is fine for individual subsystems but unsuitable for cgroup core. This
 * is a slower static_key_enabled() based test indexed by @ssid.
 */
bool cgroup_ssid_enabled(int ssid)
{
        if (CGROUP_SUBSYS_COUNT == 0)
                return false;

        return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
}

/**
 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
 * @cgrp: the cgroup of interest
 *
 * The default hierarchy is the v2 interface of cgroup and this function
 * can be used to test whether a cgroup is on the default hierarchy for
 * cases where a subsystem should behave differently depending on the
 * interface version.
 *
 * List of changed behaviors:
 *
 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
 *   and "name" are disallowed.
 *
 * - When mounting an existing superblock, mount options should match.
 *
 * - Remount is disallowed.
 *
 * - rename(2) is disallowed.
 *
 * - "tasks" is removed. Everything should be at process granularity. Use
 *   "cgroup.procs" instead.
 *
 * - "cgroup.procs" is not sorted. pids will be unique unless they got
 *   recycled in between reads.
 *
 * - "release_agent" and "notify_on_release" are removed. Replacement
 *   notification mechanism will be implemented.
 *
 * - "cgroup.clone_children" is removed.
 *
 * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
 *   and its descendants contain no task; otherwise, 1. The file also
 *   generates kernfs notification which can be monitored through poll and
 *   [di]notify when the value of the file changes.
 *
 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
 *   take masks of ancestors with non-empty cpus/mems, instead of being
 *   moved to an ancestor.
 *
 * - cpuset: a task can be moved into an empty cpuset, and again it takes
 *   masks of ancestors.
 *
 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
 *   is not created.
 *
 * - blkcg: blk-throttle becomes properly hierarchical.
 *
 * - debug: disallowed on the default hierarchy.
 */
bool cgroup_on_dfl(const struct cgroup *cgrp)
{
        return cgrp->root == &cgrp_dfl_root;
}
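/*
 * A minimal usage sketch (hypothetical controller code, not taken from any
 * in-tree subsystem): behavior is keyed off the interface version.
 *
 *        static bool my_ss_use_v2_semantics(struct cgroup_subsys_state *css)
 *        {
 *                return cgroup_on_dfl(css->cgroup);
 *        }
 */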
/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
                            gfp_t gfp_mask)
{
        int ret;

        idr_preload(gfp_mask);
        spin_lock_bh(&cgroup_idr_lock);
        ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
        spin_unlock_bh(&cgroup_idr_lock);
        idr_preload_end();
        return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
        void *ret;

        spin_lock_bh(&cgroup_idr_lock);
        ret = idr_replace(idr, ptr, id);
        spin_unlock_bh(&cgroup_idr_lock);
        return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
        spin_lock_bh(&cgroup_idr_lock);
        idr_remove(idr, id);
        spin_unlock_bh(&cgroup_idr_lock);
}
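/*
 * Typical lifecycle of these wrappers, sketched roughly after how
 * css_create() later in this file uses them (illustrative only, error
 * handling omitted):
 *
 *        id = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
 *        ...initialize the object...
 *        cgroup_idr_replace(&ss->css_idr, css, id);
 *        ...
 *        cgroup_idr_remove(&ss->css_idr, id);
 */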
static bool cgroup_has_tasks(struct cgroup *cgrp)
{
        return cgrp->nr_populated_csets;
}

bool cgroup_is_threaded(struct cgroup *cgrp)
{
        return cgrp->dom_cgrp != cgrp;
}

/* can @cgrp host both domain and threaded children? */
static bool cgroup_is_mixable(struct cgroup *cgrp)
{
        /*
         * Root isn't under domain level resource control exempting it from
         * the no-internal-process constraint, so it can serve as a thread
         * root and a parent of resource domains at the same time.
         */
        return !cgroup_parent(cgrp);
}

/* can @cgrp become a thread root? should always be true for a thread root */
static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
{
        /* mixables don't care */
        if (cgroup_is_mixable(cgrp))
                return true;

        /* domain roots can't be nested under threaded */
        if (cgroup_is_threaded(cgrp))
                return false;

        /* can only have either domain or threaded children */
        if (cgrp->nr_populated_domain_children)
                return false;

        /* and no domain controllers can be enabled */
        if (cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
                return false;

        return true;
}

/* is @cgrp root of a threaded subtree? */
bool cgroup_is_thread_root(struct cgroup *cgrp)
{
        /* thread root should be a domain */
        if (cgroup_is_threaded(cgrp))
                return false;

        /* a domain w/ threaded children is a thread root */
        if (cgrp->nr_threaded_children)
                return true;

        /*
         * A domain which has tasks and explicit threaded controllers
         * enabled is a thread root.
         */
        if (cgroup_has_tasks(cgrp) &&
            (cgrp->subtree_control & cgrp_dfl_threaded_ss_mask))
                return true;

        return false;
}

/* a domain which isn't connected to the root w/o breakage can't be used */
static bool cgroup_is_valid_domain(struct cgroup *cgrp)
{
        /* the cgroup itself can be a thread root */
        if (cgroup_is_threaded(cgrp))
                return false;

        /* but the ancestors can't be unless mixable */
        while ((cgrp = cgroup_parent(cgrp))) {
                if (!cgroup_is_mixable(cgrp) && cgroup_is_thread_root(cgrp))
                        return false;
                if (cgroup_is_threaded(cgrp))
                        return false;
        }

        return true;
}
/* subsystems visibly enabled on a cgroup */
static u16 cgroup_control(struct cgroup *cgrp)
{
        struct cgroup *parent = cgroup_parent(cgrp);
        u16 root_ss_mask = cgrp->root->subsys_mask;

        if (parent) {
                u16 ss_mask = parent->subtree_control;

                /* threaded cgroups can only have threaded controllers */
                if (cgroup_is_threaded(cgrp))
                        ss_mask &= cgrp_dfl_threaded_ss_mask;
                return ss_mask;
        }

        if (cgroup_on_dfl(cgrp))
                root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
                                  cgrp_dfl_implicit_ss_mask);
        return root_ss_mask;
}

/* subsystems enabled on a cgroup */
static u16 cgroup_ss_mask(struct cgroup *cgrp)
{
        struct cgroup *parent = cgroup_parent(cgrp);

        if (parent) {
                u16 ss_mask = parent->subtree_ss_mask;

                /* threaded cgroups can only have threaded controllers */
                if (cgroup_is_threaded(cgrp))
                        ss_mask &= cgrp_dfl_threaded_ss_mask;
                return ss_mask;
        }

        return cgrp->root->subsys_mask;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks. This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
                                              struct cgroup_subsys *ss)
{
        if (ss)
                return rcu_dereference_check(cgrp->subsys[ss->id],
                                             lockdep_is_held(&cgroup_mutex));
        else
                return &cgrp->self;
}
/**
 * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get @cgrp's css associated with @ss. If the css doesn't exist
 * or is offline, %NULL is returned.
 */
static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
                                                     struct cgroup_subsys *ss)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();
        css = cgroup_css(cgrp, ss);
        if (css && !css_tryget_online(css))
                css = NULL;
        rcu_read_unlock();

        return css;
}

/**
 * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled. If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
                                                        struct cgroup_subsys *ss)
{
        lockdep_assert_held(&cgroup_mutex);

        if (!ss)
                return &cgrp->self;

        /*
         * This function is used while updating css associations and thus
         * can't test the csses directly. Test ss_mask.
         */
        while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
                cgrp = cgroup_parent(cgrp);
                if (!cgrp)
                        return NULL;
        }

        return cgroup_css(cgrp, ss);
}
/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss. The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 *
 * The returned css is not guaranteed to be online, and therefore it is the
 * caller's responsibility to tryget a reference for it.
 */
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
                                         struct cgroup_subsys *ss)
{
        struct cgroup_subsys_state *css;

        do {
                css = cgroup_css(cgrp, ss);

                if (css)
                        return css;
                cgrp = cgroup_parent(cgrp);
        } while (cgrp);

        return init_css_set.subsys[ss->id];
}

/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss. The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
                                             struct cgroup_subsys *ss)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();

        do {
                css = cgroup_css(cgrp, ss);

                if (css && css_tryget_online(css))
                        goto out_unlock;
                cgrp = cgroup_parent(cgrp);
        } while (cgrp);

        css = init_css_set.subsys[ss->id];
        css_get(css);
out_unlock:
        rcu_read_unlock();
        return css;
}
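/*
 * Illustrative caller pattern (hypothetical, using the memory controller as
 * the example subsystem): the reference obtained above must be dropped with
 * css_put() once the caller is done with it.
 *
 *        css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
 *        ...use css...
 *        css_put(css);
 */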
static void cgroup_get_live(struct cgroup *cgrp)
{
        WARN_ON_ONCE(cgroup_is_dead(cgrp));
        css_get(&cgrp->self);
}

/**
 * __cgroup_task_count - count the number of tasks in a cgroup. The caller
 * is responsible for taking the css_set_lock.
 * @cgrp: the cgroup in question
 */
int __cgroup_task_count(const struct cgroup *cgrp)
{
        int count = 0;
        struct cgrp_cset_link *link;

        lockdep_assert_held(&css_set_lock);

        list_for_each_entry(link, &cgrp->cset_links, cset_link)
                count += link->cset->nr_tasks;

        return count;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
        int count;

        spin_lock_irq(&css_set_lock);
        count = __cgroup_task_count(cgrp);
        spin_unlock_irq(&css_set_lock);

        return count;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
        struct cgroup *cgrp = of->kn->parent->priv;
        struct cftype *cft = of_cft(of);

        /*
         * This is an open and unprotected implementation of cgroup_css().
         * seq_css() is only called from a kernfs file operation which has
         * an active reference on the file. Because all the subsystem
         * files are drained before a css is disassociated with a cgroup,
         * the matching css from the cgroup's subsys table is guaranteed to
         * be and stay valid until the enclosing operation is complete.
         */
        if (cft->ss)
                return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
        else
                return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);
/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp) \
        for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
                if (!((css) = rcu_dereference_check( \
                                (cgrp)->subsys[(ssid)], \
                                lockdep_is_held(&cgroup_mutex)))) { } \
                else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp) \
        for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
                if (!((css) = cgroup_e_css_by_mask(cgrp, \
                                cgroup_subsys[(ssid)]))) \
                        ; \
                else

/**
 * do_each_subsys_mask - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_mask: the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * @ss_mask is set.
 */
#define do_each_subsys_mask(ss, ssid, ss_mask) do { \
        unsigned long __ss_mask = (ss_mask); \
        if (!CGROUP_SUBSYS_COUNT) { /* to avoid spurious gcc warning */ \
                (ssid) = 0; \
                break; \
        } \
        for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) { \
                (ss) = cgroup_subsys[ssid]; \
                {

#define while_each_subsys_mask() \
                } \
        } \
} while (false)
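/*
 * Illustrative use of the pair above (hypothetical debug snippet, given a
 * struct cgroup *cgrp):
 *
 *        struct cgroup_subsys *ss;
 *        int ssid;
 *
 *        do_each_subsys_mask(ss, ssid, cgrp->subtree_control) {
 *                pr_debug("%s is enabled in the subtree\n", ss->name);
 *        } while_each_subsys_mask();
 */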
/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp) \
        list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
                if (({ lockdep_assert_held(&cgroup_mutex); \
                       cgroup_is_dead(child); })) \
                        ; \
                else

/* walk live descendants in preorder */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) \
        css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL)) \
                if (({ lockdep_assert_held(&cgroup_mutex); \
                       (dsct) = (d_css)->cgroup; \
                       cgroup_is_dead(dsct); })) \
                        ; \
                else

/* walk live descendants in postorder */
#define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) \
        css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \
                if (({ lockdep_assert_held(&cgroup_mutex); \
                       (dsct) = (d_css)->cgroup; \
                       cgroup_is_dead(dsct); })) \
                        ; \
                else
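/*
 * Illustrative walk (hypothetical snippet, given a struct cgroup *cgrp and
 * cgroup_mutex held, which the lockdep assertion in the macro verifies):
 *
 *        struct cgroup_subsys_state *d_css;
 *        struct cgroup *dsct;
 *
 *        cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
 *                pr_debug("visiting %s\n", cgroup_name(dsct));
 */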
/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
        .refcount = REFCOUNT_INIT(1),
        .dom_cset = &init_css_set,
        .tasks = LIST_HEAD_INIT(init_css_set.tasks),
        .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
        .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
        .task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
        .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
        .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
        .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node),
        .mg_node = LIST_HEAD_INIT(init_css_set.mg_node),

        /*
         * The following field is re-initialized when this cset gets linked
         * in cgroup_init(). However, let's initialize the field
         * statically too so that the default cgroup can be accessed safely
         * early during boot.
         */
        .dfl_cgrp = &cgrp_dfl_root.cgrp,
};

static int css_set_count = 1; /* 1 for init_css_set */

static bool css_set_threaded(struct css_set *cset)
{
        return cset->dom_cset != cset;
}

/**
 * css_set_populated - does a css_set contain any tasks?
 * @cset: target css_set
 *
 * css_set_populated() should be the same as !!cset->nr_tasks at steady
 * state. However, css_set_populated() can be called while a task is being
 * added to or removed from the linked list before the nr_tasks is
 * properly updated. Hence, we can't just look at ->nr_tasks here.
 */
static bool css_set_populated(struct css_set *cset)
{
        lockdep_assert_held(&css_set_lock);

        return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
}

/**
 * cgroup_update_populated - update the populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * One of the css_sets associated with @cgrp is either getting its first
 * task or losing the last. Update @cgrp->nr_populated_* accordingly. The
 * count is propagated towards root so that a given cgroup's
 * nr_populated_children is zero iff none of its descendants contain any
 * tasks.
 *
 * @cgrp's interface file "cgroup.populated" is zero if both
 * @cgrp->nr_populated_csets and @cgrp->nr_populated_children are zero and
 * 1 otherwise. When the sum changes from or to zero, userland is notified
 * that the content of the interface file has changed. This can be used to
 * detect when @cgrp and its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
        struct cgroup *child = NULL;
        int adj = populated ? 1 : -1;

        lockdep_assert_held(&css_set_lock);

        do {
                bool was_populated = cgroup_is_populated(cgrp);

                if (!child) {
                        cgrp->nr_populated_csets += adj;
                } else {
                        if (cgroup_is_threaded(child))
                                cgrp->nr_populated_threaded_children += adj;
                        else
                                cgrp->nr_populated_domain_children += adj;
                }

                if (was_populated == cgroup_is_populated(cgrp))
                        break;

                cgroup1_check_for_release(cgrp);
                TRACE_CGROUP_PATH(notify_populated, cgrp,
                                  cgroup_is_populated(cgrp));
                cgroup_file_notify(&cgrp->events_file);

                child = cgrp;
                cgrp = cgroup_parent(cgrp);
        } while (cgrp);
}
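/*
 * Worked example (illustrative): when the first task enters a previously
 * empty leaf cgroup A whose parent is B, the loop above first bumps
 * A->nr_populated_csets, notices that A flipped from empty to populated,
 * notifies A->events_file, then moves up and bumps
 * B->nr_populated_domain_children (or the _threaded_ counter if A is
 * threaded). The walk stops at the first ancestor whose populated state
 * does not change.
 */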
  728. /**
  729. * css_set_update_populated - update populated state of a css_set
  730. * @cset: target css_set
  731. * @populated: whether @cset is populated or depopulated
  732. *
  733. * @cset is either getting the first task or losing the last. Update the
  734. * populated counters of all associated cgroups accordingly.
  735. */
  736. static void css_set_update_populated(struct css_set *cset, bool populated)
  737. {
  738. struct cgrp_cset_link *link;
  739. lockdep_assert_held(&css_set_lock);
  740. list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
  741. cgroup_update_populated(link->cgrp, populated);
  742. }
  743. /*
  744. * @task is leaving, advance task iterators which are pointing to it so
  745. * that they can resume at the next position. Advancing an iterator might
  746. * remove it from the list, use safe walk. See css_task_iter_skip() for
  747. * details.
  748. */
  749. static void css_set_skip_task_iters(struct css_set *cset,
  750. struct task_struct *task)
  751. {
  752. struct css_task_iter *it, *pos;
  753. list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
  754. css_task_iter_skip(it, task);
  755. }
  756. /**
  757. * css_set_move_task - move a task from one css_set to another
  758. * @task: task being moved
  759. * @from_cset: css_set @task currently belongs to (may be NULL)
  760. * @to_cset: new css_set @task is being moved to (may be NULL)
  761. * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
  762. *
  763. * Move @task from @from_cset to @to_cset. If @task didn't belong to any
  764. * css_set, @from_cset can be NULL. If @task is being disassociated
  765. * instead of moved, @to_cset can be NULL.
  766. *
  767. * This function automatically handles populated counter updates and
  768. * css_task_iter adjustments but the caller is responsible for managing
  769. * @from_cset and @to_cset's reference counts.
  770. */
  771. static void css_set_move_task(struct task_struct *task,
  772. struct css_set *from_cset, struct css_set *to_cset,
  773. bool use_mg_tasks)
  774. {
  775. lockdep_assert_held(&css_set_lock);
  776. if (to_cset && !css_set_populated(to_cset))
  777. css_set_update_populated(to_cset, true);
  778. if (from_cset) {
  779. WARN_ON_ONCE(list_empty(&task->cg_list));
  780. css_set_skip_task_iters(from_cset, task);
  781. list_del_init(&task->cg_list);
  782. if (!css_set_populated(from_cset))
  783. css_set_update_populated(from_cset, false);
  784. } else {
  785. WARN_ON_ONCE(!list_empty(&task->cg_list));
  786. }
  787. if (to_cset) {
  788. /*
  789. * We are synchronized through cgroup_threadgroup_rwsem
  790. * against PF_EXITING setting such that we can't race
  791. * against cgroup_exit()/cgroup_free() dropping the css_set.
  792. */
  793. WARN_ON_ONCE(task->flags & PF_EXITING);
  794. cgroup_move_task(task, to_cset);
  795. list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
  796. &to_cset->tasks);
  797. }
  798. }
  799. /*
800. * Hash table for cgroup groups (css_sets). This makes it faster to find
801. * an existing css_set. This hash doesn't (currently) take into
802. * account cgroups in empty hierarchies.
  803. */
  804. #define CSS_SET_HASH_BITS 7
  805. static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
  806. static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
  807. {
  808. unsigned long key = 0UL;
  809. struct cgroup_subsys *ss;
  810. int i;
  811. for_each_subsys(ss, i)
  812. key += (unsigned long)css[i];
  813. key = (key >> 16) ^ key;
  814. return key;
  815. }
  816. void put_css_set_locked(struct css_set *cset)
  817. {
  818. struct cgrp_cset_link *link, *tmp_link;
  819. struct cgroup_subsys *ss;
  820. int ssid;
  821. lockdep_assert_held(&css_set_lock);
  822. if (!refcount_dec_and_test(&cset->refcount))
  823. return;
  824. WARN_ON_ONCE(!list_empty(&cset->threaded_csets));
825. /* This css_set is dead. Unlink it and release cgroup and css refs. */
  826. for_each_subsys(ss, ssid) {
  827. list_del(&cset->e_cset_node[ssid]);
  828. css_put(cset->subsys[ssid]);
  829. }
  830. hash_del(&cset->hlist);
  831. css_set_count--;
  832. list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
  833. list_del(&link->cset_link);
  834. list_del(&link->cgrp_link);
  835. if (cgroup_parent(link->cgrp))
  836. cgroup_put(link->cgrp);
  837. kfree(link);
  838. }
  839. if (css_set_threaded(cset)) {
  840. list_del(&cset->threaded_csets_node);
  841. put_css_set_locked(cset->dom_cset);
  842. }
  843. kfree_rcu(cset, rcu_head);
  844. }
  845. /**
  846. * compare_css_sets - helper function for find_existing_css_set().
  847. * @cset: candidate css_set being tested
  848. * @old_cset: existing css_set for a task
  849. * @new_cgrp: cgroup that's being entered by the task
  850. * @template: desired set of css pointers in css_set (pre-calculated)
  851. *
  852. * Returns true if "cset" matches "old_cset" except for the hierarchy
  853. * which "new_cgrp" belongs to, for which it should match "new_cgrp".
  854. */
  855. static bool compare_css_sets(struct css_set *cset,
  856. struct css_set *old_cset,
  857. struct cgroup *new_cgrp,
  858. struct cgroup_subsys_state *template[])
  859. {
  860. struct cgroup *new_dfl_cgrp;
  861. struct list_head *l1, *l2;
  862. /*
  863. * On the default hierarchy, there can be csets which are
  864. * associated with the same set of cgroups but different csses.
  865. * Let's first ensure that csses match.
  866. */
  867. if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
  868. return false;
  869. /* @cset's domain should match the default cgroup's */
  870. if (cgroup_on_dfl(new_cgrp))
  871. new_dfl_cgrp = new_cgrp;
  872. else
  873. new_dfl_cgrp = old_cset->dfl_cgrp;
  874. if (new_dfl_cgrp->dom_cgrp != cset->dom_cset->dfl_cgrp)
  875. return false;
  876. /*
  877. * Compare cgroup pointers in order to distinguish between
  878. * different cgroups in hierarchies. As different cgroups may
  879. * share the same effective css, this comparison is always
  880. * necessary.
  881. */
  882. l1 = &cset->cgrp_links;
  883. l2 = &old_cset->cgrp_links;
  884. while (1) {
  885. struct cgrp_cset_link *link1, *link2;
  886. struct cgroup *cgrp1, *cgrp2;
  887. l1 = l1->next;
  888. l2 = l2->next;
  889. /* See if we reached the end - both lists are equal length. */
  890. if (l1 == &cset->cgrp_links) {
  891. BUG_ON(l2 != &old_cset->cgrp_links);
  892. break;
  893. } else {
  894. BUG_ON(l2 == &old_cset->cgrp_links);
  895. }
  896. /* Locate the cgroups associated with these links. */
  897. link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
  898. link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
  899. cgrp1 = link1->cgrp;
  900. cgrp2 = link2->cgrp;
  901. /* Hierarchies should be linked in the same order. */
  902. BUG_ON(cgrp1->root != cgrp2->root);
  903. /*
  904. * If this hierarchy is the hierarchy of the cgroup
  905. * that's changing, then we need to check that this
  906. * css_set points to the new cgroup; if it's any other
  907. * hierarchy, then this css_set should point to the
  908. * same cgroup as the old css_set.
  909. */
  910. if (cgrp1->root == new_cgrp->root) {
  911. if (cgrp1 != new_cgrp)
  912. return false;
  913. } else {
  914. if (cgrp1 != cgrp2)
  915. return false;
  916. }
  917. }
  918. return true;
  919. }
  920. /**
  921. * find_existing_css_set - init css array and find the matching css_set
  922. * @old_cset: the css_set that we're using before the cgroup transition
  923. * @cgrp: the cgroup that we're moving into
  924. * @template: out param for the new set of csses, should be clear on entry
  925. */
  926. static struct css_set *find_existing_css_set(struct css_set *old_cset,
  927. struct cgroup *cgrp,
  928. struct cgroup_subsys_state *template[])
  929. {
  930. struct cgroup_root *root = cgrp->root;
  931. struct cgroup_subsys *ss;
  932. struct css_set *cset;
  933. unsigned long key;
  934. int i;
  935. /*
  936. * Build the set of subsystem state objects that we want to see in the
937. * new css_set. While subsystems can change globally, the entries here
  938. * won't change, so no need for locking.
  939. */
  940. for_each_subsys(ss, i) {
  941. if (root->subsys_mask & (1UL << i)) {
  942. /*
  943. * @ss is in this hierarchy, so we want the
  944. * effective css from @cgrp.
  945. */
  946. template[i] = cgroup_e_css_by_mask(cgrp, ss);
  947. } else {
  948. /*
  949. * @ss is not in this hierarchy, so we don't want
  950. * to change the css.
  951. */
  952. template[i] = old_cset->subsys[i];
  953. }
  954. }
  955. key = css_set_hash(template);
  956. hash_for_each_possible(css_set_table, cset, hlist, key) {
  957. if (!compare_css_sets(cset, old_cset, cgrp, template))
  958. continue;
  959. /* This css_set matches what we need */
  960. return cset;
  961. }
  962. /* No existing cgroup group matched */
  963. return NULL;
  964. }
  965. static void free_cgrp_cset_links(struct list_head *links_to_free)
  966. {
  967. struct cgrp_cset_link *link, *tmp_link;
  968. list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
  969. list_del(&link->cset_link);
  970. kfree(link);
  971. }
  972. }
  973. /**
  974. * allocate_cgrp_cset_links - allocate cgrp_cset_links
  975. * @count: the number of links to allocate
  976. * @tmp_links: list_head the allocated links are put on
  977. *
  978. * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
  979. * through ->cset_link. Returns 0 on success or -errno.
  980. */
  981. static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
  982. {
  983. struct cgrp_cset_link *link;
  984. int i;
  985. INIT_LIST_HEAD(tmp_links);
  986. for (i = 0; i < count; i++) {
  987. link = kzalloc(sizeof(*link), GFP_KERNEL);
  988. if (!link) {
  989. free_cgrp_cset_links(tmp_links);
  990. return -ENOMEM;
  991. }
  992. list_add(&link->cset_link, tmp_links);
  993. }
  994. return 0;
  995. }
  996. /**
  997. * link_css_set - a helper function to link a css_set to a cgroup
  998. * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
  999. * @cset: the css_set to be linked
  1000. * @cgrp: the destination cgroup
  1001. */
  1002. static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
  1003. struct cgroup *cgrp)
  1004. {
  1005. struct cgrp_cset_link *link;
  1006. BUG_ON(list_empty(tmp_links));
  1007. if (cgroup_on_dfl(cgrp))
  1008. cset->dfl_cgrp = cgrp;
  1009. link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
  1010. link->cset = cset;
  1011. link->cgrp = cgrp;
  1012. /*
  1013. * Always add links to the tail of the lists so that the lists are
1014. * in chronological order.
  1015. */
  1016. list_move_tail(&link->cset_link, &cgrp->cset_links);
  1017. list_add_tail(&link->cgrp_link, &cset->cgrp_links);
  1018. if (cgroup_parent(cgrp))
  1019. cgroup_get_live(cgrp);
  1020. }
  1021. /**
  1022. * find_css_set - return a new css_set with one cgroup updated
  1023. * @old_cset: the baseline css_set
  1024. * @cgrp: the cgroup to be updated
  1025. *
  1026. * Return a new css_set that's equivalent to @old_cset, but with @cgrp
  1027. * substituted into the appropriate hierarchy.
  1028. */
  1029. static struct css_set *find_css_set(struct css_set *old_cset,
  1030. struct cgroup *cgrp)
  1031. {
  1032. struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
  1033. struct css_set *cset;
  1034. struct list_head tmp_links;
  1035. struct cgrp_cset_link *link;
  1036. struct cgroup_subsys *ss;
  1037. unsigned long key;
  1038. int ssid;
  1039. lockdep_assert_held(&cgroup_mutex);
  1040. /* First see if we already have a cgroup group that matches
  1041. * the desired set */
  1042. spin_lock_irq(&css_set_lock);
  1043. cset = find_existing_css_set(old_cset, cgrp, template);
  1044. if (cset)
  1045. get_css_set(cset);
  1046. spin_unlock_irq(&css_set_lock);
  1047. if (cset)
  1048. return cset;
  1049. cset = kzalloc(sizeof(*cset), GFP_KERNEL);
  1050. if (!cset)
  1051. return NULL;
  1052. /* Allocate all the cgrp_cset_link objects that we'll need */
  1053. if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
  1054. kfree(cset);
  1055. return NULL;
  1056. }
  1057. refcount_set(&cset->refcount, 1);
  1058. cset->dom_cset = cset;
  1059. INIT_LIST_HEAD(&cset->tasks);
  1060. INIT_LIST_HEAD(&cset->mg_tasks);
  1061. INIT_LIST_HEAD(&cset->dying_tasks);
  1062. INIT_LIST_HEAD(&cset->task_iters);
  1063. INIT_LIST_HEAD(&cset->threaded_csets);
  1064. INIT_HLIST_NODE(&cset->hlist);
  1065. INIT_LIST_HEAD(&cset->cgrp_links);
  1066. INIT_LIST_HEAD(&cset->mg_preload_node);
  1067. INIT_LIST_HEAD(&cset->mg_node);
  1068. /* Copy the set of subsystem state objects generated in
  1069. * find_existing_css_set() */
  1070. memcpy(cset->subsys, template, sizeof(cset->subsys));
  1071. spin_lock_irq(&css_set_lock);
  1072. /* Add reference counts and links from the new css_set. */
  1073. list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
  1074. struct cgroup *c = link->cgrp;
  1075. if (c->root == cgrp->root)
  1076. c = cgrp;
  1077. link_css_set(&tmp_links, cset, c);
  1078. }
  1079. BUG_ON(!list_empty(&tmp_links));
  1080. css_set_count++;
  1081. /* Add @cset to the hash table */
  1082. key = css_set_hash(cset->subsys);
  1083. hash_add(css_set_table, &cset->hlist, key);
  1084. for_each_subsys(ss, ssid) {
  1085. struct cgroup_subsys_state *css = cset->subsys[ssid];
  1086. list_add_tail(&cset->e_cset_node[ssid],
  1087. &css->cgroup->e_csets[ssid]);
  1088. css_get(css);
  1089. }
  1090. spin_unlock_irq(&css_set_lock);
  1091. /*
  1092. * If @cset should be threaded, look up the matching dom_cset and
  1093. * link them up. We first fully initialize @cset then look for the
  1094. * dom_cset. It's simpler this way and safe as @cset is guaranteed
  1095. * to stay empty until we return.
  1096. */
  1097. if (cgroup_is_threaded(cset->dfl_cgrp)) {
  1098. struct css_set *dcset;
  1099. dcset = find_css_set(cset, cset->dfl_cgrp->dom_cgrp);
  1100. if (!dcset) {
  1101. put_css_set(cset);
  1102. return NULL;
  1103. }
  1104. spin_lock_irq(&css_set_lock);
  1105. cset->dom_cset = dcset;
  1106. list_add_tail(&cset->threaded_csets_node,
  1107. &dcset->threaded_csets);
  1108. spin_unlock_irq(&css_set_lock);
  1109. }
  1110. return cset;
  1111. }
  1112. struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
  1113. {
  1114. struct cgroup *root_cgrp = kf_root->kn->priv;
  1115. return root_cgrp->root;
  1116. }
  1117. static int cgroup_init_root_id(struct cgroup_root *root)
  1118. {
  1119. int id;
  1120. lockdep_assert_held(&cgroup_mutex);
  1121. id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
  1122. if (id < 0)
  1123. return id;
  1124. root->hierarchy_id = id;
  1125. return 0;
  1126. }
  1127. static void cgroup_exit_root_id(struct cgroup_root *root)
  1128. {
  1129. lockdep_assert_held(&cgroup_mutex);
  1130. idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
  1131. }
  1132. void cgroup_free_root(struct cgroup_root *root)
  1133. {
  1134. kfree(root);
  1135. }
  1136. static void cgroup_destroy_root(struct cgroup_root *root)
  1137. {
  1138. struct cgroup *cgrp = &root->cgrp;
  1139. struct cgrp_cset_link *link, *tmp_link;
  1140. trace_cgroup_destroy_root(root);
  1141. cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
  1142. BUG_ON(atomic_read(&root->nr_cgrps));
  1143. BUG_ON(!list_empty(&cgrp->self.children));
  1144. /* Rebind all subsystems back to the default hierarchy */
  1145. WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));
  1146. /*
  1147. * Release all the links from cset_links to this hierarchy's
  1148. * root cgroup
  1149. */
  1150. spin_lock_irq(&css_set_lock);
  1151. list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
  1152. list_del(&link->cset_link);
  1153. list_del(&link->cgrp_link);
  1154. kfree(link);
  1155. }
  1156. spin_unlock_irq(&css_set_lock);
  1157. if (!list_empty(&root->root_list)) {
  1158. list_del(&root->root_list);
  1159. cgroup_root_count--;
  1160. }
  1161. cgroup_exit_root_id(root);
  1162. mutex_unlock(&cgroup_mutex);
  1163. kernfs_destroy_root(root->kf_root);
  1164. cgroup_free_root(root);
  1165. }
  1166. /*
  1167. * look up cgroup associated with current task's cgroup namespace on the
  1168. * specified hierarchy
  1169. */
  1170. static struct cgroup *
  1171. current_cgns_cgroup_from_root(struct cgroup_root *root)
  1172. {
  1173. struct cgroup *res = NULL;
  1174. struct css_set *cset;
  1175. lockdep_assert_held(&css_set_lock);
  1176. rcu_read_lock();
  1177. cset = current->nsproxy->cgroup_ns->root_cset;
  1178. if (cset == &init_css_set) {
  1179. res = &root->cgrp;
  1180. } else if (root == &cgrp_dfl_root) {
  1181. res = cset->dfl_cgrp;
  1182. } else {
  1183. struct cgrp_cset_link *link;
  1184. list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
  1185. struct cgroup *c = link->cgrp;
  1186. if (c->root == root) {
  1187. res = c;
  1188. break;
  1189. }
  1190. }
  1191. }
  1192. rcu_read_unlock();
  1193. BUG_ON(!res);
  1194. return res;
  1195. }
  1196. /* look up cgroup associated with given css_set on the specified hierarchy */
  1197. static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
  1198. struct cgroup_root *root)
  1199. {
  1200. struct cgroup *res = NULL;
  1201. lockdep_assert_held(&cgroup_mutex);
  1202. lockdep_assert_held(&css_set_lock);
  1203. if (cset == &init_css_set) {
  1204. res = &root->cgrp;
  1205. } else if (root == &cgrp_dfl_root) {
  1206. res = cset->dfl_cgrp;
  1207. } else {
  1208. struct cgrp_cset_link *link;
  1209. list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
  1210. struct cgroup *c = link->cgrp;
  1211. if (c->root == root) {
  1212. res = c;
  1213. break;
  1214. }
  1215. }
  1216. }
  1217. BUG_ON(!res);
  1218. return res;
  1219. }
  1220. /*
  1221. * Return the cgroup for "task" from the given hierarchy. Must be
  1222. * called with cgroup_mutex and css_set_lock held.
  1223. */
  1224. struct cgroup *task_cgroup_from_root(struct task_struct *task,
  1225. struct cgroup_root *root)
  1226. {
  1227. /*
  1228. * No need to lock the task - since we hold css_set_lock the
  1229. * task can't change groups.
  1230. */
  1231. return cset_cgroup_from_root(task_css_set(task), root);
  1232. }
  1233. /*
  1234. * A task must hold cgroup_mutex to modify cgroups.
  1235. *
  1236. * Any task can increment and decrement the count field without lock.
  1237. * So in general, code holding cgroup_mutex can't rely on the count
  1238. * field not changing. However, if the count goes to zero, then only
  1239. * cgroup_attach_task() can increment it again. Because a count of zero
  1240. * means that no tasks are currently attached, therefore there is no
  1241. * way a task attached to that cgroup can fork (the other way to
  1242. * increment the count). So code holding cgroup_mutex can safely
  1243. * assume that if the count is zero, it will stay zero. Similarly, if
  1244. * a task holds cgroup_mutex on a cgroup with zero count, it
  1245. * knows that the cgroup won't be removed, as cgroup_rmdir()
  1246. * needs that mutex.
  1247. *
  1248. * A cgroup can only be deleted if both its 'count' of using tasks
  1249. * is zero, and its list of 'children' cgroups is empty. Since all
  1250. * tasks in the system use _some_ cgroup, and since there is always at
  1251. * least one task in the system (init, pid == 1), therefore, root cgroup
1252. * always has either child cgroups or attached tasks (or both). So we don't
  1253. * need a special hack to ensure that root cgroup cannot be deleted.
  1254. *
  1255. * P.S. One more locking exception. RCU is used to guard the
1256. * update of a task's cgroup pointer by cgroup_attach_task()
  1257. */
  1258. static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
  1259. static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
  1260. char *buf)
  1261. {
  1262. struct cgroup_subsys *ss = cft->ss;
  1263. if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
  1264. !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
  1265. const char *dbg = (cft->flags & CFTYPE_DEBUG) ? ".__DEBUG__." : "";
  1266. snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
  1267. dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
  1268. cft->name);
  1269. } else {
  1270. strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
  1271. }
  1272. return buf;
  1273. }
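/*
 * For example, cgroup_file_name() above turns a cft named "max" belonging
 * to the memory controller into "memory.max" on the default hierarchy;
 * with CFTYPE_NO_PREFIX or a CGRP_ROOT_NOPREFIX root the name is just
 * "max"; and a CFTYPE_DEBUG file additionally gets the ".__DEBUG__."
 * prefix, e.g. ".__DEBUG__.memory.max".
 */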
  1274. /**
  1275. * cgroup_file_mode - deduce file mode of a control file
  1276. * @cft: the control file in question
  1277. *
  1278. * S_IRUGO for read, S_IWUSR for write.
  1279. */
  1280. static umode_t cgroup_file_mode(const struct cftype *cft)
  1281. {
  1282. umode_t mode = 0;
  1283. if (cft->read_u64 || cft->read_s64 || cft->seq_show)
  1284. mode |= S_IRUGO;
  1285. if (cft->write_u64 || cft->write_s64 || cft->write) {
  1286. if (cft->flags & CFTYPE_WORLD_WRITABLE)
  1287. mode |= S_IWUGO;
  1288. else
  1289. mode |= S_IWUSR;
  1290. }
  1291. return mode;
  1292. }
  1293. /**
  1294. * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
  1295. * @subtree_control: the new subtree_control mask to consider
  1296. * @this_ss_mask: available subsystems
  1297. *
  1298. * On the default hierarchy, a subsystem may request other subsystems to be
  1299. * enabled together through its ->depends_on mask. In such cases, more
  1300. * subsystems than specified in "cgroup.subtree_control" may be enabled.
  1301. *
  1302. * This function calculates which subsystems need to be enabled if
  1303. * @subtree_control is to be applied while restricted to @this_ss_mask.
  1304. */
  1305. static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
  1306. {
  1307. u16 cur_ss_mask = subtree_control;
  1308. struct cgroup_subsys *ss;
  1309. int ssid;
  1310. lockdep_assert_held(&cgroup_mutex);
  1311. cur_ss_mask |= cgrp_dfl_implicit_ss_mask;
  1312. while (true) {
  1313. u16 new_ss_mask = cur_ss_mask;
  1314. do_each_subsys_mask(ss, ssid, cur_ss_mask) {
  1315. new_ss_mask |= ss->depends_on;
  1316. } while_each_subsys_mask();
  1317. /*
  1318. * Mask out subsystems which aren't available. This can
  1319. * happen only if some depended-upon subsystems were bound
  1320. * to non-default hierarchies.
  1321. */
  1322. new_ss_mask &= this_ss_mask;
  1323. if (new_ss_mask == cur_ss_mask)
  1324. break;
  1325. cur_ss_mask = new_ss_mask;
  1326. }
  1327. return cur_ss_mask;
  1328. }
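/*
 * Worked example with hypothetical controllers A, B and C where
 * A->depends_on = {B} and B->depends_on = {C}: enabling only A in
 * subtree_control converges as {A} -> {A, B} -> {A, B, C}, and the loop
 * stops on the first pass that adds nothing new. Any dependency bit not
 * present in @this_ss_mask is masked out again on every pass.
 */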
  1329. /**
  1330. * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
  1331. * @kn: the kernfs_node being serviced
  1332. *
  1333. * This helper undoes cgroup_kn_lock_live() and should be invoked before
  1334. * the method finishes if locking succeeded. Note that once this function
  1335. * returns the cgroup returned by cgroup_kn_lock_live() may become
  1336. * inaccessible any time. If the caller intends to continue to access the
  1337. * cgroup, it should pin it before invoking this function.
  1338. */
  1339. void cgroup_kn_unlock(struct kernfs_node *kn)
  1340. {
  1341. struct cgroup *cgrp;
  1342. if (kernfs_type(kn) == KERNFS_DIR)
  1343. cgrp = kn->priv;
  1344. else
  1345. cgrp = kn->parent->priv;
  1346. mutex_unlock(&cgroup_mutex);
  1347. kernfs_unbreak_active_protection(kn);
  1348. cgroup_put(cgrp);
  1349. }
  1350. /**
  1351. * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
  1352. * @kn: the kernfs_node being serviced
  1353. * @drain_offline: perform offline draining on the cgroup
  1354. *
  1355. * This helper is to be used by a cgroup kernfs method currently servicing
  1356. * @kn. It breaks the active protection, performs cgroup locking and
  1357. * verifies that the associated cgroup is alive. Returns the cgroup if
  1358. * alive; otherwise, %NULL. A successful return should be undone by a
  1359. * matching cgroup_kn_unlock() invocation. If @drain_offline is %true, the
  1360. * cgroup is drained of offlining csses before return.
  1361. *
  1362. * Any cgroup kernfs method implementation which requires locking the
  1363. * associated cgroup should use this helper. It avoids nesting cgroup
  1364. * locking under kernfs active protection and allows all kernfs operations
  1365. * including self-removal.
  1366. */
  1367. struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
  1368. {
  1369. struct cgroup *cgrp;
  1370. if (kernfs_type(kn) == KERNFS_DIR)
  1371. cgrp = kn->priv;
  1372. else
  1373. cgrp = kn->parent->priv;
  1374. /*
  1375. * We're gonna grab cgroup_mutex which nests outside kernfs
1376. * active_ref. The cgroup liveness check alone provides enough
  1377. * protection against removal. Ensure @cgrp stays accessible and
  1378. * break the active_ref protection.
  1379. */
  1380. if (!cgroup_tryget(cgrp))
  1381. return NULL;
  1382. kernfs_break_active_protection(kn);
  1383. if (drain_offline)
  1384. cgroup_lock_and_drain_offline(cgrp);
  1385. else
  1386. mutex_lock(&cgroup_mutex);
  1387. if (!cgroup_is_dead(cgrp))
  1388. return cgrp;
  1389. cgroup_kn_unlock(kn);
  1390. return NULL;
  1391. }
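/*
 * A minimal sketch of how a cgroup kernfs method is expected to pair the
 * two helpers above. The handler name and body are hypothetical; real
 * users such as the cgroup core's write handlers follow the same shape.
 */
#if 0	/* illustration only */
static ssize_t demo_write(struct kernfs_open_file *of, char *buf,
			  size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	/* ... operate on @cgrp while holding cgroup_mutex ... */

	cgroup_kn_unlock(of->kn);
	return nbytes;
}
#endif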
  1392. static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
  1393. {
  1394. char name[CGROUP_FILE_NAME_MAX];
  1395. lockdep_assert_held(&cgroup_mutex);
  1396. if (cft->file_offset) {
  1397. struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
  1398. struct cgroup_file *cfile = (void *)css + cft->file_offset;
  1399. spin_lock_irq(&cgroup_file_kn_lock);
  1400. cfile->kn = NULL;
  1401. spin_unlock_irq(&cgroup_file_kn_lock);
  1402. del_timer_sync(&cfile->notify_timer);
  1403. }
  1404. kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
  1405. }
  1406. /**
  1407. * css_clear_dir - remove subsys files in a cgroup directory
1408. * @css: target css
  1409. */
  1410. static void css_clear_dir(struct cgroup_subsys_state *css)
  1411. {
  1412. struct cgroup *cgrp = css->cgroup;
  1413. struct cftype *cfts;
  1414. if (!(css->flags & CSS_VISIBLE))
  1415. return;
  1416. css->flags &= ~CSS_VISIBLE;
  1417. if (!css->ss) {
  1418. if (cgroup_on_dfl(cgrp))
  1419. cfts = cgroup_base_files;
  1420. else
  1421. cfts = cgroup1_base_files;
  1422. cgroup_addrm_files(css, cgrp, cfts, false);
  1423. } else {
  1424. list_for_each_entry(cfts, &css->ss->cfts, node)
  1425. cgroup_addrm_files(css, cgrp, cfts, false);
  1426. }
  1427. }
  1428. /**
  1429. * css_populate_dir - create subsys files in a cgroup directory
  1430. * @css: target css
  1431. *
  1432. * On failure, no file is added.
  1433. */
  1434. static int css_populate_dir(struct cgroup_subsys_state *css)
  1435. {
  1436. struct cgroup *cgrp = css->cgroup;
  1437. struct cftype *cfts, *failed_cfts;
  1438. int ret;
  1439. if ((css->flags & CSS_VISIBLE) || !cgrp->kn)
  1440. return 0;
  1441. if (!css->ss) {
  1442. if (cgroup_on_dfl(cgrp))
  1443. cfts = cgroup_base_files;
  1444. else
  1445. cfts = cgroup1_base_files;
  1446. ret = cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
  1447. if (ret < 0)
  1448. return ret;
  1449. } else {
  1450. list_for_each_entry(cfts, &css->ss->cfts, node) {
  1451. ret = cgroup_addrm_files(css, cgrp, cfts, true);
  1452. if (ret < 0) {
  1453. failed_cfts = cfts;
  1454. goto err;
  1455. }
  1456. }
  1457. }
  1458. css->flags |= CSS_VISIBLE;
  1459. return 0;
  1460. err:
  1461. list_for_each_entry(cfts, &css->ss->cfts, node) {
  1462. if (cfts == failed_cfts)
  1463. break;
  1464. cgroup_addrm_files(css, cgrp, cfts, false);
  1465. }
  1466. return ret;
  1467. }
  1468. int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
  1469. {
  1470. struct cgroup *dcgrp = &dst_root->cgrp;
  1471. struct cgroup_subsys *ss;
  1472. int ssid, i, ret;
  1473. u16 dfl_disable_ss_mask = 0;
  1474. lockdep_assert_held(&cgroup_mutex);
  1475. do_each_subsys_mask(ss, ssid, ss_mask) {
  1476. /*
  1477. * If @ss has non-root csses attached to it, can't move.
  1478. * If @ss is an implicit controller, it is exempt from this
  1479. * rule and can be stolen.
  1480. */
  1481. if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
  1482. !ss->implicit_on_dfl)
  1483. return -EBUSY;
  1484. /* can't move between two non-dummy roots either */
  1485. if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
  1486. return -EBUSY;
  1487. /*
  1488. * Collect ssid's that need to be disabled from default
  1489. * hierarchy.
  1490. */
  1491. if (ss->root == &cgrp_dfl_root)
  1492. dfl_disable_ss_mask |= 1 << ssid;
  1493. } while_each_subsys_mask();
  1494. if (dfl_disable_ss_mask) {
  1495. struct cgroup *scgrp = &cgrp_dfl_root.cgrp;
  1496. /*
  1497. * Controllers from default hierarchy that need to be rebound
  1498. * are all disabled together in one go.
  1499. */
  1500. cgrp_dfl_root.subsys_mask &= ~dfl_disable_ss_mask;
  1501. WARN_ON(cgroup_apply_control(scgrp));
  1502. cgroup_finalize_control(scgrp, 0);
  1503. }
  1504. do_each_subsys_mask(ss, ssid, ss_mask) {
  1505. struct cgroup_root *src_root = ss->root;
  1506. struct cgroup *scgrp = &src_root->cgrp;
  1507. struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
  1508. struct css_set *cset;
  1509. WARN_ON(!css || cgroup_css(dcgrp, ss));
  1510. if (src_root != &cgrp_dfl_root) {
  1511. /* disable from the source */
  1512. src_root->subsys_mask &= ~(1 << ssid);
  1513. WARN_ON(cgroup_apply_control(scgrp));
  1514. cgroup_finalize_control(scgrp, 0);
  1515. }
  1516. /* rebind */
  1517. RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
  1518. rcu_assign_pointer(dcgrp->subsys[ssid], css);
  1519. ss->root = dst_root;
  1520. css->cgroup = dcgrp;
  1521. spin_lock_irq(&css_set_lock);
  1522. hash_for_each(css_set_table, i, cset, hlist)
  1523. list_move_tail(&cset->e_cset_node[ss->id],
  1524. &dcgrp->e_csets[ss->id]);
  1525. spin_unlock_irq(&css_set_lock);
  1526. /* default hierarchy doesn't enable controllers by default */
  1527. dst_root->subsys_mask |= 1 << ssid;
  1528. if (dst_root == &cgrp_dfl_root) {
  1529. static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
  1530. } else {
  1531. dcgrp->subtree_control |= 1 << ssid;
  1532. static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
  1533. }
  1534. ret = cgroup_apply_control(dcgrp);
  1535. if (ret)
  1536. pr_warn("partial failure to rebind %s controller (err=%d)\n",
  1537. ss->name, ret);
  1538. if (ss->bind)
  1539. ss->bind(css);
  1540. } while_each_subsys_mask();
  1541. kernfs_activate(dcgrp->kn);
  1542. return 0;
  1543. }
  1544. int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
  1545. struct kernfs_root *kf_root)
  1546. {
  1547. int len = 0;
  1548. char *buf = NULL;
  1549. struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
  1550. struct cgroup *ns_cgroup;
  1551. buf = kmalloc(PATH_MAX, GFP_KERNEL);
  1552. if (!buf)
  1553. return -ENOMEM;
  1554. spin_lock_irq(&css_set_lock);
  1555. ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
  1556. len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
  1557. spin_unlock_irq(&css_set_lock);
  1558. if (len >= PATH_MAX)
  1559. len = -ERANGE;
  1560. else if (len > 0) {
  1561. seq_escape(sf, buf, " \t\n\\");
  1562. len = 0;
  1563. }
  1564. kfree(buf);
  1565. return len;
  1566. }
  1567. enum cgroup2_param {
  1568. Opt_nsdelegate,
  1569. Opt_memory_localevents,
  1570. Opt_memory_recursiveprot,
  1571. nr__cgroup2_params
  1572. };
  1573. static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
  1574. fsparam_flag("nsdelegate", Opt_nsdelegate),
  1575. fsparam_flag("memory_localevents", Opt_memory_localevents),
  1576. fsparam_flag("memory_recursiveprot", Opt_memory_recursiveprot),
  1577. {}
  1578. };
  1579. static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param)
  1580. {
  1581. struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
  1582. struct fs_parse_result result;
  1583. int opt;
  1584. opt = fs_parse(fc, cgroup2_fs_parameters, param, &result);
  1585. if (opt < 0)
  1586. return opt;
  1587. switch (opt) {
  1588. case Opt_nsdelegate:
  1589. ctx->flags |= CGRP_ROOT_NS_DELEGATE;
  1590. return 0;
  1591. case Opt_memory_localevents:
  1592. ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
  1593. return 0;
  1594. case Opt_memory_recursiveprot:
  1595. ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
  1596. return 0;
  1597. }
  1598. return -EINVAL;
  1599. }
  1600. static void apply_cgroup_root_flags(unsigned int root_flags)
  1601. {
  1602. if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
  1603. if (root_flags & CGRP_ROOT_NS_DELEGATE)
  1604. cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
  1605. else
  1606. cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
  1607. if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
  1608. cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
  1609. else
  1610. cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
  1611. if (root_flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
  1612. cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
  1613. else
  1614. cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT;
  1615. }
  1616. }
  1617. static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
  1618. {
  1619. if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
  1620. seq_puts(seq, ",nsdelegate");
  1621. if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
  1622. seq_puts(seq, ",memory_localevents");
  1623. if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
  1624. seq_puts(seq, ",memory_recursiveprot");
  1625. return 0;
  1626. }
  1627. static int cgroup_reconfigure(struct fs_context *fc)
  1628. {
  1629. struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
  1630. apply_cgroup_root_flags(ctx->flags);
  1631. return 0;
  1632. }
  1633. static void init_cgroup_housekeeping(struct cgroup *cgrp)
  1634. {
  1635. struct cgroup_subsys *ss;
  1636. int ssid;
  1637. INIT_LIST_HEAD(&cgrp->self.sibling);
  1638. INIT_LIST_HEAD(&cgrp->self.children);
  1639. INIT_LIST_HEAD(&cgrp->cset_links);
  1640. INIT_LIST_HEAD(&cgrp->pidlists);
  1641. mutex_init(&cgrp->pidlist_mutex);
  1642. cgrp->self.cgroup = cgrp;
  1643. cgrp->self.flags |= CSS_ONLINE;
  1644. cgrp->dom_cgrp = cgrp;
  1645. cgrp->max_descendants = INT_MAX;
  1646. cgrp->max_depth = INT_MAX;
  1647. INIT_LIST_HEAD(&cgrp->rstat_css_list);
  1648. prev_cputime_init(&cgrp->prev_cputime);
  1649. for_each_subsys(ss, ssid)
  1650. INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
  1651. init_waitqueue_head(&cgrp->offline_waitq);
  1652. INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent);
  1653. }
  1654. void init_cgroup_root(struct cgroup_fs_context *ctx)
  1655. {
  1656. struct cgroup_root *root = ctx->root;
  1657. struct cgroup *cgrp = &root->cgrp;
  1658. INIT_LIST_HEAD(&root->root_list);
  1659. atomic_set(&root->nr_cgrps, 1);
  1660. cgrp->root = root;
  1661. init_cgroup_housekeeping(cgrp);
  1662. root->flags = ctx->flags;
  1663. if (ctx->release_agent)
  1664. strscpy(root->release_agent_path, ctx->release_agent, PATH_MAX);
  1665. if (ctx->name)
  1666. strscpy(root->name, ctx->name, MAX_CGROUP_ROOT_NAMELEN);
  1667. if (ctx->cpuset_clone_children)
  1668. set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
  1669. }
  1670. int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
  1671. {
  1672. LIST_HEAD(tmp_links);
  1673. struct cgroup *root_cgrp = &root->cgrp;
  1674. struct kernfs_syscall_ops *kf_sops;
  1675. struct css_set *cset;
  1676. int i, ret;
  1677. lockdep_assert_held(&cgroup_mutex);
  1678. ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
  1679. 0, GFP_KERNEL);
  1680. if (ret)
  1681. goto out;
  1682. /*
  1683. * We're accessing css_set_count without locking css_set_lock here,
  1684. * but that's OK - it can only be increased by someone holding
  1685. * cgroup_lock, and that's us. Later rebinding may disable
  1686. * controllers on the default hierarchy and thus create new csets,
  1687. * which can't be more than the existing ones. Allocate 2x.
  1688. */
  1689. ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links);
  1690. if (ret)
  1691. goto cancel_ref;
  1692. ret = cgroup_init_root_id(root);
  1693. if (ret)
  1694. goto cancel_ref;
  1695. kf_sops = root == &cgrp_dfl_root ?
  1696. &cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops;
  1697. root->kf_root = kernfs_create_root(kf_sops,
  1698. KERNFS_ROOT_CREATE_DEACTIVATED |
  1699. KERNFS_ROOT_SUPPORT_EXPORTOP |
  1700. KERNFS_ROOT_SUPPORT_USER_XATTR,
  1701. root_cgrp);
  1702. if (IS_ERR(root->kf_root)) {
  1703. ret = PTR_ERR(root->kf_root);
  1704. goto exit_root_id;
  1705. }
  1706. root_cgrp->kn = root->kf_root->kn;
  1707. WARN_ON_ONCE(cgroup_ino(root_cgrp) != 1);
  1708. root_cgrp->ancestor_ids[0] = cgroup_id(root_cgrp);
  1709. ret = css_populate_dir(&root_cgrp->self);
  1710. if (ret)
  1711. goto destroy_root;
  1712. ret = rebind_subsystems(root, ss_mask);
  1713. if (ret)
  1714. goto destroy_root;
  1715. ret = cgroup_bpf_inherit(root_cgrp);
  1716. WARN_ON_ONCE(ret);
  1717. trace_cgroup_setup_root(root);
  1718. /*
  1719. * There must be no failure case after here, since rebinding takes
  1720. * care of subsystems' refcounts, which are explicitly dropped in
  1721. * the failure exit path.
  1722. */
  1723. list_add(&root->root_list, &cgroup_roots);
  1724. cgroup_root_count++;
  1725. /*
  1726. * Link the root cgroup in this hierarchy into all the css_set
  1727. * objects.
  1728. */
  1729. spin_lock_irq(&css_set_lock);
  1730. hash_for_each(css_set_table, i, cset, hlist) {
  1731. link_css_set(&tmp_links, cset, root_cgrp);
  1732. if (css_set_populated(cset))
  1733. cgroup_update_populated(root_cgrp, true);
  1734. }
  1735. spin_unlock_irq(&css_set_lock);
  1736. BUG_ON(!list_empty(&root_cgrp->self.children));
  1737. BUG_ON(atomic_read(&root->nr_cgrps) != 1);
  1738. ret = 0;
  1739. goto out;
  1740. destroy_root:
  1741. kernfs_destroy_root(root->kf_root);
  1742. root->kf_root = NULL;
  1743. exit_root_id:
  1744. cgroup_exit_root_id(root);
  1745. cancel_ref:
  1746. percpu_ref_exit(&root_cgrp->self.refcnt);
  1747. out:
  1748. free_cgrp_cset_links(&tmp_links);
  1749. return ret;
  1750. }
  1751. int cgroup_do_get_tree(struct fs_context *fc)
  1752. {
  1753. struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
  1754. int ret;
  1755. ctx->kfc.root = ctx->root->kf_root;
  1756. if (fc->fs_type == &cgroup2_fs_type)
  1757. ctx->kfc.magic = CGROUP2_SUPER_MAGIC;
  1758. else
  1759. ctx->kfc.magic = CGROUP_SUPER_MAGIC;
  1760. ret = kernfs_get_tree(fc);
  1761. /*
1762. * In a non-init cgroup namespace, instead of the root cgroup's dentry,
  1763. * we return the dentry corresponding to the cgroupns->root_cgrp.
  1764. */
  1765. if (!ret && ctx->ns != &init_cgroup_ns) {
  1766. struct dentry *nsdentry;
  1767. struct super_block *sb = fc->root->d_sb;
  1768. struct cgroup *cgrp;
  1769. mutex_lock(&cgroup_mutex);
  1770. spin_lock_irq(&css_set_lock);
  1771. cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);
  1772. spin_unlock_irq(&css_set_lock);
  1773. mutex_unlock(&cgroup_mutex);
  1774. nsdentry = kernfs_node_dentry(cgrp->kn, sb);
  1775. dput(fc->root);
  1776. if (IS_ERR(nsdentry)) {
  1777. deactivate_locked_super(sb);
  1778. ret = PTR_ERR(nsdentry);
  1779. nsdentry = NULL;
  1780. }
  1781. fc->root = nsdentry;
  1782. }
  1783. if (!ctx->kfc.new_sb_created)
  1784. cgroup_put(&ctx->root->cgrp);
  1785. return ret;
  1786. }
  1787. /*
  1788. * Destroy a cgroup filesystem context.
  1789. */
  1790. static void cgroup_fs_context_free(struct fs_context *fc)
  1791. {
  1792. struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
  1793. kfree(ctx->name);
  1794. kfree(ctx->release_agent);
  1795. put_cgroup_ns(ctx->ns);
  1796. kernfs_free_fs_context(fc);
  1797. kfree(ctx);
  1798. }
  1799. static int cgroup_get_tree(struct fs_context *fc)
  1800. {
  1801. struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
  1802. int ret;
  1803. cgrp_dfl_visible = true;
  1804. cgroup_get_live(&cgrp_dfl_root.cgrp);
  1805. ctx->root = &cgrp_dfl_root;
  1806. ret = cgroup_do_get_tree(fc);
  1807. if (!ret)
  1808. apply_cgroup_root_flags(ctx->flags);
  1809. return ret;
  1810. }
  1811. static const struct fs_context_operations cgroup_fs_context_ops = {
  1812. .free = cgroup_fs_context_free,
  1813. .parse_param = cgroup2_parse_param,
  1814. .get_tree = cgroup_get_tree,
  1815. .reconfigure = cgroup_reconfigure,
  1816. };
  1817. static const struct fs_context_operations cgroup1_fs_context_ops = {
  1818. .free = cgroup_fs_context_free,
  1819. .parse_param = cgroup1_parse_param,
  1820. .get_tree = cgroup1_get_tree,
  1821. .reconfigure = cgroup1_reconfigure,
  1822. };
  1823. /*
  1824. * Initialise the cgroup filesystem creation/reconfiguration context. Notably,
  1825. * we select the namespace we're going to use.
  1826. */
  1827. static int cgroup_init_fs_context(struct fs_context *fc)
  1828. {
  1829. struct cgroup_fs_context *ctx;
  1830. ctx = kzalloc(sizeof(struct cgroup_fs_context), GFP_KERNEL);
  1831. if (!ctx)
  1832. return -ENOMEM;
  1833. ctx->ns = current->nsproxy->cgroup_ns;
  1834. get_cgroup_ns(ctx->ns);
  1835. fc->fs_private = &ctx->kfc;
  1836. if (fc->fs_type == &cgroup2_fs_type)
  1837. fc->ops = &cgroup_fs_context_ops;
  1838. else
  1839. fc->ops = &cgroup1_fs_context_ops;
  1840. put_user_ns(fc->user_ns);
  1841. fc->user_ns = get_user_ns(ctx->ns->user_ns);
  1842. fc->global = true;
  1843. return 0;
  1844. }
  1845. static void cgroup_kill_sb(struct super_block *sb)
  1846. {
  1847. struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
  1848. struct cgroup_root *root = cgroup_root_from_kf(kf_root);
  1849. /*
  1850. * If @root doesn't have any children, start killing it.
  1851. * This prevents new mounts by disabling percpu_ref_tryget_live().
  1852. * cgroup_mount() may wait for @root's release.
  1853. *
  1854. * And don't kill the default root.
  1855. */
  1856. if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
  1857. !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
  1858. cgroup_bpf_offline(&root->cgrp);
  1859. percpu_ref_kill(&root->cgrp.self.refcnt);
  1860. }
  1861. cgroup_put(&root->cgrp);
  1862. kernfs_kill_sb(sb);
  1863. }
  1864. struct file_system_type cgroup_fs_type = {
  1865. .name = "cgroup",
  1866. .init_fs_context = cgroup_init_fs_context,
  1867. .parameters = cgroup1_fs_parameters,
  1868. .kill_sb = cgroup_kill_sb,
  1869. .fs_flags = FS_USERNS_MOUNT,
  1870. };
  1871. static struct file_system_type cgroup2_fs_type = {
  1872. .name = "cgroup2",
  1873. .init_fs_context = cgroup_init_fs_context,
  1874. .parameters = cgroup2_fs_parameters,
  1875. .kill_sb = cgroup_kill_sb,
  1876. .fs_flags = FS_USERNS_MOUNT,
  1877. };
  1878. #ifdef CONFIG_CPUSETS
  1879. static const struct fs_context_operations cpuset_fs_context_ops = {
  1880. .get_tree = cgroup1_get_tree,
  1881. .free = cgroup_fs_context_free,
  1882. };
  1883. /*
  1884. * This is ugly, but preserves the userspace API for existing cpuset
  1885. * users. If someone tries to mount the "cpuset" filesystem, we
  1886. * silently switch it to mount "cgroup" instead
  1887. */
  1888. static int cpuset_init_fs_context(struct fs_context *fc)
  1889. {
  1890. char *agent = kstrdup("/sbin/cpuset_release_agent", GFP_USER);
  1891. struct cgroup_fs_context *ctx;
  1892. int err;
  1893. err = cgroup_init_fs_context(fc);
  1894. if (err) {
  1895. kfree(agent);
  1896. return err;
  1897. }
  1898. fc->ops = &cpuset_fs_context_ops;
  1899. ctx = cgroup_fc2context(fc);
  1900. ctx->subsys_mask = 1 << cpuset_cgrp_id;
  1901. ctx->flags |= CGRP_ROOT_NOPREFIX;
  1902. ctx->release_agent = agent;
  1903. get_filesystem(&cgroup_fs_type);
  1904. put_filesystem(fc->fs_type);
  1905. fc->fs_type = &cgroup_fs_type;
  1906. return 0;
  1907. }
  1908. static struct file_system_type cpuset_fs_type = {
  1909. .name = "cpuset",
  1910. .init_fs_context = cpuset_init_fs_context,
  1911. .fs_flags = FS_USERNS_MOUNT,
  1912. };
  1913. #endif
  1914. int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
  1915. struct cgroup_namespace *ns)
  1916. {
  1917. struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
  1918. return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
  1919. }
  1920. int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
  1921. struct cgroup_namespace *ns)
  1922. {
  1923. int ret;
  1924. mutex_lock(&cgroup_mutex);
  1925. spin_lock_irq(&css_set_lock);
  1926. ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
  1927. spin_unlock_irq(&css_set_lock);
  1928. mutex_unlock(&cgroup_mutex);
  1929. return ret;
  1930. }
  1931. EXPORT_SYMBOL_GPL(cgroup_path_ns);
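/*
 * Hypothetical in-kernel caller of cgroup_path_ns(), for illustration only
 * (the helper name below is made up): resolve a cgroup's path as seen from
 * the initial cgroup namespace and log it.
 */
#if 0	/* illustration only */
static void demo_log_cgroup_path(struct cgroup *cgrp)
{
	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
	int len;

	if (!buf)
		return;
	len = cgroup_path_ns(cgrp, buf, PATH_MAX, &init_cgroup_ns);
	if (len >= 0 && len < PATH_MAX)
		pr_info("cgroup path: %s\n", buf);
	kfree(buf);
}
#endif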
  1932. /**
  1933. * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
  1934. * @task: target task
  1935. * @buf: the buffer to write the path into
  1936. * @buflen: the length of the buffer
  1937. *
  1938. * Determine @task's cgroup on the first (the one with the lowest non-zero
  1939. * hierarchy_id) cgroup hierarchy and copy its path into @buf. This
  1940. * function grabs cgroup_mutex and shouldn't be used inside locks used by
  1941. * cgroup controller callbacks.
  1942. *
  1943. * Return value is the same as kernfs_path().
  1944. */
  1945. int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
  1946. {
  1947. struct cgroup_root *root;
  1948. struct cgroup *cgrp;
  1949. int hierarchy_id = 1;
  1950. int ret;
  1951. mutex_lock(&cgroup_mutex);
  1952. spin_lock_irq(&css_set_lock);
  1953. root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
  1954. if (root) {
  1955. cgrp = task_cgroup_from_root(task, root);
  1956. ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
  1957. } else {
  1958. /* if no hierarchy exists, everyone is in "/" */
  1959. ret = strlcpy(buf, "/", buflen);
  1960. }
  1961. spin_unlock_irq(&css_set_lock);
  1962. mutex_unlock(&cgroup_mutex);
  1963. return ret;
  1964. }
  1965. EXPORT_SYMBOL_GPL(task_cgroup_path);
  1966. /**
  1967. * cgroup_migrate_add_task - add a migration target task to a migration context
  1968. * @task: target task
  1969. * @mgctx: target migration context
  1970. *
  1971. * Add @task, which is a migration target, to @mgctx->tset. This function
1972. * becomes a noop if @task doesn't need to be migrated. @task's css_set
1973. * should have been added as a migration source and @task->cg_list will be
1974. * moved from the css_set's tasks list to the mg_tasks one.
  1975. */
  1976. static void cgroup_migrate_add_task(struct task_struct *task,
  1977. struct cgroup_mgctx *mgctx)
  1978. {
  1979. struct css_set *cset;
  1980. lockdep_assert_held(&css_set_lock);
  1981. /* @task either already exited or can't exit until the end */
  1982. if (task->flags & PF_EXITING)
  1983. return;
  1984. /* cgroup_threadgroup_rwsem protects racing against forks */
  1985. WARN_ON_ONCE(list_empty(&task->cg_list));
  1986. cset = task_css_set(task);
  1987. if (!cset->mg_src_cgrp)
  1988. return;
  1989. mgctx->tset.nr_tasks++;
  1990. list_move_tail(&task->cg_list, &cset->mg_tasks);
  1991. if (list_empty(&cset->mg_node))
  1992. list_add_tail(&cset->mg_node,
  1993. &mgctx->tset.src_csets);
  1994. if (list_empty(&cset->mg_dst_cset->mg_node))
  1995. list_add_tail(&cset->mg_dst_cset->mg_node,
  1996. &mgctx->tset.dst_csets);
  1997. }
  1998. /**
  1999. * cgroup_taskset_first - reset taskset and return the first task
  2000. * @tset: taskset of interest
  2001. * @dst_cssp: output variable for the destination css
  2002. *
  2003. * @tset iteration is initialized and the first task is returned.
  2004. */
  2005. struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
  2006. struct cgroup_subsys_state **dst_cssp)
  2007. {
  2008. tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
  2009. tset->cur_task = NULL;
  2010. return cgroup_taskset_next(tset, dst_cssp);
  2011. }
  2012. EXPORT_SYMBOL_GPL(cgroup_taskset_first);
  2013. /**
  2014. * cgroup_taskset_next - iterate to the next task in taskset
  2015. * @tset: taskset of interest
  2016. * @dst_cssp: output variable for the destination css
  2017. *
  2018. * Return the next task in @tset. Iteration must have been initialized
  2019. * with cgroup_taskset_first().
  2020. */
  2021. struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
  2022. struct cgroup_subsys_state **dst_cssp)
  2023. {
  2024. struct css_set *cset = tset->cur_cset;
  2025. struct task_struct *task = tset->cur_task;
  2026. while (&cset->mg_node != tset->csets) {
  2027. if (!task)
  2028. task = list_first_entry(&cset->mg_tasks,
  2029. struct task_struct, cg_list);
  2030. else
  2031. task = list_next_entry(task, cg_list);
  2032. if (&task->cg_list != &cset->mg_tasks) {
  2033. tset->cur_cset = cset;
  2034. tset->cur_task = task;
  2035. /*
  2036. * This function may be called both before and
  2037. * after cgroup_taskset_migrate(). The two cases
  2038. * can be distinguished by looking at whether @cset
  2039. * has its ->mg_dst_cset set.
  2040. */
  2041. if (cset->mg_dst_cset)
  2042. *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
  2043. else
  2044. *dst_cssp = cset->subsys[tset->ssid];
  2045. return task;
  2046. }
  2047. cset = list_next_entry(cset, mg_node);
  2048. task = NULL;
  2049. }
  2050. return NULL;
  2051. }
  2052. EXPORT_SYMBOL_GPL(cgroup_taskset_next);
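/*
 * A minimal sketch of how a controller callback walks a taskset with the
 * two iterators above; the callback name is hypothetical. Each task is
 * returned together with its destination css for the calling subsystem.
 */
#if 0	/* illustration only */
static void demo_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *dst_css;
	struct task_struct *task;

	for (task = cgroup_taskset_first(tset, &dst_css); task;
	     task = cgroup_taskset_next(tset, &dst_css))
		pr_debug("migrating pid %d\n", task_pid_nr(task));
}
#endif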
  2053. /**
  2054. * cgroup_taskset_migrate - migrate a taskset
  2055. * @mgctx: migration context
  2056. *
  2057. * Migrate tasks in @mgctx as setup by migration preparation functions.
  2058. * This function fails iff one of the ->can_attach callbacks fails and
  2059. * guarantees that either all or none of the tasks in @mgctx are migrated.
  2060. * @mgctx is consumed regardless of success.
  2061. */
  2062. static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
  2063. {
  2064. struct cgroup_taskset *tset = &mgctx->tset;
  2065. struct cgroup_subsys *ss;
  2066. struct task_struct *task, *tmp_task;
  2067. struct css_set *cset, *tmp_cset;
  2068. int ssid, failed_ssid, ret;
  2069. /* check that we can legitimately attach to the cgroup */
  2070. if (tset->nr_tasks) {
  2071. do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
  2072. if (ss->can_attach) {
  2073. tset->ssid = ssid;
  2074. ret = ss->can_attach(tset);
  2075. if (ret) {
  2076. failed_ssid = ssid;
  2077. goto out_cancel_attach;
  2078. }
  2079. }
  2080. } while_each_subsys_mask();
  2081. }
  2082. /*
  2083. * Now that we're guaranteed success, proceed to move all tasks to
  2084. * the new cgroup. There are no failure cases after here, so this
  2085. * is the commit point.
  2086. */
  2087. spin_lock_irq(&css_set_lock);
  2088. list_for_each_entry(cset, &tset->src_csets, mg_node) {
  2089. list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
  2090. struct css_set *from_cset = task_css_set(task);
  2091. struct css_set *to_cset = cset->mg_dst_cset;
  2092. get_css_set(to_cset);
  2093. to_cset->nr_tasks++;
  2094. css_set_move_task(task, from_cset, to_cset, true);
  2095. from_cset->nr_tasks--;
  2096. /*
  2097. * If the source or destination cgroup is frozen,
2098. * the task might need to change its state.
  2099. */
  2100. cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp,
  2101. to_cset->dfl_cgrp);
  2102. put_css_set_locked(from_cset);
  2103. }
  2104. }
  2105. spin_unlock_irq(&css_set_lock);
  2106. /*
  2107. * Migration is committed, all target tasks are now on dst_csets.
  2108. * Nothing is sensitive to fork() after this point. Notify
  2109. * controllers that migration is complete.
  2110. */
  2111. tset->csets = &tset->dst_csets;
  2112. if (tset->nr_tasks) {
  2113. do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
  2114. if (ss->attach) {
  2115. tset->ssid = ssid;
  2116. trace_android_vh_cgroup_attach(ss, tset);
  2117. ss->attach(tset);
  2118. }
  2119. } while_each_subsys_mask();
  2120. }
  2121. ret = 0;
  2122. goto out_release_tset;
  2123. out_cancel_attach:
  2124. if (tset->nr_tasks) {
  2125. do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
  2126. if (ssid == failed_ssid)
  2127. break;
  2128. if (ss->cancel_attach) {
  2129. tset->ssid = ssid;
  2130. ss->cancel_attach(tset);
  2131. }
  2132. } while_each_subsys_mask();
  2133. }
  2134. out_release_tset:
  2135. spin_lock_irq(&css_set_lock);
  2136. list_splice_init(&tset->dst_csets, &tset->src_csets);
  2137. list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
  2138. list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
  2139. list_del_init(&cset->mg_node);
  2140. }
  2141. spin_unlock_irq(&css_set_lock);
  2142. /*
  2143. * Re-initialize the cgroup_taskset structure in case it is reused
  2144. * again in another cgroup_migrate_add_task()/cgroup_migrate_execute()
  2145. * iteration.
  2146. */
  2147. tset->nr_tasks = 0;
  2148. tset->csets = &tset->src_csets;
  2149. return ret;
  2150. }
  2151. /**
  2152. * cgroup_migrate_vet_dst - verify whether a cgroup can be migration destination
  2153. * @dst_cgrp: destination cgroup to test
  2154. *
  2155. * On the default hierarchy, except for the mixable, (possible) thread root
  2156. * and threaded cgroups, subtree_control must be zero for migration
  2157. * destination cgroups with tasks so that child cgroups don't compete
  2158. * against tasks.
  2159. */
  2160. int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp)
  2161. {
  2162. /* v1 doesn't have any restriction */
  2163. if (!cgroup_on_dfl(dst_cgrp))
  2164. return 0;
  2165. /* verify @dst_cgrp can host resources */
  2166. if (!cgroup_is_valid_domain(dst_cgrp->dom_cgrp))
  2167. return -EOPNOTSUPP;
  2168. /* mixables don't care */
  2169. if (cgroup_is_mixable(dst_cgrp))
  2170. return 0;
  2171. /*
  2172. * If @dst_cgrp is already or can become a thread root or is
  2173. * threaded, it doesn't matter.
  2174. */
  2175. if (cgroup_can_be_thread_root(dst_cgrp) || cgroup_is_threaded(dst_cgrp))
  2176. return 0;
  2177. /* apply no-internal-process constraint */
  2178. if (dst_cgrp->subtree_control)
  2179. return -EBUSY;
  2180. return 0;
  2181. }
  2182. /**
  2183. * cgroup_migrate_finish - cleanup after attach
  2184. * @mgctx: migration context
  2185. *
  2186. * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See
  2187. * those functions for details.
  2188. */
  2189. void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
  2190. {
  2191. LIST_HEAD(preloaded);
  2192. struct css_set *cset, *tmp_cset;
  2193. lockdep_assert_held(&cgroup_mutex);
  2194. spin_lock_irq(&css_set_lock);
  2195. list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded);
  2196. list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded);
  2197. list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) {
  2198. cset->mg_src_cgrp = NULL;
  2199. cset->mg_dst_cgrp = NULL;
  2200. cset->mg_dst_cset = NULL;
  2201. list_del_init(&cset->mg_preload_node);
  2202. put_css_set_locked(cset);
  2203. }
  2204. spin_unlock_irq(&css_set_lock);
  2205. }
  2206. /**
  2207. * cgroup_migrate_add_src - add a migration source css_set
  2208. * @src_cset: the source css_set to add
  2209. * @dst_cgrp: the destination cgroup
  2210. * @mgctx: migration context
  2211. *
  2212. * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin
2213. * @src_cset and add it to @mgctx->preloaded_src_csets, which should later
2214. * be cleaned up by cgroup_migrate_finish().
  2215. *
  2216. * This function may be called without holding cgroup_threadgroup_rwsem
  2217. * even if the target is a process. Threads may be created and destroyed
  2218. * but as long as cgroup_mutex is not dropped, no new css_set can be put
  2219. * into play and the preloaded css_sets are guaranteed to cover all
  2220. * migrations.
  2221. */
  2222. void cgroup_migrate_add_src(struct css_set *src_cset,
  2223. struct cgroup *dst_cgrp,
  2224. struct cgroup_mgctx *mgctx)
  2225. {
  2226. struct cgroup *src_cgrp;
  2227. lockdep_assert_held(&cgroup_mutex);
  2228. lockdep_assert_held(&css_set_lock);
  2229. /*
2230. * If ->dead, @src_cset is associated with one or more dead cgroups
  2231. * and doesn't contain any migratable tasks. Ignore it early so
  2232. * that the rest of migration path doesn't get confused by it.
  2233. */
  2234. if (src_cset->dead)
  2235. return;
  2236. src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
  2237. if (!list_empty(&src_cset->mg_preload_node))
  2238. return;
  2239. WARN_ON(src_cset->mg_src_cgrp);
  2240. WARN_ON(src_cset->mg_dst_cgrp);
  2241. WARN_ON(!list_empty(&src_cset->mg_tasks));
  2242. WARN_ON(!list_empty(&src_cset->mg_node));
  2243. src_cset->mg_src_cgrp = src_cgrp;
  2244. src_cset->mg_dst_cgrp = dst_cgrp;
  2245. get_css_set(src_cset);
  2246. list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets);
  2247. }
  2248. /**
  2249. * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
  2250. * @mgctx: migration context
  2251. *
  2252. * Tasks are about to be moved and all the source css_sets have been
  2253. * preloaded to @mgctx->preloaded_src_csets. This function looks up and
2254. * pins all destination css_sets, links each to its source, and appends them
  2255. * to @mgctx->preloaded_dst_csets.
  2256. *
  2257. * This function must be called after cgroup_migrate_add_src() has been
  2258. * called on each migration source css_set. After migration is performed
  2259. * using cgroup_migrate(), cgroup_migrate_finish() must be called on
  2260. * @mgctx.
  2261. */
  2262. int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
  2263. {
  2264. struct css_set *src_cset, *tmp_cset;
  2265. lockdep_assert_held(&cgroup_mutex);
  2266. /* look up the dst cset for each src cset and link it to src */
  2267. list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
  2268. mg_preload_node) {
  2269. struct css_set *dst_cset;
  2270. struct cgroup_subsys *ss;
  2271. int ssid;
  2272. dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
  2273. if (!dst_cset)
  2274. return -ENOMEM;
  2275. WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
  2276. /*
2277. * If src cset equals dst, it's a noop. Drop the src.
  2278. * cgroup_migrate() will skip the cset too. Note that we
  2279. * can't handle src == dst as some nodes are used by both.
  2280. */
  2281. if (src_cset == dst_cset) {
  2282. src_cset->mg_src_cgrp = NULL;
  2283. src_cset->mg_dst_cgrp = NULL;
  2284. list_del_init(&src_cset->mg_preload_node);
  2285. put_css_set(src_cset);
  2286. put_css_set(dst_cset);
  2287. continue;
  2288. }
  2289. src_cset->mg_dst_cset = dst_cset;
  2290. if (list_empty(&dst_cset->mg_preload_node))
  2291. list_add_tail(&dst_cset->mg_preload_node,
  2292. &mgctx->preloaded_dst_csets);
  2293. else
  2294. put_css_set(dst_cset);
  2295. for_each_subsys(ss, ssid)
  2296. if (src_cset->subsys[ssid] != dst_cset->subsys[ssid])
  2297. mgctx->ss_mask |= 1 << ssid;
  2298. }
  2299. return 0;
  2300. }
  2301. /**
  2302. * cgroup_migrate - migrate a process or task to a cgroup
  2303. * @leader: the leader of the process or the task to migrate
  2304. * @threadgroup: whether @leader points to the whole process or a single task
  2305. * @mgctx: migration context
  2306. *
  2307. * Migrate a process or task denoted by @leader. If migrating a process,
  2308. * the caller must be holding cgroup_threadgroup_rwsem. The caller is also
  2309. * responsible for invoking cgroup_migrate_add_src() and
  2310. * cgroup_migrate_prepare_dst() on the targets before invoking this
  2311. * function and following up with cgroup_migrate_finish().
  2312. *
  2313. * As long as a controller's ->can_attach() doesn't fail, this function is
  2314. * guaranteed to succeed. This means that, excluding ->can_attach()
  2315. * failure, when migrating multiple targets, the success or failure can be
2316. * decided for all targets by invoking cgroup_migrate_prepare_dst() before
2317. * actually starting the migration.
  2318. */
  2319. int cgroup_migrate(struct task_struct *leader, bool threadgroup,
  2320. struct cgroup_mgctx *mgctx)
  2321. {
  2322. struct task_struct *task;
  2323. /*
  2324. * Prevent freeing of tasks while we take a snapshot. Tasks that are
  2325. * already PF_EXITING could be freed from underneath us unless we
  2326. * take an rcu_read_lock.
  2327. */
  2328. spin_lock_irq(&css_set_lock);
  2329. rcu_read_lock();
  2330. task = leader;
  2331. do {
  2332. cgroup_migrate_add_task(task, mgctx);
  2333. if (!threadgroup)
  2334. break;
  2335. } while_each_thread(leader, task);
  2336. rcu_read_unlock();
  2337. spin_unlock_irq(&css_set_lock);
  2338. return cgroup_migrate_execute(mgctx);
  2339. }
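/*
 * Illustrative sketch: the preload/prepare/commit/finish sequence described
 * in the comments above, condensed for a single task. It mirrors
 * cgroup_attach_task() below; example_migrate_one_task() is a hypothetical
 * name, and a caller migrating a whole process would also hold
 * cgroup_threadgroup_rwsem.
 */
static int __maybe_unused example_migrate_one_task(struct task_struct *task,
						   struct cgroup *dst_cgrp)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	/* 1. preload the task's current css_set as a migration source */
	spin_lock_irq(&css_set_lock);
	cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
	spin_unlock_irq(&css_set_lock);

	/* 2. look up and pin the destination css_sets, 3. commit */
	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (!ret)
		ret = cgroup_migrate(task, false, &mgctx);

	/* 4. drop the references taken while preloading */
	cgroup_migrate_finish(&mgctx);
	return ret;
}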
  2340. /**
  2341. * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
  2342. * @dst_cgrp: the cgroup to attach to
  2343. * @leader: the task or the leader of the threadgroup to be attached
  2344. * @threadgroup: attach the whole threadgroup?
  2345. *
  2346. * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
  2347. */
  2348. int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
  2349. bool threadgroup)
  2350. {
  2351. DEFINE_CGROUP_MGCTX(mgctx);
  2352. struct task_struct *task;
  2353. int ret = 0;
  2354. /* look up all src csets */
  2355. spin_lock_irq(&css_set_lock);
  2356. rcu_read_lock();
  2357. task = leader;
  2358. do {
  2359. cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
  2360. if (!threadgroup)
  2361. break;
  2362. } while_each_thread(leader, task);
  2363. rcu_read_unlock();
  2364. spin_unlock_irq(&css_set_lock);
  2365. /* prepare dst csets and commit */
  2366. ret = cgroup_migrate_prepare_dst(&mgctx);
  2367. if (!ret)
  2368. ret = cgroup_migrate(leader, threadgroup, &mgctx);
  2369. cgroup_migrate_finish(&mgctx);
  2370. if (!ret)
  2371. TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup);
  2372. return ret;
  2373. }
  2374. struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
  2375. bool *locked,
  2376. struct cgroup *dst_cgrp)
  2377. __acquires(&cgroup_threadgroup_rwsem)
  2378. {
  2379. struct task_struct *tsk;
  2380. pid_t pid;
  2381. bool force_migration = false;
  2382. if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
  2383. return ERR_PTR(-EINVAL);
  2384. /*
  2385. * If we migrate a single thread, we don't care about threadgroup
2386. * stability. If the thread is `current`, it can't exit(2) from under
2387. * us or change its PID through exec(2). We exclude
  2388. * cgroup_update_dfl_csses and other cgroup_{proc,thread}s_write
  2389. * callers by cgroup_mutex.
  2390. * Therefore, we can skip the global lock.
  2391. */
  2392. lockdep_assert_held(&cgroup_mutex);
  2393. if (pid || threadgroup) {
  2394. percpu_down_write(&cgroup_threadgroup_rwsem);
  2395. *locked = true;
  2396. } else {
  2397. *locked = false;
  2398. }
  2399. rcu_read_lock();
  2400. if (pid) {
  2401. tsk = find_task_by_vpid(pid);
  2402. if (!tsk) {
  2403. tsk = ERR_PTR(-ESRCH);
  2404. goto out_unlock_threadgroup;
  2405. }
  2406. } else {
  2407. tsk = current;
  2408. }
  2409. if (threadgroup)
  2410. tsk = tsk->group_leader;
  2411. if (tsk->flags & PF_KTHREAD)
  2412. trace_android_rvh_cgroup_force_kthread_migration(tsk, dst_cgrp, &force_migration);
  2413. /*
  2414. * kthreads may acquire PF_NO_SETAFFINITY during initialization.
  2415. * If userland migrates such a kthread to a non-root cgroup, it can
2416. * become trapped in a cpuset, or an RT kthread may be born in a
2417. * cgroup with no rt_runtime allocated. Just say no.
  2418. */
  2419. if (!force_migration && (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY))) {
  2420. tsk = ERR_PTR(-EINVAL);
  2421. goto out_unlock_threadgroup;
  2422. }
  2423. get_task_struct(tsk);
  2424. goto out_unlock_rcu;
  2425. out_unlock_threadgroup:
  2426. if (*locked) {
  2427. percpu_up_write(&cgroup_threadgroup_rwsem);
  2428. *locked = false;
  2429. }
  2430. out_unlock_rcu:
  2431. rcu_read_unlock();
  2432. return tsk;
  2433. }
  2434. void cgroup_procs_write_finish(struct task_struct *task, bool locked)
  2435. __releases(&cgroup_threadgroup_rwsem)
  2436. {
  2437. struct cgroup_subsys *ss;
  2438. int ssid;
  2439. /* release reference from cgroup_procs_write_start() */
  2440. put_task_struct(task);
  2441. if (locked)
  2442. percpu_up_write(&cgroup_threadgroup_rwsem);
  2443. for_each_subsys(ss, ssid)
  2444. if (ss->post_attach)
  2445. ss->post_attach();
  2446. }
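/*
 * Illustrative sketch: roughly how a "cgroup.procs"-style write handler ties
 * cgroup_procs_write_start(), cgroup_attach_task() and
 * cgroup_procs_write_finish() together. example_procs_write() is a
 * hypothetical name and the delegation/permission checks done by the real
 * handlers are omitted.
 */
static ssize_t __maybe_unused example_procs_write(struct kernfs_open_file *of,
						  char *buf, size_t nbytes,
						  loff_t off)
{
	struct cgroup *dst_cgrp;
	struct task_struct *task;
	ssize_t ret;
	bool locked;

	dst_cgrp = cgroup_kn_lock_live(of->kn, false);	/* takes cgroup_mutex */
	if (!dst_cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, true, &locked, dst_cgrp);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/* permission checks against the delegation boundary would go here */

	ret = cgroup_attach_task(dst_cgrp, task, true);
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}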
  2447. static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask)
  2448. {
  2449. struct cgroup_subsys *ss;
  2450. bool printed = false;
  2451. int ssid;
  2452. do_each_subsys_mask(ss, ssid, ss_mask) {
  2453. if (printed)
  2454. seq_putc(seq, ' ');
  2455. seq_puts(seq, ss->name);
  2456. printed = true;
  2457. } while_each_subsys_mask();
  2458. if (printed)
  2459. seq_putc(seq, '\n');
  2460. }
  2461. /* show controllers which are enabled from the parent */
  2462. static int cgroup_controllers_show(struct seq_file *seq, void *v)
  2463. {
  2464. struct cgroup *cgrp = seq_css(seq)->cgroup;
  2465. cgroup_print_ss_mask(seq, cgroup_control(cgrp));
  2466. return 0;
  2467. }
  2468. /* show controllers which are enabled for a given cgroup's children */
  2469. static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
  2470. {
  2471. struct cgroup *cgrp = seq_css(seq)->cgroup;
  2472. cgroup_print_ss_mask(seq, cgrp->subtree_control);
  2473. return 0;
  2474. }
  2475. /**
  2476. * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
  2477. * @cgrp: root of the subtree to update csses for
  2478. *
  2479. * @cgrp's control masks have changed and its subtree's css associations
  2480. * need to be updated accordingly. This function looks up all css_sets
  2481. * which are attached to the subtree, creates the matching updated css_sets
  2482. * and migrates the tasks to the new ones.
  2483. */
  2484. static int cgroup_update_dfl_csses(struct cgroup *cgrp)
  2485. {
  2486. DEFINE_CGROUP_MGCTX(mgctx);
  2487. struct cgroup_subsys_state *d_css;
  2488. struct cgroup *dsct;
  2489. struct css_set *src_cset;
  2490. int ret;
  2491. lockdep_assert_held(&cgroup_mutex);
  2492. percpu_down_write(&cgroup_threadgroup_rwsem);
  2493. /* look up all csses currently attached to @cgrp's subtree */
  2494. spin_lock_irq(&css_set_lock);
  2495. cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
  2496. struct cgrp_cset_link *link;
  2497. list_for_each_entry(link, &dsct->cset_links, cset_link)
  2498. cgroup_migrate_add_src(link->cset, dsct, &mgctx);
  2499. }
  2500. spin_unlock_irq(&css_set_lock);
2501. /* each cset was preloaded above with its own cgroup as the destination */
  2502. ret = cgroup_migrate_prepare_dst(&mgctx);
  2503. if (ret)
  2504. goto out_finish;
  2505. spin_lock_irq(&css_set_lock);
  2506. list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) {
  2507. struct task_struct *task, *ntask;
  2508. /* all tasks in src_csets need to be migrated */
  2509. list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
  2510. cgroup_migrate_add_task(task, &mgctx);
  2511. }
  2512. spin_unlock_irq(&css_set_lock);
  2513. ret = cgroup_migrate_execute(&mgctx);
  2514. out_finish:
  2515. cgroup_migrate_finish(&mgctx);
  2516. percpu_up_write(&cgroup_threadgroup_rwsem);
  2517. return ret;
  2518. }
  2519. /**
  2520. * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses
  2521. * @cgrp: root of the target subtree
  2522. *
  2523. * Because css offlining is asynchronous, userland may try to re-enable a
  2524. * controller while the previous css is still around. This function grabs
  2525. * cgroup_mutex and drains the previous css instances of @cgrp's subtree.
  2526. */
  2527. void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
  2528. __acquires(&cgroup_mutex)
  2529. {
  2530. struct cgroup *dsct;
  2531. struct cgroup_subsys_state *d_css;
  2532. struct cgroup_subsys *ss;
  2533. int ssid;
  2534. restart:
  2535. mutex_lock(&cgroup_mutex);
  2536. cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
  2537. for_each_subsys(ss, ssid) {
  2538. struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
  2539. DEFINE_WAIT(wait);
  2540. if (!css || !percpu_ref_is_dying(&css->refcnt))
  2541. continue;
  2542. cgroup_get_live(dsct);
  2543. prepare_to_wait(&dsct->offline_waitq, &wait,
  2544. TASK_UNINTERRUPTIBLE);
  2545. mutex_unlock(&cgroup_mutex);
  2546. schedule();
  2547. finish_wait(&dsct->offline_waitq, &wait);
  2548. cgroup_put(dsct);
  2549. goto restart;
  2550. }
  2551. }
  2552. }
  2553. /**
  2554. * cgroup_save_control - save control masks and dom_cgrp of a subtree
  2555. * @cgrp: root of the target subtree
  2556. *
  2557. * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
  2558. * respective old_ prefixed fields for @cgrp's subtree including @cgrp
  2559. * itself.
  2560. */
  2561. static void cgroup_save_control(struct cgroup *cgrp)
  2562. {
  2563. struct cgroup *dsct;
  2564. struct cgroup_subsys_state *d_css;
  2565. cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
  2566. dsct->old_subtree_control = dsct->subtree_control;
  2567. dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
  2568. dsct->old_dom_cgrp = dsct->dom_cgrp;
  2569. }
  2570. }
  2571. /**
  2572. * cgroup_propagate_control - refresh control masks of a subtree
  2573. * @cgrp: root of the target subtree
  2574. *
  2575. * For @cgrp and its subtree, ensure ->subtree_ss_mask matches
  2576. * ->subtree_control and propagate controller availability through the
  2577. * subtree so that descendants don't have unavailable controllers enabled.
  2578. */
  2579. static void cgroup_propagate_control(struct cgroup *cgrp)
  2580. {
  2581. struct cgroup *dsct;
  2582. struct cgroup_subsys_state *d_css;
  2583. cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
  2584. dsct->subtree_control &= cgroup_control(dsct);
  2585. dsct->subtree_ss_mask =
  2586. cgroup_calc_subtree_ss_mask(dsct->subtree_control,
  2587. cgroup_ss_mask(dsct));
  2588. }
  2589. }
  2590. /**
  2591. * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
  2592. * @cgrp: root of the target subtree
  2593. *
  2594. * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
  2595. * respective old_ prefixed fields for @cgrp's subtree including @cgrp
  2596. * itself.
  2597. */
  2598. static void cgroup_restore_control(struct cgroup *cgrp)
  2599. {
  2600. struct cgroup *dsct;
  2601. struct cgroup_subsys_state *d_css;
  2602. cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
  2603. dsct->subtree_control = dsct->old_subtree_control;
  2604. dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
  2605. dsct->dom_cgrp = dsct->old_dom_cgrp;
  2606. }
  2607. }
  2608. static bool css_visible(struct cgroup_subsys_state *css)
  2609. {
  2610. struct cgroup_subsys *ss = css->ss;
  2611. struct cgroup *cgrp = css->cgroup;
  2612. if (cgroup_control(cgrp) & (1 << ss->id))
  2613. return true;
  2614. if (!(cgroup_ss_mask(cgrp) & (1 << ss->id)))
  2615. return false;
  2616. return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl;
  2617. }
  2618. /**
  2619. * cgroup_apply_control_enable - enable or show csses according to control
  2620. * @cgrp: root of the target subtree
  2621. *
  2622. * Walk @cgrp's subtree and create new csses or make the existing ones
  2623. * visible. A css is created invisible if it's being implicitly enabled
2624. * through dependency. An invisible css is made visible when userland
  2625. * explicitly enables it.
  2626. *
  2627. * Returns 0 on success, -errno on failure. On failure, csses which have
  2628. * been processed already aren't cleaned up. The caller is responsible for
  2629. * cleaning up with cgroup_apply_control_disable().
  2630. */
  2631. static int cgroup_apply_control_enable(struct cgroup *cgrp)
  2632. {
  2633. struct cgroup *dsct;
  2634. struct cgroup_subsys_state *d_css;
  2635. struct cgroup_subsys *ss;
  2636. int ssid, ret;
  2637. cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
  2638. for_each_subsys(ss, ssid) {
  2639. struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
  2640. if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
  2641. continue;
  2642. if (!css) {
  2643. css = css_create(dsct, ss);
  2644. if (IS_ERR(css))
  2645. return PTR_ERR(css);
  2646. }
  2647. WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
  2648. if (css_visible(css)) {
  2649. ret = css_populate_dir(css);
  2650. if (ret)
  2651. return ret;
  2652. }
  2653. }
  2654. }
  2655. return 0;
  2656. }
  2657. /**
  2658. * cgroup_apply_control_disable - kill or hide csses according to control
  2659. * @cgrp: root of the target subtree
  2660. *
  2661. * Walk @cgrp's subtree and kill and hide csses so that they match
  2662. * cgroup_ss_mask() and cgroup_visible_mask().
  2663. *
2664. * A css is hidden when userland requests it to be disabled while other
2665. * subsystems still depend on it. The css must not actively control
2666. * resources and must be in its vanilla state if it's made visible again later.
  2667. * Controllers which may be depended upon should provide ->css_reset() for
  2668. * this purpose.
  2669. */
  2670. static void cgroup_apply_control_disable(struct cgroup *cgrp)
  2671. {
  2672. struct cgroup *dsct;
  2673. struct cgroup_subsys_state *d_css;
  2674. struct cgroup_subsys *ss;
  2675. int ssid;
  2676. cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
  2677. for_each_subsys(ss, ssid) {
  2678. struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
  2679. if (!css)
  2680. continue;
  2681. WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
  2682. if (css->parent &&
  2683. !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
  2684. kill_css(css);
  2685. } else if (!css_visible(css)) {
  2686. css_clear_dir(css);
  2687. if (ss->css_reset)
  2688. ss->css_reset(css);
  2689. }
  2690. }
  2691. }
  2692. }
  2693. /**
  2694. * cgroup_apply_control - apply control mask updates to the subtree
  2695. * @cgrp: root of the target subtree
  2696. *
2697. * Subsystems can be enabled and disabled in a subtree using the following
  2698. * steps.
  2699. *
  2700. * 1. Call cgroup_save_control() to stash the current state.
  2701. * 2. Update ->subtree_control masks in the subtree as desired.
  2702. * 3. Call cgroup_apply_control() to apply the changes.
  2703. * 4. Optionally perform other related operations.
  2704. * 5. Call cgroup_finalize_control() to finish up.
  2705. *
  2706. * This function implements step 3 and propagates the mask changes
2707. * throughout @cgrp's subtree, updates csses accordingly and performs
2708. * process migrations.
  2709. */
  2710. static int cgroup_apply_control(struct cgroup *cgrp)
  2711. {
  2712. int ret;
  2713. cgroup_propagate_control(cgrp);
  2714. ret = cgroup_apply_control_enable(cgrp);
  2715. if (ret)
  2716. return ret;
  2717. /*
  2718. * At this point, cgroup_e_css_by_mask() results reflect the new csses
  2719. * making the following cgroup_update_dfl_csses() properly update
  2720. * css associations of all tasks in the subtree.
  2721. */
  2722. ret = cgroup_update_dfl_csses(cgrp);
  2723. if (ret)
  2724. return ret;
  2725. return 0;
  2726. }
  2727. /**
  2728. * cgroup_finalize_control - finalize control mask update
  2729. * @cgrp: root of the target subtree
  2730. * @ret: the result of the update
  2731. *
  2732. * Finalize control mask update. See cgroup_apply_control() for more info.
  2733. */
  2734. static void cgroup_finalize_control(struct cgroup *cgrp, int ret)
  2735. {
  2736. if (ret) {
  2737. cgroup_restore_control(cgrp);
  2738. cgroup_propagate_control(cgrp);
  2739. }
  2740. cgroup_apply_control_disable(cgrp);
  2741. }
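/*
 * Illustrative sketch: the five-step sequence listed above
 * cgroup_apply_control(), condensed into one helper. It mirrors what
 * cgroup_subtree_control_write() below does when enabling controllers;
 * example_apply_enable() is a hypothetical name.
 */
static int __maybe_unused example_apply_enable(struct cgroup *cgrp, u16 enable)
{
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	cgroup_save_control(cgrp);		/* 1. stash the current state */
	cgrp->subtree_control |= enable;	/* 2. update the control mask */
	ret = cgroup_apply_control(cgrp);	/* 3. create csses, migrate tasks */
	/* 4. other related operations would go here */
	cgroup_finalize_control(cgrp, ret);	/* 5. restore on failure, clean up */
	return ret;
}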
  2742. static int cgroup_vet_subtree_control_enable(struct cgroup *cgrp, u16 enable)
  2743. {
  2744. u16 domain_enable = enable & ~cgrp_dfl_threaded_ss_mask;
  2745. /* if nothing is getting enabled, nothing to worry about */
  2746. if (!enable)
  2747. return 0;
  2748. /* can @cgrp host any resources? */
  2749. if (!cgroup_is_valid_domain(cgrp->dom_cgrp))
  2750. return -EOPNOTSUPP;
  2751. /* mixables don't care */
  2752. if (cgroup_is_mixable(cgrp))
  2753. return 0;
  2754. if (domain_enable) {
  2755. /* can't enable domain controllers inside a thread subtree */
  2756. if (cgroup_is_thread_root(cgrp) || cgroup_is_threaded(cgrp))
  2757. return -EOPNOTSUPP;
  2758. } else {
  2759. /*
2760. * Threaded controllers can handle internal competition
  2761. * and are always allowed inside a (prospective) thread
  2762. * subtree.
  2763. */
  2764. if (cgroup_can_be_thread_root(cgrp) || cgroup_is_threaded(cgrp))
  2765. return 0;
  2766. }
  2767. /*
  2768. * Controllers can't be enabled for a cgroup with tasks to avoid
  2769. * child cgroups competing against tasks.
  2770. */
  2771. if (cgroup_has_tasks(cgrp))
  2772. return -EBUSY;
  2773. return 0;
  2774. }
  2775. /* change the enabled child controllers for a cgroup in the default hierarchy */
  2776. static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
  2777. char *buf, size_t nbytes,
  2778. loff_t off)
  2779. {
  2780. u16 enable = 0, disable = 0;
  2781. struct cgroup *cgrp, *child;
  2782. struct cgroup_subsys *ss;
  2783. char *tok;
  2784. int ssid, ret;
  2785. /*
  2786. * Parse input - space separated list of subsystem names prefixed
  2787. * with either + or -.
  2788. */
  2789. buf = strstrip(buf);
  2790. while ((tok = strsep(&buf, " "))) {
  2791. if (tok[0] == '\0')
  2792. continue;
  2793. do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) {
  2794. if (!cgroup_ssid_enabled(ssid) ||
  2795. strcmp(tok + 1, ss->name))
  2796. continue;
  2797. if (*tok == '+') {
  2798. enable |= 1 << ssid;
  2799. disable &= ~(1 << ssid);
  2800. } else if (*tok == '-') {
  2801. disable |= 1 << ssid;
  2802. enable &= ~(1 << ssid);
  2803. } else {
  2804. return -EINVAL;
  2805. }
  2806. break;
  2807. } while_each_subsys_mask();
  2808. if (ssid == CGROUP_SUBSYS_COUNT)
  2809. return -EINVAL;
  2810. }
  2811. cgrp = cgroup_kn_lock_live(of->kn, true);
  2812. if (!cgrp)
  2813. return -ENODEV;
  2814. for_each_subsys(ss, ssid) {
  2815. if (enable & (1 << ssid)) {
  2816. if (cgrp->subtree_control & (1 << ssid)) {
  2817. enable &= ~(1 << ssid);
  2818. continue;
  2819. }
  2820. if (!(cgroup_control(cgrp) & (1 << ssid))) {
  2821. ret = -ENOENT;
  2822. goto out_unlock;
  2823. }
  2824. } else if (disable & (1 << ssid)) {
  2825. if (!(cgrp->subtree_control & (1 << ssid))) {
  2826. disable &= ~(1 << ssid);
  2827. continue;
  2828. }
  2829. /* a child has it enabled? */
  2830. cgroup_for_each_live_child(child, cgrp) {
  2831. if (child->subtree_control & (1 << ssid)) {
  2832. ret = -EBUSY;
  2833. goto out_unlock;
  2834. }
  2835. }
  2836. }
  2837. }
  2838. if (!enable && !disable) {
  2839. ret = 0;
  2840. goto out_unlock;
  2841. }
  2842. ret = cgroup_vet_subtree_control_enable(cgrp, enable);
  2843. if (ret)
  2844. goto out_unlock;
  2845. /* save and update control masks and prepare csses */
  2846. cgroup_save_control(cgrp);
  2847. cgrp->subtree_control |= enable;
  2848. cgrp->subtree_control &= ~disable;
  2849. ret = cgroup_apply_control(cgrp);
  2850. cgroup_finalize_control(cgrp, ret);
  2851. if (ret)
  2852. goto out_unlock;
  2853. kernfs_activate(cgrp->kn);
  2854. out_unlock:
  2855. cgroup_kn_unlock(of->kn);
  2856. return ret ?: nbytes;
  2857. }
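/*
 * Illustrative standalone userspace sketch: the write handler above accepts
 * a space-separated list of "+name"/"-name" tokens. The mount point and
 * group name below are assumptions; the named controllers must appear in
 * the parent's cgroup.controllers file.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int example_set_subtree_control(void)
{
	const char *req = "+memory -pids";
	int fd = open("/sys/fs/cgroup/mygroup/cgroup.subtree_control", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return -1;
	/* may fail with EBUSY if "mygroup" still has member tasks, see
	 * cgroup_vet_subtree_control_enable() above */
	if (write(fd, req, strlen(req)) < 0)
		ret = -1;
	close(fd);
	return ret;
}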
  2858. /**
  2859. * cgroup_enable_threaded - make @cgrp threaded
  2860. * @cgrp: the target cgroup
  2861. *
  2862. * Called when "threaded" is written to the cgroup.type interface file and
  2863. * tries to make @cgrp threaded and join the parent's resource domain.
  2864. * This function is never called on the root cgroup as cgroup.type doesn't
  2865. * exist on it.
  2866. */
  2867. static int cgroup_enable_threaded(struct cgroup *cgrp)
  2868. {
  2869. struct cgroup *parent = cgroup_parent(cgrp);
  2870. struct cgroup *dom_cgrp = parent->dom_cgrp;
  2871. struct cgroup *dsct;
  2872. struct cgroup_subsys_state *d_css;
  2873. int ret;
  2874. lockdep_assert_held(&cgroup_mutex);
  2875. /* noop if already threaded */
  2876. if (cgroup_is_threaded(cgrp))
  2877. return 0;
  2878. /*
2879. * If @cgrp is populated or has domain controllers enabled, it
  2880. * can't be switched. While the below cgroup_can_be_thread_root()
  2881. * test can catch the same conditions, that's only when @parent is
  2882. * not mixable, so let's check it explicitly.
  2883. */
  2884. if (cgroup_is_populated(cgrp) ||
  2885. cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
  2886. return -EOPNOTSUPP;
  2887. /* we're joining the parent's domain, ensure its validity */
  2888. if (!cgroup_is_valid_domain(dom_cgrp) ||
  2889. !cgroup_can_be_thread_root(dom_cgrp))
  2890. return -EOPNOTSUPP;
  2891. /*
  2892. * The following shouldn't cause actual migrations and should
  2893. * always succeed.
  2894. */
  2895. cgroup_save_control(cgrp);
  2896. cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
  2897. if (dsct == cgrp || cgroup_is_threaded(dsct))
  2898. dsct->dom_cgrp = dom_cgrp;
  2899. ret = cgroup_apply_control(cgrp);
  2900. if (!ret)
  2901. parent->nr_threaded_children++;
  2902. cgroup_finalize_control(cgrp, ret);
  2903. return ret;
  2904. }
  2905. static int cgroup_type_show(struct seq_file *seq, void *v)
  2906. {
  2907. struct cgroup *cgrp = seq_css(seq)->cgroup;
  2908. if (cgroup_is_threaded(cgrp))
  2909. seq_puts(seq, "threaded\n");
  2910. else if (!cgroup_is_valid_domain(cgrp))
  2911. seq_puts(seq, "domain invalid\n");
  2912. else if (cgroup_is_thread_root(cgrp))
  2913. seq_puts(seq, "domain threaded\n");
  2914. else
  2915. seq_puts(seq, "domain\n");
  2916. return 0;
  2917. }
  2918. static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf,
  2919. size_t nbytes, loff_t off)
  2920. {
  2921. struct cgroup *cgrp;
  2922. int ret;
  2923. /* only switching to threaded mode is supported */
  2924. if (strcmp(strstrip(buf), "threaded"))
  2925. return -EINVAL;
  2926. /* drain dying csses before we re-apply (threaded) subtree control */
  2927. cgrp = cgroup_kn_lock_live(of->kn, true);
  2928. if (!cgrp)
  2929. return -ENOENT;
  2930. /* threaded can only be enabled */
  2931. ret = cgroup_enable_threaded(cgrp);
  2932. cgroup_kn_unlock(of->kn);
  2933. return ret ?: nbytes;
  2934. }
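/*
 * Illustrative standalone userspace sketch: only the string "threaded" is
 * accepted by cgroup_type_write() above; reading cgroup.type back reports
 * one of the values printed by cgroup_type_show(). The path is an
 * assumption.
 */
#include <fcntl.h>
#include <unistd.h>

static int example_make_threaded(void)
{
	int fd = open("/sys/fs/cgroup/mygroup/child/cgroup.type", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return -1;
	if (write(fd, "threaded", 8) < 0)	/* e.g. EOPNOTSUPP, see above */
		ret = -1;
	close(fd);
	return ret;
}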
  2935. static int cgroup_max_descendants_show(struct seq_file *seq, void *v)
  2936. {
  2937. struct cgroup *cgrp = seq_css(seq)->cgroup;
  2938. int descendants = READ_ONCE(cgrp->max_descendants);
  2939. if (descendants == INT_MAX)
  2940. seq_puts(seq, "max\n");
  2941. else
  2942. seq_printf(seq, "%d\n", descendants);
  2943. return 0;
  2944. }
  2945. static ssize_t cgroup_max_descendants_write(struct kernfs_open_file *of,
  2946. char *buf, size_t nbytes, loff_t off)
  2947. {
  2948. struct cgroup *cgrp;
  2949. int descendants;
  2950. ssize_t ret;
  2951. buf = strstrip(buf);
  2952. if (!strcmp(buf, "max")) {
  2953. descendants = INT_MAX;
  2954. } else {
  2955. ret = kstrtoint(buf, 0, &descendants);
  2956. if (ret)
  2957. return ret;
  2958. }
  2959. if (descendants < 0)
  2960. return -ERANGE;
  2961. cgrp = cgroup_kn_lock_live(of->kn, false);
  2962. if (!cgrp)
  2963. return -ENOENT;
  2964. cgrp->max_descendants = descendants;
  2965. cgroup_kn_unlock(of->kn);
  2966. return nbytes;
  2967. }
  2968. static int cgroup_max_depth_show(struct seq_file *seq, void *v)
  2969. {
  2970. struct cgroup *cgrp = seq_css(seq)->cgroup;
  2971. int depth = READ_ONCE(cgrp->max_depth);
  2972. if (depth == INT_MAX)
  2973. seq_puts(seq, "max\n");
  2974. else
  2975. seq_printf(seq, "%d\n", depth);
  2976. return 0;
  2977. }
  2978. static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of,
  2979. char *buf, size_t nbytes, loff_t off)
  2980. {
  2981. struct cgroup *cgrp;
  2982. ssize_t ret;
  2983. int depth;
  2984. buf = strstrip(buf);
  2985. if (!strcmp(buf, "max")) {
  2986. depth = INT_MAX;
  2987. } else {
  2988. ret = kstrtoint(buf, 0, &depth);
  2989. if (ret)
  2990. return ret;
  2991. }
  2992. if (depth < 0)
  2993. return -ERANGE;
  2994. cgrp = cgroup_kn_lock_live(of->kn, false);
  2995. if (!cgrp)
  2996. return -ENOENT;
  2997. cgrp->max_depth = depth;
  2998. cgroup_kn_unlock(of->kn);
  2999. return nbytes;
  3000. }
  3001. static int cgroup_events_show(struct seq_file *seq, void *v)
  3002. {
  3003. struct cgroup *cgrp = seq_css(seq)->cgroup;
  3004. seq_printf(seq, "populated %d\n", cgroup_is_populated(cgrp));
  3005. seq_printf(seq, "frozen %d\n", test_bit(CGRP_FROZEN, &cgrp->flags));
  3006. return 0;
  3007. }
  3008. static int cgroup_stat_show(struct seq_file *seq, void *v)
  3009. {
  3010. struct cgroup *cgroup = seq_css(seq)->cgroup;
  3011. seq_printf(seq, "nr_descendants %d\n",
  3012. cgroup->nr_descendants);
  3013. seq_printf(seq, "nr_dying_descendants %d\n",
  3014. cgroup->nr_dying_descendants);
  3015. return 0;
  3016. }
  3017. static int __maybe_unused cgroup_extra_stat_show(struct seq_file *seq,
  3018. struct cgroup *cgrp, int ssid)
  3019. {
  3020. struct cgroup_subsys *ss = cgroup_subsys[ssid];
  3021. struct cgroup_subsys_state *css;
  3022. int ret;
  3023. if (!ss->css_extra_stat_show)
  3024. return 0;
  3025. css = cgroup_tryget_css(cgrp, ss);
  3026. if (!css)
  3027. return 0;
  3028. ret = ss->css_extra_stat_show(seq, css);
  3029. css_put(css);
  3030. return ret;
  3031. }
  3032. static int cpu_stat_show(struct seq_file *seq, void *v)
  3033. {
  3034. struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
  3035. int ret = 0;
  3036. cgroup_base_stat_cputime_show(seq);
  3037. #ifdef CONFIG_CGROUP_SCHED
  3038. ret = cgroup_extra_stat_show(seq, cgrp, cpu_cgrp_id);
  3039. #endif
  3040. return ret;
  3041. }
  3042. #ifdef CONFIG_PSI
  3043. static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
  3044. {
  3045. struct cgroup *cgrp = seq_css(seq)->cgroup;
  3046. struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
  3047. return psi_show(seq, psi, PSI_IO);
  3048. }
  3049. static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
  3050. {
  3051. struct cgroup *cgrp = seq_css(seq)->cgroup;
  3052. struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
  3053. return psi_show(seq, psi, PSI_MEM);
  3054. }
  3055. static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
  3056. {
  3057. struct cgroup *cgrp = seq_css(seq)->cgroup;
  3058. struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
  3059. return psi_show(seq, psi, PSI_CPU);
  3060. }
  3061. static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
  3062. size_t nbytes, enum psi_res res)
  3063. {
  3064. struct cgroup_file_ctx *ctx = of->priv;
  3065. struct psi_trigger *new;
  3066. struct cgroup *cgrp;
  3067. struct psi_group *psi;
  3068. cgrp = cgroup_kn_lock_live(of->kn, false);
  3069. if (!cgrp)
  3070. return -ENODEV;
  3071. cgroup_get(cgrp);
  3072. cgroup_kn_unlock(of->kn);
  3073. /* Allow only one trigger per file descriptor */
  3074. if (ctx->psi.trigger) {
  3075. cgroup_put(cgrp);
  3076. return -EBUSY;
  3077. }
  3078. psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
  3079. new = psi_trigger_create(psi, buf, nbytes, res);
  3080. if (IS_ERR(new)) {
  3081. cgroup_put(cgrp);
  3082. return PTR_ERR(new);
  3083. }
  3084. smp_store_release(&ctx->psi.trigger, new);
  3085. cgroup_put(cgrp);
  3086. return nbytes;
  3087. }
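/*
 * Illustrative standalone userspace sketch: registering a PSI trigger on a
 * cgroup's memory.pressure file and waiting for it with poll(), as handled
 * by cgroup_pressure_write() and cgroup_pressure_poll() above. The trigger
 * format "<some|full> <stall us> <window us>" follows the kernel's PSI
 * documentation; the path and thresholds are assumptions.
 */
#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

static int example_wait_for_memory_pressure(void)
{
	const char *trig = "some 150000 1000000";	/* 150ms stall per 1s window */
	struct pollfd pfd;
	int fd = open("/sys/fs/cgroup/mygroup/memory.pressure", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return -1;
	if (write(fd, trig, strlen(trig) + 1) < 0) {
		close(fd);
		return -1;
	}
	pfd.fd = fd;
	pfd.events = POLLPRI;		/* a firing trigger is reported as POLLPRI */
	if (poll(&pfd, 1, -1) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}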
  3088. static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
  3089. char *buf, size_t nbytes,
  3090. loff_t off)
  3091. {
  3092. return cgroup_pressure_write(of, buf, nbytes, PSI_IO);
  3093. }
  3094. static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
  3095. char *buf, size_t nbytes,
  3096. loff_t off)
  3097. {
  3098. return cgroup_pressure_write(of, buf, nbytes, PSI_MEM);
  3099. }
  3100. static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
  3101. char *buf, size_t nbytes,
  3102. loff_t off)
  3103. {
  3104. return cgroup_pressure_write(of, buf, nbytes, PSI_CPU);
  3105. }
  3106. static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
  3107. poll_table *pt)
  3108. {
  3109. struct cgroup_file_ctx *ctx = of->priv;
  3110. return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
  3111. }
  3112. static void cgroup_pressure_release(struct kernfs_open_file *of)
  3113. {
  3114. struct cgroup_file_ctx *ctx = of->priv;
  3115. psi_trigger_destroy(ctx->psi.trigger);
  3116. }
  3117. bool cgroup_psi_enabled(void)
  3118. {
  3119. return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0;
  3120. }
  3121. #else /* CONFIG_PSI */
  3122. bool cgroup_psi_enabled(void)
  3123. {
  3124. return false;
  3125. }
  3126. #endif /* CONFIG_PSI */
  3127. static int cgroup_freeze_show(struct seq_file *seq, void *v)
  3128. {
  3129. struct cgroup *cgrp = seq_css(seq)->cgroup;
  3130. seq_printf(seq, "%d\n", cgrp->freezer.freeze);
  3131. return 0;
  3132. }
  3133. static ssize_t cgroup_freeze_write(struct kernfs_open_file *of,
  3134. char *buf, size_t nbytes, loff_t off)
  3135. {
  3136. struct cgroup *cgrp;
  3137. ssize_t ret;
  3138. int freeze;
  3139. ret = kstrtoint(strstrip(buf), 0, &freeze);
  3140. if (ret)
  3141. return ret;
  3142. if (freeze < 0 || freeze > 1)
  3143. return -ERANGE;
  3144. cgrp = cgroup_kn_lock_live(of->kn, false);
  3145. if (!cgrp)
  3146. return -ENOENT;
  3147. cgroup_freeze(cgrp, freeze);
  3148. cgroup_kn_unlock(of->kn);
  3149. return nbytes;
  3150. }
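/*
 * Illustrative standalone userspace sketch: freezing a cgroup by writing
 * "1" to cgroup.freeze. The state change is asynchronous; the "frozen"
 * field of cgroup.events (printed by cgroup_events_show() above) flips to 1
 * once all member tasks are frozen. Paths are assumptions.
 */
#include <fcntl.h>
#include <unistd.h>

static int example_freeze_group(void)
{
	char buf[128];
	ssize_t n;
	int fd = open("/sys/fs/cgroup/mygroup/cgroup.freeze", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) < 0) {
		close(fd);
		return -1;
	}
	close(fd);

	fd = open("/sys/fs/cgroup/mygroup/cgroup.events", O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);	/* "populated ...\nfrozen ...\n" */
	if (n >= 0)
		buf[n] = '\0';
	close(fd);
	return 0;
}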
  3151. static int cgroup_file_open(struct kernfs_open_file *of)
  3152. {
  3153. struct cftype *cft = of->kn->priv;
  3154. struct cgroup_file_ctx *ctx;
  3155. int ret;
  3156. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  3157. if (!ctx)
  3158. return -ENOMEM;
  3159. ctx->ns = current->nsproxy->cgroup_ns;
  3160. get_cgroup_ns(ctx->ns);
  3161. of->priv = ctx;
  3162. if (!cft->open)
  3163. return 0;
  3164. ret = cft->open(of);
  3165. if (ret) {
  3166. put_cgroup_ns(ctx->ns);
  3167. kfree(ctx);
  3168. }
  3169. return ret;
  3170. }
  3171. static void cgroup_file_release(struct kernfs_open_file *of)
  3172. {
  3173. struct cftype *cft = of->kn->priv;
  3174. struct cgroup_file_ctx *ctx = of->priv;
  3175. if (cft->release)
  3176. cft->release(of);
  3177. put_cgroup_ns(ctx->ns);
  3178. kfree(ctx);
  3179. }
  3180. static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
  3181. size_t nbytes, loff_t off)
  3182. {
  3183. struct cgroup_file_ctx *ctx = of->priv;
  3184. struct cgroup *cgrp = of->kn->parent->priv;
  3185. struct cftype *cft = of->kn->priv;
  3186. struct cgroup_subsys_state *css;
  3187. int ret;
  3188. if (!nbytes)
  3189. return 0;
  3190. /*
  3191. * If namespaces are delegation boundaries, disallow writes to
3192. * files in a non-init namespace root from inside the namespace
  3193. * except for the files explicitly marked delegatable -
  3194. * cgroup.procs and cgroup.subtree_control.
  3195. */
  3196. if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
  3197. !(cft->flags & CFTYPE_NS_DELEGATABLE) &&
  3198. ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp)
  3199. return -EPERM;
  3200. if (cft->write)
  3201. return cft->write(of, buf, nbytes, off);
  3202. /*
  3203. * kernfs guarantees that a file isn't deleted with operations in
3204. * flight, which means that the matching css is, and stays, alive and
3205. * doesn't need to be pinned. The RCU locking isn't strictly necessary
3206. * either; it's just for the convenience of using cgroup_css().
  3207. */
  3208. rcu_read_lock();
  3209. css = cgroup_css(cgrp, cft->ss);
  3210. rcu_read_unlock();
  3211. if (cft->write_u64) {
  3212. unsigned long long v;
  3213. ret = kstrtoull(buf, 0, &v);
  3214. if (!ret)
  3215. ret = cft->write_u64(css, cft, v);
  3216. } else if (cft->write_s64) {
  3217. long long v;
  3218. ret = kstrtoll(buf, 0, &v);
  3219. if (!ret)
  3220. ret = cft->write_s64(css, cft, v);
  3221. } else {
  3222. ret = -EINVAL;
  3223. }
  3224. return ret ?: nbytes;
  3225. }
  3226. static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
  3227. {
  3228. struct cftype *cft = of->kn->priv;
  3229. if (cft->poll)
  3230. return cft->poll(of, pt);
  3231. return kernfs_generic_poll(of, pt);
  3232. }
  3233. static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
  3234. {
  3235. return seq_cft(seq)->seq_start(seq, ppos);
  3236. }
  3237. static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
  3238. {
  3239. return seq_cft(seq)->seq_next(seq, v, ppos);
  3240. }
  3241. static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
  3242. {
  3243. if (seq_cft(seq)->seq_stop)
  3244. seq_cft(seq)->seq_stop(seq, v);
  3245. }
  3246. static int cgroup_seqfile_show(struct seq_file *m, void *arg)
  3247. {
  3248. struct cftype *cft = seq_cft(m);
  3249. struct cgroup_subsys_state *css = seq_css(m);
  3250. if (cft->seq_show)
  3251. return cft->seq_show(m, arg);
  3252. if (cft->read_u64)
  3253. seq_printf(m, "%llu\n", cft->read_u64(css, cft));
  3254. else if (cft->read_s64)
  3255. seq_printf(m, "%lld\n", cft->read_s64(css, cft));
  3256. else
  3257. return -EINVAL;
  3258. return 0;
  3259. }
  3260. static struct kernfs_ops cgroup_kf_single_ops = {
  3261. .atomic_write_len = PAGE_SIZE,
  3262. .open = cgroup_file_open,
  3263. .release = cgroup_file_release,
  3264. .write = cgroup_file_write,
  3265. .poll = cgroup_file_poll,
  3266. .seq_show = cgroup_seqfile_show,
  3267. };
  3268. static struct kernfs_ops cgroup_kf_ops = {
  3269. .atomic_write_len = PAGE_SIZE,
  3270. .open = cgroup_file_open,
  3271. .release = cgroup_file_release,
  3272. .write = cgroup_file_write,
  3273. .poll = cgroup_file_poll,
  3274. .seq_start = cgroup_seqfile_start,
  3275. .seq_next = cgroup_seqfile_next,
  3276. .seq_stop = cgroup_seqfile_stop,
  3277. .seq_show = cgroup_seqfile_show,
  3278. };
  3279. /* set uid and gid of cgroup dirs and files to that of the creator */
  3280. static int cgroup_kn_set_ugid(struct kernfs_node *kn)
  3281. {
  3282. struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
  3283. .ia_uid = current_fsuid(),
  3284. .ia_gid = current_fsgid(), };
  3285. if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
  3286. gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
  3287. return 0;
  3288. return kernfs_setattr(kn, &iattr);
  3289. }
  3290. static void cgroup_file_notify_timer(struct timer_list *timer)
  3291. {
  3292. cgroup_file_notify(container_of(timer, struct cgroup_file,
  3293. notify_timer));
  3294. }
  3295. static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
  3296. struct cftype *cft)
  3297. {
  3298. char name[CGROUP_FILE_NAME_MAX];
  3299. struct kernfs_node *kn;
  3300. struct lock_class_key *key = NULL;
  3301. int ret;
  3302. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  3303. key = &cft->lockdep_key;
  3304. #endif
  3305. kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
  3306. cgroup_file_mode(cft),
  3307. GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
  3308. 0, cft->kf_ops, cft,
  3309. NULL, key);
  3310. if (IS_ERR(kn))
  3311. return PTR_ERR(kn);
  3312. ret = cgroup_kn_set_ugid(kn);
  3313. if (ret) {
  3314. kernfs_remove(kn);
  3315. return ret;
  3316. }
  3317. if (cft->file_offset) {
  3318. struct cgroup_file *cfile = (void *)css + cft->file_offset;
  3319. timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0);
  3320. spin_lock_irq(&cgroup_file_kn_lock);
  3321. cfile->kn = kn;
  3322. spin_unlock_irq(&cgroup_file_kn_lock);
  3323. }
  3324. return 0;
  3325. }
  3326. /**
  3327. * cgroup_addrm_files - add or remove files to a cgroup directory
  3328. * @css: the target css
  3329. * @cgrp: the target cgroup (usually css->cgroup)
  3330. * @cfts: array of cftypes to be added
  3331. * @is_add: whether to add or remove
  3332. *
  3333. * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
  3334. * For removals, this function never fails.
  3335. */
  3336. static int cgroup_addrm_files(struct cgroup_subsys_state *css,
  3337. struct cgroup *cgrp, struct cftype cfts[],
  3338. bool is_add)
  3339. {
  3340. struct cftype *cft, *cft_end = NULL;
  3341. int ret = 0;
  3342. lockdep_assert_held(&cgroup_mutex);
  3343. restart:
  3344. for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
  3345. /* does cft->flags tell us to skip this file on @cgrp? */
  3346. if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled())
  3347. continue;
  3348. if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
  3349. continue;
  3350. if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
  3351. continue;
  3352. if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
  3353. continue;
  3354. if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
  3355. continue;
  3356. if ((cft->flags & CFTYPE_DEBUG) && !cgroup_debug)
  3357. continue;
  3358. if (is_add) {
  3359. ret = cgroup_add_file(css, cgrp, cft);
  3360. if (ret) {
  3361. pr_warn("%s: failed to add %s, err=%d\n",
  3362. __func__, cft->name, ret);
  3363. cft_end = cft;
  3364. is_add = false;
  3365. goto restart;
  3366. }
  3367. } else {
  3368. cgroup_rm_file(cgrp, cft);
  3369. }
  3370. }
  3371. return ret;
  3372. }
  3373. static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
  3374. {
  3375. struct cgroup_subsys *ss = cfts[0].ss;
  3376. struct cgroup *root = &ss->root->cgrp;
  3377. struct cgroup_subsys_state *css;
  3378. int ret = 0;
  3379. lockdep_assert_held(&cgroup_mutex);
  3380. /* add/rm files for all cgroups created before */
  3381. css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
  3382. struct cgroup *cgrp = css->cgroup;
  3383. if (!(css->flags & CSS_VISIBLE))
  3384. continue;
  3385. ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
  3386. if (ret)
  3387. break;
  3388. }
  3389. if (is_add && !ret)
  3390. kernfs_activate(root->kn);
  3391. return ret;
  3392. }
  3393. static void cgroup_exit_cftypes(struct cftype *cfts)
  3394. {
  3395. struct cftype *cft;
  3396. for (cft = cfts; cft->name[0] != '\0'; cft++) {
3397. /* free copy for custom atomic_write_len, see cgroup_init_cftypes() */
  3398. if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
  3399. kfree(cft->kf_ops);
  3400. cft->kf_ops = NULL;
  3401. cft->ss = NULL;
  3402. /* revert flags set by cgroup core while adding @cfts */
  3403. cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
  3404. }
  3405. }
  3406. static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
  3407. {
  3408. struct cftype *cft;
  3409. for (cft = cfts; cft->name[0] != '\0'; cft++) {
  3410. struct kernfs_ops *kf_ops;
  3411. WARN_ON(cft->ss || cft->kf_ops);
  3412. if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled())
  3413. continue;
  3414. if (cft->seq_start)
  3415. kf_ops = &cgroup_kf_ops;
  3416. else
  3417. kf_ops = &cgroup_kf_single_ops;
  3418. /*
  3419. * Ugh... if @cft wants a custom max_write_len, we need to
  3420. * make a copy of kf_ops to set its atomic_write_len.
  3421. */
  3422. if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
  3423. kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
  3424. if (!kf_ops) {
  3425. cgroup_exit_cftypes(cfts);
  3426. return -ENOMEM;
  3427. }
  3428. kf_ops->atomic_write_len = cft->max_write_len;
  3429. }
  3430. cft->kf_ops = kf_ops;
  3431. cft->ss = ss;
  3432. }
  3433. return 0;
  3434. }
  3435. static int cgroup_rm_cftypes_locked(struct cftype *cfts)
  3436. {
  3437. lockdep_assert_held(&cgroup_mutex);
  3438. if (!cfts || !cfts[0].ss)
  3439. return -ENOENT;
  3440. list_del(&cfts->node);
  3441. cgroup_apply_cftypes(cfts, false);
  3442. cgroup_exit_cftypes(cfts);
  3443. return 0;
  3444. }
  3445. /**
  3446. * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
  3447. * @cfts: zero-length name terminated array of cftypes
  3448. *
  3449. * Unregister @cfts. Files described by @cfts are removed from all
  3450. * existing cgroups and all future cgroups won't have them either. This
  3451. * function can be called anytime whether @cfts' subsys is attached or not.
  3452. *
  3453. * Returns 0 on successful unregistration, -ENOENT if @cfts is not
  3454. * registered.
  3455. */
  3456. int cgroup_rm_cftypes(struct cftype *cfts)
  3457. {
  3458. int ret;
  3459. mutex_lock(&cgroup_mutex);
  3460. ret = cgroup_rm_cftypes_locked(cfts);
  3461. mutex_unlock(&cgroup_mutex);
  3462. return ret;
  3463. }
  3464. /**
  3465. * cgroup_add_cftypes - add an array of cftypes to a subsystem
  3466. * @ss: target cgroup subsystem
  3467. * @cfts: zero-length name terminated array of cftypes
  3468. *
  3469. * Register @cfts to @ss. Files described by @cfts are created for all
  3470. * existing cgroups to which @ss is attached and all future cgroups will
  3471. * have them too. This function can be called anytime whether @ss is
  3472. * attached or not.
  3473. *
  3474. * Returns 0 on successful registration, -errno on failure. Note that this
  3475. * function currently returns 0 as long as @cfts registration is successful
  3476. * even if some file creation attempts on existing cgroups fail.
  3477. */
  3478. static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
  3479. {
  3480. int ret;
  3481. if (!cgroup_ssid_enabled(ss->id))
  3482. return 0;
  3483. if (!cfts || cfts[0].name[0] == '\0')
  3484. return 0;
  3485. ret = cgroup_init_cftypes(ss, cfts);
  3486. if (ret)
  3487. return ret;
  3488. mutex_lock(&cgroup_mutex);
  3489. list_add_tail(&cfts->node, &ss->cfts);
  3490. ret = cgroup_apply_cftypes(cfts, true);
  3491. if (ret)
  3492. cgroup_rm_cftypes_locked(cfts);
  3493. mutex_unlock(&cgroup_mutex);
  3494. return ret;
  3495. }
  3496. /**
  3497. * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
  3498. * @ss: target cgroup subsystem
  3499. * @cfts: zero-length name terminated array of cftypes
  3500. *
  3501. * Similar to cgroup_add_cftypes() but the added files are only used for
  3502. * the default hierarchy.
  3503. */
  3504. int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
  3505. {
  3506. struct cftype *cft;
  3507. for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
  3508. cft->flags |= __CFTYPE_ONLY_ON_DFL;
  3509. return cgroup_add_cftypes(ss, cfts);
  3510. }
  3511. /**
  3512. * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
  3513. * @ss: target cgroup subsystem
  3514. * @cfts: zero-length name terminated array of cftypes
  3515. *
  3516. * Similar to cgroup_add_cftypes() but the added files are only used for
  3517. * the legacy hierarchies.
  3518. */
  3519. int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
  3520. {
  3521. struct cftype *cft;
  3522. for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
  3523. cft->flags |= __CFTYPE_NOT_ON_DFL;
  3524. return cgroup_add_cftypes(ss, cfts);
  3525. }
  3526. EXPORT_SYMBOL_GPL(cgroup_add_legacy_cftypes);
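/*
 * Illustrative sketch: the shape of a cftype array a controller would
 * register with cgroup_add_dfl_cftypes() or cgroup_add_legacy_cftypes().
 * The example_* names are hypothetical; the read_u64/write_u64 callbacks
 * are dispatched by cgroup_seqfile_show() and cgroup_file_write() above,
 * and the zero-length name entry terminates the array as required by the
 * kernel-doc above.
 */
static u64 example_weight_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return 100;	/* a real controller would return per-css state */
}

static int example_weight_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	return 0;	/* a real controller would update per-css state */
}

static struct cftype example_files[] __maybe_unused = {
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = example_weight_read,
		.write_u64 = example_weight_write,
	},
	{ }	/* terminator */
};
/* typically registered from subsystem init code, e.g.:
 *	cgroup_add_dfl_cftypes(&example_cgrp_subsys, example_files);
 */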
  3527. /**
  3528. * cgroup_file_notify - generate a file modified event for a cgroup_file
  3529. * @cfile: target cgroup_file
  3530. *
  3531. * @cfile must have been obtained by setting cftype->file_offset.
  3532. */
  3533. void cgroup_file_notify(struct cgroup_file *cfile)
  3534. {
  3535. unsigned long flags;
  3536. spin_lock_irqsave(&cgroup_file_kn_lock, flags);
  3537. if (cfile->kn) {
  3538. unsigned long last = cfile->notified_at;
  3539. unsigned long next = last + CGROUP_FILE_NOTIFY_MIN_INTV;
  3540. if (time_in_range(jiffies, last, next)) {
  3541. timer_reduce(&cfile->notify_timer, next);
  3542. } else {
  3543. kernfs_notify(cfile->kn);
  3544. cfile->notified_at = jiffies;
  3545. }
  3546. }
  3547. spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
  3548. }
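/*
 * Illustrative sketch: wiring a cftype to an embedded struct cgroup_file
 * via ->file_offset so the controller can later call cgroup_file_notify().
 * cgroup_add_file() above fills cfile->kn using the same offset arithmetic.
 * The example_css type and names are hypothetical.
 */
struct example_css {
	struct cgroup_subsys_state css;
	struct cgroup_file events_file;		/* handle used for notifications */
};

static struct cftype example_event_files[] __maybe_unused = {
	{
		.name = "events",
		.file_offset = offsetof(struct example_css, events_file),
		/* .seq_show would render the file's contents */
	},
	{ }	/* terminator */
};

/* called whenever the controller's event state changes: */
static void __maybe_unused example_signal_event(struct example_css *ecss)
{
	cgroup_file_notify(&ecss->events_file);
}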
  3549. /**
  3550. * css_next_child - find the next child of a given css
  3551. * @pos: the current position (%NULL to initiate traversal)
  3552. * @parent: css whose children to walk
  3553. *
  3554. * This function returns the next child of @parent and should be called
  3555. * under either cgroup_mutex or RCU read lock. The only requirement is
  3556. * that @parent and @pos are accessible. The next sibling is guaranteed to
  3557. * be returned regardless of their states.
  3558. *
  3559. * If a subsystem synchronizes ->css_online() and the start of iteration, a
  3560. * css which finished ->css_online() is guaranteed to be visible in the
  3561. * future iterations and will stay visible until the last reference is put.
  3562. * A css which hasn't finished ->css_online() or already finished
  3563. * ->css_offline() may show up during traversal. It's each subsystem's
  3564. * responsibility to synchronize against on/offlining.
  3565. */
  3566. struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
  3567. struct cgroup_subsys_state *parent)
  3568. {
  3569. struct cgroup_subsys_state *next;
  3570. cgroup_assert_mutex_or_rcu_locked();
  3571. /*
  3572. * @pos could already have been unlinked from the sibling list.
  3573. * Once a cgroup is removed, its ->sibling.next is no longer
  3574. * updated when its next sibling changes. CSS_RELEASED is set when
  3575. * @pos is taken off list, at which time its next pointer is valid,
  3576. * and, as releases are serialized, the one pointed to by the next
  3577. * pointer is guaranteed to not have started release yet. This
  3578. * implies that if we observe !CSS_RELEASED on @pos in this RCU
  3579. * critical section, the one pointed to by its next pointer is
  3580. * guaranteed to not have finished its RCU grace period even if we
3581. * have dropped rcu_read_lock() in between iterations.
  3582. *
  3583. * If @pos has CSS_RELEASED set, its next pointer can't be
  3584. * dereferenced; however, as each css is given a monotonically
  3585. * increasing unique serial number and always appended to the
  3586. * sibling list, the next one can be found by walking the parent's
  3587. * children until the first css with higher serial number than
  3588. * @pos's. While this path can be slower, it happens iff iteration
  3589. * races against release and the race window is very small.
  3590. */
  3591. if (!pos) {
  3592. next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
  3593. } else if (likely(!(pos->flags & CSS_RELEASED))) {
  3594. next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
  3595. } else {
  3596. list_for_each_entry_rcu(next, &parent->children, sibling,
  3597. lockdep_is_held(&cgroup_mutex))
  3598. if (next->serial_nr > pos->serial_nr)
  3599. break;
  3600. }
  3601. /*
  3602. * @next, if not pointing to the head, can be dereferenced and is
  3603. * the next sibling.
  3604. */
  3605. if (&next->sibling != &parent->children)
  3606. return next;
  3607. return NULL;
  3608. }
  3609. EXPORT_SYMBOL_GPL(css_next_child);
  3610. /**
  3611. * css_next_descendant_pre - find the next descendant for pre-order walk
  3612. * @pos: the current position (%NULL to initiate traversal)
  3613. * @root: css whose descendants to walk
  3614. *
  3615. * To be used by css_for_each_descendant_pre(). Find the next descendant
  3616. * to visit for pre-order traversal of @root's descendants. @root is
  3617. * included in the iteration and the first node to be visited.
  3618. *
  3619. * While this function requires cgroup_mutex or RCU read locking, it
  3620. * doesn't require the whole traversal to be contained in a single critical
  3621. * section. This function will return the correct next descendant as long
  3622. * as both @pos and @root are accessible and @pos is a descendant of @root.
  3623. *
  3624. * If a subsystem synchronizes ->css_online() and the start of iteration, a
  3625. * css which finished ->css_online() is guaranteed to be visible in the
  3626. * future iterations and will stay visible until the last reference is put.
  3627. * A css which hasn't finished ->css_online() or already finished
  3628. * ->css_offline() may show up during traversal. It's each subsystem's
  3629. * responsibility to synchronize against on/offlining.
  3630. */
  3631. struct cgroup_subsys_state *
  3632. css_next_descendant_pre(struct cgroup_subsys_state *pos,
  3633. struct cgroup_subsys_state *root)
  3634. {
  3635. struct cgroup_subsys_state *next;
  3636. cgroup_assert_mutex_or_rcu_locked();
  3637. /* if first iteration, visit @root */
  3638. if (!pos)
  3639. return root;
  3640. /* visit the first child if exists */
  3641. next = css_next_child(NULL, pos);
  3642. if (next)
  3643. return next;
3644. /* no child; visit our own or the closest ancestor's next sibling */
  3645. while (pos != root) {
  3646. next = css_next_child(pos, pos->parent);
  3647. if (next)
  3648. return next;
  3649. pos = pos->parent;
  3650. }
  3651. return NULL;
  3652. }
  3653. EXPORT_SYMBOL_GPL(css_next_descendant_pre);
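/*
 * Illustrative sketch: a pre-order walk of a subtree using
 * css_for_each_descendant_pre() under the RCU read lock, skipping the
 * subtree below any offline css with css_rightmost_descendant() (defined
 * just below), as its kernel-doc suggests. example_walk_subtree() is a
 * hypothetical name.
 */
static void __maybe_unused example_walk_subtree(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root) {
		if (!(pos->flags & CSS_ONLINE)) {
			/* don't descend into @pos; continue with its next sibling */
			pos = css_rightmost_descendant(pos);
			continue;
		}
		/* process @pos here */
	}
	rcu_read_unlock();
}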
  3654. /**
  3655. * css_rightmost_descendant - return the rightmost descendant of a css
  3656. * @pos: css of interest
  3657. *
  3658. * Return the rightmost descendant of @pos. If there's no descendant, @pos
  3659. * is returned. This can be used during pre-order traversal to skip
3660. * the subtree of @pos.
  3661. *
  3662. * While this function requires cgroup_mutex or RCU read locking, it
  3663. * doesn't require the whole traversal to be contained in a single critical
  3664. * section. This function will return the correct rightmost descendant as
  3665. * long as @pos is accessible.
  3666. */
  3667. struct cgroup_subsys_state *
  3668. css_rightmost_descendant(struct cgroup_subsys_state *pos)
  3669. {
  3670. struct cgroup_subsys_state *last, *tmp;
  3671. cgroup_assert_mutex_or_rcu_locked();
  3672. do {
  3673. last = pos;
  3674. /* ->prev isn't RCU safe, walk ->next till the end */
  3675. pos = NULL;
  3676. css_for_each_child(tmp, last)
  3677. pos = tmp;
  3678. } while (pos);
  3679. return last;
  3680. }
  3681. static struct cgroup_subsys_state *
  3682. css_leftmost_descendant(struct cgroup_subsys_state *pos)
  3683. {
  3684. struct cgroup_subsys_state *last;
  3685. do {
  3686. last = pos;
  3687. pos = css_next_child(NULL, pos);
  3688. } while (pos);
  3689. return last;
  3690. }
  3691. /**
  3692. * css_next_descendant_post - find the next descendant for post-order walk
  3693. * @pos: the current position (%NULL to initiate traversal)
  3694. * @root: css whose descendants to walk
  3695. *
  3696. * To be used by css_for_each_descendant_post(). Find the next descendant
  3697. * to visit for post-order traversal of @root's descendants. @root is
  3698. * included in the iteration and the last node to be visited.
  3699. *
  3700. * While this function requires cgroup_mutex or RCU read locking, it
  3701. * doesn't require the whole traversal to be contained in a single critical
  3702. * section. This function will return the correct next descendant as long
3703. * as both @pos and @root are accessible and @pos is a descendant of
3704. * @root.
  3705. *
  3706. * If a subsystem synchronizes ->css_online() and the start of iteration, a
  3707. * css which finished ->css_online() is guaranteed to be visible in the
  3708. * future iterations and will stay visible until the last reference is put.
  3709. * A css which hasn't finished ->css_online() or already finished
  3710. * ->css_offline() may show up during traversal. It's each subsystem's
  3711. * responsibility to synchronize against on/offlining.
  3712. */
  3713. struct cgroup_subsys_state *
  3714. css_next_descendant_post(struct cgroup_subsys_state *pos,
  3715. struct cgroup_subsys_state *root)
  3716. {
  3717. struct cgroup_subsys_state *next;
  3718. cgroup_assert_mutex_or_rcu_locked();
  3719. /* if first iteration, visit leftmost descendant which may be @root */
  3720. if (!pos)
  3721. return css_leftmost_descendant(root);
  3722. /* if we visited @root, we're done */
  3723. if (pos == root)
  3724. return NULL;
  3725. /* if there's an unvisited sibling, visit its leftmost descendant */
  3726. next = css_next_child(pos, pos->parent);
  3727. if (next)
  3728. return css_leftmost_descendant(next);
  3729. /* no sibling left, visit parent */
  3730. return pos->parent;
  3731. }
  3732. /**
  3733. * css_has_online_children - does a css have online children
  3734. * @css: the target css
  3735. *
  3736. * Returns %true if @css has any online children; otherwise, %false. This
  3737. * function can be called from any context but the caller is responsible
  3738. * for synchronizing against on/offlining as necessary.
  3739. */
  3740. bool css_has_online_children(struct cgroup_subsys_state *css)
  3741. {
  3742. struct cgroup_subsys_state *child;
  3743. bool ret = false;
  3744. rcu_read_lock();
  3745. css_for_each_child(child, css) {
  3746. if (child->flags & CSS_ONLINE) {
  3747. ret = true;
  3748. break;
  3749. }
  3750. }
  3751. rcu_read_unlock();
  3752. return ret;
  3753. }
  3754. static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it)
  3755. {
  3756. struct list_head *l;
  3757. struct cgrp_cset_link *link;
  3758. struct css_set *cset;
  3759. lockdep_assert_held(&css_set_lock);
  3760. /* find the next threaded cset */
  3761. if (it->tcset_pos) {
  3762. l = it->tcset_pos->next;
  3763. if (l != it->tcset_head) {
  3764. it->tcset_pos = l;
  3765. return container_of(l, struct css_set,
  3766. threaded_csets_node);
  3767. }
  3768. it->tcset_pos = NULL;
  3769. }
  3770. /* find the next cset */
  3771. l = it->cset_pos;
  3772. l = l->next;
  3773. if (l == it->cset_head) {
  3774. it->cset_pos = NULL;
  3775. return NULL;
  3776. }
  3777. if (it->ss) {
  3778. cset = container_of(l, struct css_set, e_cset_node[it->ss->id]);
  3779. } else {
  3780. link = list_entry(l, struct cgrp_cset_link, cset_link);
  3781. cset = link->cset;
  3782. }
  3783. it->cset_pos = l;
  3784. /* initialize threaded css_set walking */
  3785. if (it->flags & CSS_TASK_ITER_THREADED) {
  3786. if (it->cur_dcset)
  3787. put_css_set_locked(it->cur_dcset);
  3788. it->cur_dcset = cset;
  3789. get_css_set(cset);
  3790. it->tcset_head = &cset->threaded_csets;
  3791. it->tcset_pos = &cset->threaded_csets;
  3792. }
  3793. return cset;
  3794. }
  3795. /**
3796. * css_task_iter_advance_css_set - advance a task iterator to the next css_set
  3797. * @it: the iterator to advance
  3798. *
  3799. * Advance @it to the next css_set to walk.
  3800. */
  3801. static void css_task_iter_advance_css_set(struct css_task_iter *it)
  3802. {
  3803. struct css_set *cset;
  3804. lockdep_assert_held(&css_set_lock);
3805. /* Advance to the next non-empty css_set and find the first non-empty tasks list */
  3806. while ((cset = css_task_iter_next_css_set(it))) {
  3807. if (!list_empty(&cset->tasks)) {
  3808. it->cur_tasks_head = &cset->tasks;
  3809. break;
  3810. } else if (!list_empty(&cset->mg_tasks)) {
  3811. it->cur_tasks_head = &cset->mg_tasks;
  3812. break;
  3813. } else if (!list_empty(&cset->dying_tasks)) {
  3814. it->cur_tasks_head = &cset->dying_tasks;
  3815. break;
  3816. }
  3817. }
  3818. if (!cset) {
  3819. it->task_pos = NULL;
  3820. return;
  3821. }
  3822. it->task_pos = it->cur_tasks_head->next;
  3823. /*
  3824. * We don't keep css_sets locked across iteration steps and thus
  3825. * need to take steps to ensure that iteration can be resumed after
  3826. * the lock is re-acquired. Iteration is performed at two levels -
  3827. * css_sets and tasks in them.
  3828. *
  3829. * Once created, a css_set never leaves its cgroup lists, so a
  3830. * pinned css_set is guaranteed to stay put and we can resume
  3831. * iteration afterwards.
  3832. *
  3833. * Tasks may leave @cset across iteration steps. This is resolved
  3834. * by registering each iterator with the css_set currently being
  3835. * walked and making css_set_move_task() advance iterators whose
  3836. * next task is leaving.
  3837. */
  3838. if (it->cur_cset) {
  3839. list_del(&it->iters_node);
  3840. put_css_set_locked(it->cur_cset);
  3841. }
  3842. get_css_set(cset);
  3843. it->cur_cset = cset;
  3844. list_add(&it->iters_node, &cset->task_iters);
  3845. }
  3846. static void css_task_iter_skip(struct css_task_iter *it,
  3847. struct task_struct *task)
  3848. {
  3849. lockdep_assert_held(&css_set_lock);
  3850. if (it->task_pos == &task->cg_list) {
  3851. it->task_pos = it->task_pos->next;
  3852. it->flags |= CSS_TASK_ITER_SKIPPED;
  3853. }
  3854. }
  3855. static void css_task_iter_advance(struct css_task_iter *it)
  3856. {
  3857. struct task_struct *task;
  3858. lockdep_assert_held(&css_set_lock);
  3859. repeat:
  3860. if (it->task_pos) {
  3861. /*
  3862. * Advance iterator to find next entry. We go through cset
  3863. * tasks, mg_tasks and dying_tasks, when consumed we move onto
  3864. * the next cset.
  3865. */
  3866. if (it->flags & CSS_TASK_ITER_SKIPPED)
  3867. it->flags &= ~CSS_TASK_ITER_SKIPPED;
  3868. else
  3869. it->task_pos = it->task_pos->next;
  3870. if (it->task_pos == &it->cur_cset->tasks) {
  3871. it->cur_tasks_head = &it->cur_cset->mg_tasks;
  3872. it->task_pos = it->cur_tasks_head->next;
  3873. }
  3874. if (it->task_pos == &it->cur_cset->mg_tasks) {
  3875. it->cur_tasks_head = &it->cur_cset->dying_tasks;
  3876. it->task_pos = it->cur_tasks_head->next;
  3877. }
  3878. if (it->task_pos == &it->cur_cset->dying_tasks)
  3879. css_task_iter_advance_css_set(it);
  3880. } else {
  3881. /* called from start, proceed to the first cset */
  3882. css_task_iter_advance_css_set(it);
  3883. }
  3884. if (!it->task_pos)
  3885. return;
  3886. task = list_entry(it->task_pos, struct task_struct, cg_list);
  3887. if (it->flags & CSS_TASK_ITER_PROCS) {
  3888. /* if PROCS, skip over tasks which aren't group leaders */
  3889. if (!thread_group_leader(task))
  3890. goto repeat;
  3891. /* and dying leaders w/o live member threads */
  3892. if (it->cur_tasks_head == &it->cur_cset->dying_tasks &&
  3893. !atomic_read(&task->signal->live))
  3894. goto repeat;
  3895. } else {
  3896. /* skip all dying ones */
  3897. if (it->cur_tasks_head == &it->cur_cset->dying_tasks)
  3898. goto repeat;
  3899. }
  3900. }
  3901. /**
  3902. * css_task_iter_start - initiate task iteration
  3903. * @css: the css to walk tasks of
  3904. * @flags: CSS_TASK_ITER_* flags
  3905. * @it: the task iterator to use
  3906. *
  3907. * Initiate iteration through the tasks of @css. The caller can call
  3908. * css_task_iter_next() to walk through the tasks until the function
  3909. * returns NULL. On completion of iteration, css_task_iter_end() must be
  3910. * called.
  3911. */
  3912. void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
  3913. struct css_task_iter *it)
  3914. {
  3915. memset(it, 0, sizeof(*it));
  3916. spin_lock_irq(&css_set_lock);
  3917. it->ss = css->ss;
  3918. it->flags = flags;
  3919. if (it->ss)
  3920. it->cset_pos = &css->cgroup->e_csets[css->ss->id];
  3921. else
  3922. it->cset_pos = &css->cgroup->cset_links;
  3923. it->cset_head = it->cset_pos;
  3924. css_task_iter_advance(it);
  3925. spin_unlock_irq(&css_set_lock);
  3926. }
  3927. /**
  3928. * css_task_iter_next - return the next task for the iterator
  3929. * @it: the task iterator being iterated
  3930. *
  3931. * The "next" function for task iteration. @it should have been
  3932. * initialized via css_task_iter_start(). Returns NULL when the iteration
  3933. * reaches the end.
  3934. */
  3935. struct task_struct *css_task_iter_next(struct css_task_iter *it)
  3936. {
  3937. if (it->cur_task) {
  3938. put_task_struct(it->cur_task);
  3939. it->cur_task = NULL;
  3940. }
  3941. spin_lock_irq(&css_set_lock);
  3942. /* @it may be half-advanced by skips, finish advancing */
  3943. if (it->flags & CSS_TASK_ITER_SKIPPED)
  3944. css_task_iter_advance(it);
  3945. if (it->task_pos) {
  3946. it->cur_task = list_entry(it->task_pos, struct task_struct,
  3947. cg_list);
  3948. get_task_struct(it->cur_task);
  3949. css_task_iter_advance(it);
  3950. }
  3951. spin_unlock_irq(&css_set_lock);
  3952. return it->cur_task;
  3953. }
  3954. /**
  3955. * css_task_iter_end - finish task iteration
  3956. * @it: the task iterator to finish
  3957. *
  3958. * Finish task iteration started by css_task_iter_start().
  3959. */
  3960. void css_task_iter_end(struct css_task_iter *it)
  3961. {
  3962. if (it->cur_cset) {
  3963. spin_lock_irq(&css_set_lock);
  3964. list_del(&it->iters_node);
  3965. put_css_set_locked(it->cur_cset);
  3966. spin_unlock_irq(&css_set_lock);
  3967. }
  3968. if (it->cur_dcset)
  3969. put_css_set(it->cur_dcset);
  3970. if (it->cur_task)
  3971. put_task_struct(it->cur_task);
  3972. }
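/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * the three iterator functions above.  @css and do_something() are
 * placeholders; the returned task is pinned by the iterator until the next
 * css_task_iter_next() or css_task_iter_end() call.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		do_something(task);
 *	css_task_iter_end(&it);
 */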
  3973. static void cgroup_procs_release(struct kernfs_open_file *of)
  3974. {
  3975. struct cgroup_file_ctx *ctx = of->priv;
  3976. if (ctx->procs.started)
  3977. css_task_iter_end(&ctx->procs.iter);
  3978. }
  3979. static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
  3980. {
  3981. struct kernfs_open_file *of = s->private;
  3982. struct cgroup_file_ctx *ctx = of->priv;
  3983. if (pos)
  3984. (*pos)++;
  3985. return css_task_iter_next(&ctx->procs.iter);
  3986. }
  3987. static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
  3988. unsigned int iter_flags)
  3989. {
  3990. struct kernfs_open_file *of = s->private;
  3991. struct cgroup *cgrp = seq_css(s)->cgroup;
  3992. struct cgroup_file_ctx *ctx = of->priv;
  3993. struct css_task_iter *it = &ctx->procs.iter;
  3994. /*
  3995. * When a seq_file is seeked, it's always traversed sequentially
  3996. * from position 0, so we can simply keep iterating on !0 *pos.
  3997. */
  3998. if (!ctx->procs.started) {
  3999. if (WARN_ON_ONCE((*pos)))
  4000. return ERR_PTR(-EINVAL);
  4001. css_task_iter_start(&cgrp->self, iter_flags, it);
  4002. ctx->procs.started = true;
  4003. } else if (!(*pos)) {
  4004. css_task_iter_end(it);
  4005. css_task_iter_start(&cgrp->self, iter_flags, it);
  4006. } else
  4007. return it->cur_task;
  4008. return cgroup_procs_next(s, NULL, NULL);
  4009. }
  4010. static void *cgroup_procs_start(struct seq_file *s, loff_t *pos)
  4011. {
  4012. struct cgroup *cgrp = seq_css(s)->cgroup;
  4013. /*
  4014. * All processes of a threaded subtree belong to the domain cgroup
  4015. * of the subtree. Only threads can be distributed across the
  4016. * subtree. Reject reads on cgroup.procs in the subtree proper.
  4017. * They're always empty anyway.
  4018. */
  4019. if (cgroup_is_threaded(cgrp))
  4020. return ERR_PTR(-EOPNOTSUPP);
  4021. return __cgroup_procs_start(s, pos, CSS_TASK_ITER_PROCS |
  4022. CSS_TASK_ITER_THREADED);
  4023. }
  4024. static int cgroup_procs_show(struct seq_file *s, void *v)
  4025. {
  4026. seq_printf(s, "%d\n", task_pid_vnr(v));
  4027. return 0;
  4028. }
  4029. static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb)
  4030. {
  4031. int ret;
  4032. struct inode *inode;
  4033. lockdep_assert_held(&cgroup_mutex);
  4034. inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
  4035. if (!inode)
  4036. return -ENOMEM;
  4037. ret = inode_permission(inode, MAY_WRITE);
  4038. iput(inode);
  4039. return ret;
  4040. }
  4041. static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
  4042. struct cgroup *dst_cgrp,
  4043. struct super_block *sb,
  4044. struct cgroup_namespace *ns)
  4045. {
  4046. struct cgroup *com_cgrp = src_cgrp;
  4047. int ret;
  4048. lockdep_assert_held(&cgroup_mutex);
  4049. /* find the common ancestor */
  4050. while (!cgroup_is_descendant(dst_cgrp, com_cgrp))
  4051. com_cgrp = cgroup_parent(com_cgrp);
  4052. /* %current should be authorized to migrate to the common ancestor */
  4053. ret = cgroup_may_write(com_cgrp, sb);
  4054. if (ret)
  4055. return ret;
  4056. /*
  4057. * If namespaces are delegation boundaries, %current must be able
  4058. * to see both source and destination cgroups from its namespace.
  4059. */
  4060. if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) &&
  4061. (!cgroup_is_descendant(src_cgrp, ns->root_cset->dfl_cgrp) ||
  4062. !cgroup_is_descendant(dst_cgrp, ns->root_cset->dfl_cgrp)))
  4063. return -ENOENT;
  4064. return 0;
  4065. }
  4066. static int cgroup_attach_permissions(struct cgroup *src_cgrp,
  4067. struct cgroup *dst_cgrp,
  4068. struct super_block *sb, bool threadgroup,
  4069. struct cgroup_namespace *ns)
  4070. {
  4071. int ret = 0;
  4072. ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns);
  4073. if (ret)
  4074. return ret;
  4075. ret = cgroup_migrate_vet_dst(dst_cgrp);
  4076. if (ret)
  4077. return ret;
  4078. if (!threadgroup && (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp))
  4079. ret = -EOPNOTSUPP;
  4080. return ret;
  4081. }
  4082. static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
  4083. char *buf, size_t nbytes, loff_t off)
  4084. {
  4085. struct cgroup_file_ctx *ctx = of->priv;
  4086. struct cgroup *src_cgrp, *dst_cgrp;
  4087. struct task_struct *task;
  4088. const struct cred *saved_cred;
  4089. ssize_t ret;
  4090. bool locked;
  4091. dst_cgrp = cgroup_kn_lock_live(of->kn, false);
  4092. if (!dst_cgrp)
  4093. return -ENODEV;
  4094. task = cgroup_procs_write_start(buf, true, &locked, dst_cgrp);
  4095. ret = PTR_ERR_OR_ZERO(task);
  4096. if (ret)
  4097. goto out_unlock;
  4098. /* find the source cgroup */
  4099. spin_lock_irq(&css_set_lock);
  4100. src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
  4101. spin_unlock_irq(&css_set_lock);
  4102. /*
4103. * Process and thread migrations follow the same delegation rule. Check
  4104. * permissions using the credentials from file open to protect against
  4105. * inherited fd attacks.
  4106. */
  4107. saved_cred = override_creds(of->file->f_cred);
  4108. ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
  4109. of->file->f_path.dentry->d_sb, true,
  4110. ctx->ns);
  4111. revert_creds(saved_cred);
  4112. if (ret)
  4113. goto out_finish;
  4114. ret = cgroup_attach_task(dst_cgrp, task, true);
  4115. out_finish:
  4116. cgroup_procs_write_finish(task, locked);
  4117. out_unlock:
  4118. cgroup_kn_unlock(of->kn);
  4119. return ret ?: nbytes;
  4120. }
  4121. static void *cgroup_threads_start(struct seq_file *s, loff_t *pos)
  4122. {
  4123. return __cgroup_procs_start(s, pos, 0);
  4124. }
  4125. static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
  4126. char *buf, size_t nbytes, loff_t off)
  4127. {
  4128. struct cgroup_file_ctx *ctx = of->priv;
  4129. struct cgroup *src_cgrp, *dst_cgrp;
  4130. struct task_struct *task;
  4131. const struct cred *saved_cred;
  4132. ssize_t ret;
  4133. bool locked;
  4134. buf = strstrip(buf);
  4135. dst_cgrp = cgroup_kn_lock_live(of->kn, false);
  4136. if (!dst_cgrp)
  4137. return -ENODEV;
  4138. task = cgroup_procs_write_start(buf, false, &locked, dst_cgrp);
  4139. ret = PTR_ERR_OR_ZERO(task);
  4140. if (ret)
  4141. goto out_unlock;
  4142. /* find the source cgroup */
  4143. spin_lock_irq(&css_set_lock);
  4144. src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
  4145. spin_unlock_irq(&css_set_lock);
  4146. /*
4147. * Process and thread migrations follow the same delegation rule. Check
  4148. * permissions using the credentials from file open to protect against
  4149. * inherited fd attacks.
  4150. */
  4151. saved_cred = override_creds(of->file->f_cred);
  4152. ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
  4153. of->file->f_path.dentry->d_sb, false,
  4154. ctx->ns);
  4155. revert_creds(saved_cred);
  4156. if (ret)
  4157. goto out_finish;
  4158. ret = cgroup_attach_task(dst_cgrp, task, false);
  4159. out_finish:
  4160. cgroup_procs_write_finish(task, locked);
  4161. out_unlock:
  4162. cgroup_kn_unlock(of->kn);
  4163. return ret ?: nbytes;
  4164. }
  4165. /* cgroup core interface files for the default hierarchy */
  4166. static struct cftype cgroup_base_files[] = {
  4167. {
  4168. .name = "cgroup.type",
  4169. .flags = CFTYPE_NOT_ON_ROOT,
  4170. .seq_show = cgroup_type_show,
  4171. .write = cgroup_type_write,
  4172. },
  4173. {
  4174. .name = "cgroup.procs",
  4175. .flags = CFTYPE_NS_DELEGATABLE,
  4176. .file_offset = offsetof(struct cgroup, procs_file),
  4177. .release = cgroup_procs_release,
  4178. .seq_start = cgroup_procs_start,
  4179. .seq_next = cgroup_procs_next,
  4180. .seq_show = cgroup_procs_show,
  4181. .write = cgroup_procs_write,
  4182. },
  4183. {
  4184. .name = "cgroup.threads",
  4185. .flags = CFTYPE_NS_DELEGATABLE,
  4186. .release = cgroup_procs_release,
  4187. .seq_start = cgroup_threads_start,
  4188. .seq_next = cgroup_procs_next,
  4189. .seq_show = cgroup_procs_show,
  4190. .write = cgroup_threads_write,
  4191. },
  4192. {
  4193. .name = "cgroup.controllers",
  4194. .seq_show = cgroup_controllers_show,
  4195. },
  4196. {
  4197. .name = "cgroup.subtree_control",
  4198. .flags = CFTYPE_NS_DELEGATABLE,
  4199. .seq_show = cgroup_subtree_control_show,
  4200. .write = cgroup_subtree_control_write,
  4201. },
  4202. {
  4203. .name = "cgroup.events",
  4204. .flags = CFTYPE_NOT_ON_ROOT,
  4205. .file_offset = offsetof(struct cgroup, events_file),
  4206. .seq_show = cgroup_events_show,
  4207. },
  4208. {
  4209. .name = "cgroup.max.descendants",
  4210. .seq_show = cgroup_max_descendants_show,
  4211. .write = cgroup_max_descendants_write,
  4212. },
  4213. {
  4214. .name = "cgroup.max.depth",
  4215. .seq_show = cgroup_max_depth_show,
  4216. .write = cgroup_max_depth_write,
  4217. },
  4218. {
  4219. .name = "cgroup.stat",
  4220. .seq_show = cgroup_stat_show,
  4221. },
  4222. {
  4223. .name = "cgroup.freeze",
  4224. .flags = CFTYPE_NOT_ON_ROOT,
  4225. .seq_show = cgroup_freeze_show,
  4226. .write = cgroup_freeze_write,
  4227. },
  4228. {
  4229. .name = "cpu.stat",
  4230. .seq_show = cpu_stat_show,
  4231. },
  4232. #ifdef CONFIG_PSI
  4233. {
  4234. .name = "io.pressure",
  4235. .flags = CFTYPE_PRESSURE,
  4236. .seq_show = cgroup_io_pressure_show,
  4237. .write = cgroup_io_pressure_write,
  4238. .poll = cgroup_pressure_poll,
  4239. .release = cgroup_pressure_release,
  4240. },
  4241. {
  4242. .name = "memory.pressure",
  4243. .flags = CFTYPE_PRESSURE,
  4244. .seq_show = cgroup_memory_pressure_show,
  4245. .write = cgroup_memory_pressure_write,
  4246. .poll = cgroup_pressure_poll,
  4247. .release = cgroup_pressure_release,
  4248. },
  4249. {
  4250. .name = "cpu.pressure",
  4251. .flags = CFTYPE_PRESSURE,
  4252. .seq_show = cgroup_cpu_pressure_show,
  4253. .write = cgroup_cpu_pressure_write,
  4254. .poll = cgroup_pressure_poll,
  4255. .release = cgroup_pressure_release,
  4256. },
  4257. #endif /* CONFIG_PSI */
  4258. { } /* terminate */
  4259. };
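/*
 * Illustrative sketch, not part of the original file: controllers declare
 * their interface files with the same struct cftype arrays as
 * cgroup_base_files[] above.  The "example.*" names and the show/write
 * callbacks below are made up; the real references are the dfl_cftypes
 * arrays registered by the individual subsystems in cgroup_init().
 *
 *	static struct cftype example_files[] = {
 *		{
 *			.name = "example.current",
 *			.seq_show = example_current_show,
 *		},
 *		{
 *			.name = "example.max",
 *			.flags = CFTYPE_NOT_ON_ROOT,
 *			.seq_show = example_max_show,
 *			.write = example_max_write,
 *		},
 *		{ }
 *	};
 *
 * The empty entry terminates the array, mirroring cgroup_base_files[].
 */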
  4260. /*
  4261. * css destruction is four-stage process.
  4262. *
  4263. * 1. Destruction starts. Killing of the percpu_ref is initiated.
  4264. * Implemented in kill_css().
  4265. *
  4266. * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
  4267. * and thus css_tryget_online() is guaranteed to fail, the css can be
  4268. * offlined by invoking offline_css(). After offlining, the base ref is
  4269. * put. Implemented in css_killed_work_fn().
  4270. *
  4271. * 3. When the percpu_ref reaches zero, the only possible remaining
  4272. * accessors are inside RCU read sections. css_release() schedules the
  4273. * RCU callback.
  4274. *
  4275. * 4. After the grace period, the css can be freed. Implemented in
  4276. * css_free_work_fn().
  4277. *
4278. * It is actually hairier because both steps 2 and 4 require process context
4279. * and thus involve punting to css->destroy_work, adding two additional
  4280. * steps to the already complex sequence.
  4281. */
  4282. static void css_free_rwork_fn(struct work_struct *work)
  4283. {
  4284. struct cgroup_subsys_state *css = container_of(to_rcu_work(work),
  4285. struct cgroup_subsys_state, destroy_rwork);
  4286. struct cgroup_subsys *ss = css->ss;
  4287. struct cgroup *cgrp = css->cgroup;
  4288. percpu_ref_exit(&css->refcnt);
  4289. if (ss) {
  4290. /* css free path */
  4291. struct cgroup_subsys_state *parent = css->parent;
  4292. int id = css->id;
  4293. ss->css_free(css);
  4294. cgroup_idr_remove(&ss->css_idr, id);
  4295. cgroup_put(cgrp);
  4296. if (parent)
  4297. css_put(parent);
  4298. } else {
  4299. /* cgroup free path */
  4300. atomic_dec(&cgrp->root->nr_cgrps);
  4301. cgroup1_pidlist_destroy_all(cgrp);
  4302. cancel_work_sync(&cgrp->release_agent_work);
  4303. if (cgroup_parent(cgrp)) {
  4304. /*
  4305. * We get a ref to the parent, and put the ref when
  4306. * this cgroup is being freed, so it's guaranteed
  4307. * that the parent won't be destroyed before its
  4308. * children.
  4309. */
  4310. cgroup_put(cgroup_parent(cgrp));
  4311. kernfs_put(cgrp->kn);
  4312. psi_cgroup_free(cgrp);
  4313. if (cgroup_on_dfl(cgrp))
  4314. cgroup_rstat_exit(cgrp);
  4315. kfree(cgrp);
  4316. } else {
  4317. /*
  4318. * This is root cgroup's refcnt reaching zero,
  4319. * which indicates that the root should be
  4320. * released.
  4321. */
  4322. cgroup_destroy_root(cgrp->root);
  4323. }
  4324. }
  4325. }
  4326. static void css_release_work_fn(struct work_struct *work)
  4327. {
  4328. struct cgroup_subsys_state *css =
  4329. container_of(work, struct cgroup_subsys_state, destroy_work);
  4330. struct cgroup_subsys *ss = css->ss;
  4331. struct cgroup *cgrp = css->cgroup;
  4332. mutex_lock(&cgroup_mutex);
  4333. css->flags |= CSS_RELEASED;
  4334. list_del_rcu(&css->sibling);
  4335. if (ss) {
  4336. /* css release path */
  4337. if (!list_empty(&css->rstat_css_node)) {
  4338. cgroup_rstat_flush(cgrp);
  4339. list_del_rcu(&css->rstat_css_node);
  4340. }
  4341. cgroup_idr_replace(&ss->css_idr, NULL, css->id);
  4342. if (ss->css_released)
  4343. ss->css_released(css);
  4344. } else {
  4345. struct cgroup *tcgrp;
  4346. /* cgroup release path */
  4347. TRACE_CGROUP_PATH(release, cgrp);
  4348. if (cgroup_on_dfl(cgrp))
  4349. cgroup_rstat_flush(cgrp);
  4350. spin_lock_irq(&css_set_lock);
  4351. for (tcgrp = cgroup_parent(cgrp); tcgrp;
  4352. tcgrp = cgroup_parent(tcgrp))
  4353. tcgrp->nr_dying_descendants--;
  4354. spin_unlock_irq(&css_set_lock);
  4355. /*
  4356. * There are two control paths which try to determine
  4357. * cgroup from dentry without going through kernfs -
  4358. * cgroupstats_build() and css_tryget_online_from_dir().
  4359. * Those are supported by RCU protecting clearing of
  4360. * cgrp->kn->priv backpointer.
  4361. */
  4362. if (cgrp->kn)
  4363. RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
  4364. NULL);
  4365. }
  4366. mutex_unlock(&cgroup_mutex);
  4367. INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
  4368. queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
  4369. }
  4370. static void css_release(struct percpu_ref *ref)
  4371. {
  4372. struct cgroup_subsys_state *css =
  4373. container_of(ref, struct cgroup_subsys_state, refcnt);
  4374. INIT_WORK(&css->destroy_work, css_release_work_fn);
  4375. queue_work(cgroup_destroy_wq, &css->destroy_work);
  4376. }
  4377. static void init_and_link_css(struct cgroup_subsys_state *css,
  4378. struct cgroup_subsys *ss, struct cgroup *cgrp)
  4379. {
  4380. lockdep_assert_held(&cgroup_mutex);
  4381. cgroup_get_live(cgrp);
  4382. memset(css, 0, sizeof(*css));
  4383. css->cgroup = cgrp;
  4384. css->ss = ss;
  4385. css->id = -1;
  4386. INIT_LIST_HEAD(&css->sibling);
  4387. INIT_LIST_HEAD(&css->children);
  4388. INIT_LIST_HEAD(&css->rstat_css_node);
  4389. css->serial_nr = css_serial_nr_next++;
  4390. atomic_set(&css->online_cnt, 0);
  4391. if (cgroup_parent(cgrp)) {
  4392. css->parent = cgroup_css(cgroup_parent(cgrp), ss);
  4393. css_get(css->parent);
  4394. }
  4395. if (cgroup_on_dfl(cgrp) && ss->css_rstat_flush)
  4396. list_add_rcu(&css->rstat_css_node, &cgrp->rstat_css_list);
  4397. BUG_ON(cgroup_css(cgrp, ss));
  4398. }
  4399. /* invoke ->css_online() on a new CSS and mark it online if successful */
  4400. static int online_css(struct cgroup_subsys_state *css)
  4401. {
  4402. struct cgroup_subsys *ss = css->ss;
  4403. int ret = 0;
  4404. lockdep_assert_held(&cgroup_mutex);
  4405. if (ss->css_online)
  4406. ret = ss->css_online(css);
  4407. if (!ret) {
  4408. css->flags |= CSS_ONLINE;
  4409. rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
  4410. atomic_inc(&css->online_cnt);
  4411. if (css->parent)
  4412. atomic_inc(&css->parent->online_cnt);
  4413. }
  4414. return ret;
  4415. }
  4416. /* if the CSS is online, invoke ->css_offline() on it and mark it offline */
  4417. static void offline_css(struct cgroup_subsys_state *css)
  4418. {
  4419. struct cgroup_subsys *ss = css->ss;
  4420. lockdep_assert_held(&cgroup_mutex);
  4421. if (!(css->flags & CSS_ONLINE))
  4422. return;
  4423. if (ss->css_offline)
  4424. ss->css_offline(css);
  4425. css->flags &= ~CSS_ONLINE;
  4426. RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
  4427. wake_up_all(&css->cgroup->offline_waitq);
  4428. }
  4429. /**
  4430. * css_create - create a cgroup_subsys_state
  4431. * @cgrp: the cgroup new css will be associated with
  4432. * @ss: the subsys of new css
  4433. *
  4434. * Create a new css associated with @cgrp - @ss pair. On success, the new
  4435. * css is online and installed in @cgrp. This function doesn't create the
4436. * interface files. Returns the new css on success or an ERR_PTR() value on failure.
  4437. */
  4438. static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
  4439. struct cgroup_subsys *ss)
  4440. {
  4441. struct cgroup *parent = cgroup_parent(cgrp);
  4442. struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
  4443. struct cgroup_subsys_state *css;
  4444. int err;
  4445. lockdep_assert_held(&cgroup_mutex);
  4446. css = ss->css_alloc(parent_css);
  4447. if (!css)
  4448. css = ERR_PTR(-ENOMEM);
  4449. if (IS_ERR(css))
  4450. return css;
  4451. init_and_link_css(css, ss, cgrp);
  4452. err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
  4453. if (err)
  4454. goto err_free_css;
  4455. err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
  4456. if (err < 0)
  4457. goto err_free_css;
  4458. css->id = err;
  4459. /* @css is ready to be brought online now, make it visible */
  4460. list_add_tail_rcu(&css->sibling, &parent_css->children);
  4461. cgroup_idr_replace(&ss->css_idr, css, css->id);
  4462. err = online_css(css);
  4463. if (err)
  4464. goto err_list_del;
  4465. if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
  4466. cgroup_parent(parent)) {
  4467. pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
  4468. current->comm, current->pid, ss->name);
  4469. if (!strcmp(ss->name, "memory"))
  4470. pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
  4471. ss->warned_broken_hierarchy = true;
  4472. }
  4473. return css;
  4474. err_list_del:
  4475. list_del_rcu(&css->sibling);
  4476. err_free_css:
  4477. list_del_rcu(&css->rstat_css_node);
  4478. INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
  4479. queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
  4480. return ERR_PTR(err);
  4481. }
  4482. /*
4483. * The returned cgroup is fully initialized including its control mask and
4484. * has its kernfs_node created, but the node isn't activated, the interface
4485. * files aren't populated and the control mask isn't applied yet.
  4486. */
  4487. static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
  4488. umode_t mode)
  4489. {
  4490. struct cgroup_root *root = parent->root;
  4491. struct cgroup *cgrp, *tcgrp;
  4492. struct kernfs_node *kn;
  4493. int level = parent->level + 1;
  4494. int ret;
  4495. /* allocate the cgroup and its ID, 0 is reserved for the root */
  4496. cgrp = kzalloc(struct_size(cgrp, ancestor_ids, (level + 1)),
  4497. GFP_KERNEL);
  4498. if (!cgrp)
  4499. return ERR_PTR(-ENOMEM);
  4500. ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
  4501. if (ret)
  4502. goto out_free_cgrp;
  4503. if (cgroup_on_dfl(parent)) {
  4504. ret = cgroup_rstat_init(cgrp);
  4505. if (ret)
  4506. goto out_cancel_ref;
  4507. }
  4508. /* create the directory */
  4509. kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
  4510. if (IS_ERR(kn)) {
  4511. ret = PTR_ERR(kn);
  4512. goto out_stat_exit;
  4513. }
  4514. cgrp->kn = kn;
  4515. init_cgroup_housekeeping(cgrp);
  4516. cgrp->self.parent = &parent->self;
  4517. cgrp->root = root;
  4518. cgrp->level = level;
  4519. ret = psi_cgroup_alloc(cgrp);
  4520. if (ret)
  4521. goto out_kernfs_remove;
  4522. ret = cgroup_bpf_inherit(cgrp);
  4523. if (ret)
  4524. goto out_psi_free;
  4525. /*
4526. * A new cgroup inherits the parent's effective freeze counter, and
4527. * if the parent has to be frozen, the child has to be frozen too.
  4528. */
  4529. cgrp->freezer.e_freeze = parent->freezer.e_freeze;
  4530. if (cgrp->freezer.e_freeze) {
  4531. /*
4532. * Set the CGRP_FREEZE flag, so that when a process is
4533. * attached to the child cgroup, it will become frozen.
  4534. * At this point the new cgroup is unpopulated, so we can
  4535. * consider it frozen immediately.
  4536. */
  4537. set_bit(CGRP_FREEZE, &cgrp->flags);
  4538. set_bit(CGRP_FROZEN, &cgrp->flags);
  4539. }
  4540. spin_lock_irq(&css_set_lock);
  4541. for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
  4542. cgrp->ancestor_ids[tcgrp->level] = cgroup_id(tcgrp);
  4543. if (tcgrp != cgrp) {
  4544. tcgrp->nr_descendants++;
  4545. /*
  4546. * If the new cgroup is frozen, all ancestor cgroups
  4547. * get a new frozen descendant, but their state can't
  4548. * change because of this.
  4549. */
  4550. if (cgrp->freezer.e_freeze)
  4551. tcgrp->freezer.nr_frozen_descendants++;
  4552. }
  4553. }
  4554. spin_unlock_irq(&css_set_lock);
  4555. if (notify_on_release(parent))
  4556. set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
  4557. if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
  4558. set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
  4559. cgrp->self.serial_nr = css_serial_nr_next++;
  4560. /* allocation complete, commit to creation */
  4561. list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
  4562. atomic_inc(&root->nr_cgrps);
  4563. cgroup_get_live(parent);
  4564. /*
  4565. * On the default hierarchy, a child doesn't automatically inherit
  4566. * subtree_control from the parent. Each is configured manually.
  4567. */
  4568. if (!cgroup_on_dfl(cgrp))
  4569. cgrp->subtree_control = cgroup_control(cgrp);
  4570. cgroup_propagate_control(cgrp);
  4571. return cgrp;
  4572. out_psi_free:
  4573. psi_cgroup_free(cgrp);
  4574. out_kernfs_remove:
  4575. kernfs_remove(cgrp->kn);
  4576. out_stat_exit:
  4577. if (cgroup_on_dfl(parent))
  4578. cgroup_rstat_exit(cgrp);
  4579. out_cancel_ref:
  4580. percpu_ref_exit(&cgrp->self.refcnt);
  4581. out_free_cgrp:
  4582. kfree(cgrp);
  4583. return ERR_PTR(ret);
  4584. }
  4585. static bool cgroup_check_hierarchy_limits(struct cgroup *parent)
  4586. {
  4587. struct cgroup *cgroup;
  4588. int ret = false;
  4589. int level = 1;
  4590. lockdep_assert_held(&cgroup_mutex);
  4591. for (cgroup = parent; cgroup; cgroup = cgroup_parent(cgroup)) {
  4592. if (cgroup->nr_descendants >= cgroup->max_descendants)
  4593. goto fail;
  4594. if (level > cgroup->max_depth)
  4595. goto fail;
  4596. level++;
  4597. }
  4598. ret = true;
  4599. fail:
  4600. return ret;
  4601. }
  4602. int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
  4603. {
  4604. struct cgroup *parent, *cgrp;
  4605. int ret;
  4606. /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
  4607. if (strchr(name, '\n'))
  4608. return -EINVAL;
  4609. parent = cgroup_kn_lock_live(parent_kn, false);
  4610. if (!parent)
  4611. return -ENODEV;
  4612. if (!cgroup_check_hierarchy_limits(parent)) {
  4613. ret = -EAGAIN;
  4614. goto out_unlock;
  4615. }
  4616. cgrp = cgroup_create(parent, name, mode);
  4617. if (IS_ERR(cgrp)) {
  4618. ret = PTR_ERR(cgrp);
  4619. goto out_unlock;
  4620. }
  4621. /*
4622. * This extra ref will be put in css_free_rwork_fn() and guarantees
  4623. * that @cgrp->kn is always accessible.
  4624. */
  4625. kernfs_get(cgrp->kn);
  4626. ret = cgroup_kn_set_ugid(cgrp->kn);
  4627. if (ret)
  4628. goto out_destroy;
  4629. ret = css_populate_dir(&cgrp->self);
  4630. if (ret)
  4631. goto out_destroy;
  4632. ret = cgroup_apply_control_enable(cgrp);
  4633. if (ret)
  4634. goto out_destroy;
  4635. TRACE_CGROUP_PATH(mkdir, cgrp);
  4636. /* let's create and online css's */
  4637. kernfs_activate(cgrp->kn);
  4638. ret = 0;
  4639. goto out_unlock;
  4640. out_destroy:
  4641. cgroup_destroy_locked(cgrp);
  4642. out_unlock:
  4643. cgroup_kn_unlock(parent_kn);
  4644. return ret;
  4645. }
  4646. /*
  4647. * This is called when the refcnt of a css is confirmed to be killed.
  4648. * css_tryget_online() is now guaranteed to fail. Tell the subsystem to
4649. * initiate destruction and put the css ref from kill_css().
  4650. */
  4651. static void css_killed_work_fn(struct work_struct *work)
  4652. {
  4653. struct cgroup_subsys_state *css =
  4654. container_of(work, struct cgroup_subsys_state, destroy_work);
  4655. mutex_lock(&cgroup_mutex);
  4656. do {
  4657. offline_css(css);
  4658. css_put(css);
  4659. /* @css can't go away while we're holding cgroup_mutex */
  4660. css = css->parent;
  4661. } while (css && atomic_dec_and_test(&css->online_cnt));
  4662. mutex_unlock(&cgroup_mutex);
  4663. }
  4664. /* css kill confirmation processing requires process context, bounce */
  4665. static void css_killed_ref_fn(struct percpu_ref *ref)
  4666. {
  4667. struct cgroup_subsys_state *css =
  4668. container_of(ref, struct cgroup_subsys_state, refcnt);
  4669. if (atomic_dec_and_test(&css->online_cnt)) {
  4670. INIT_WORK(&css->destroy_work, css_killed_work_fn);
  4671. queue_work(cgroup_destroy_wq, &css->destroy_work);
  4672. }
  4673. }
  4674. /**
  4675. * kill_css - destroy a css
  4676. * @css: css to destroy
  4677. *
  4678. * This function initiates destruction of @css by removing cgroup interface
  4679. * files and putting its base reference. ->css_offline() will be invoked
  4680. * asynchronously once css_tryget_online() is guaranteed to fail and when
  4681. * the reference count reaches zero, @css will be released.
  4682. */
  4683. static void kill_css(struct cgroup_subsys_state *css)
  4684. {
  4685. lockdep_assert_held(&cgroup_mutex);
  4686. if (css->flags & CSS_DYING)
  4687. return;
  4688. css->flags |= CSS_DYING;
  4689. /*
  4690. * This must happen before css is disassociated with its cgroup.
  4691. * See seq_css() for details.
  4692. */
  4693. css_clear_dir(css);
  4694. /*
  4695. * Killing would put the base ref, but we need to keep it alive
  4696. * until after ->css_offline().
  4697. */
  4698. css_get(css);
  4699. /*
  4700. * cgroup core guarantees that, by the time ->css_offline() is
  4701. * invoked, no new css reference will be given out via
  4702. * css_tryget_online(). We can't simply call percpu_ref_kill() and
  4703. * proceed to offlining css's because percpu_ref_kill() doesn't
  4704. * guarantee that the ref is seen as killed on all CPUs on return.
  4705. *
  4706. * Use percpu_ref_kill_and_confirm() to get notifications as each
  4707. * css is confirmed to be seen as killed on all CPUs.
  4708. */
  4709. percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
  4710. }
  4711. /**
  4712. * cgroup_destroy_locked - the first stage of cgroup destruction
  4713. * @cgrp: cgroup to be destroyed
  4714. *
  4715. * css's make use of percpu refcnts whose killing latency shouldn't be
  4716. * exposed to userland and are RCU protected. Also, cgroup core needs to
  4717. * guarantee that css_tryget_online() won't succeed by the time
  4718. * ->css_offline() is invoked. To satisfy all the requirements,
  4719. * destruction is implemented in the following two steps.
  4720. *
  4721. * s1. Verify @cgrp can be destroyed and mark it dying. Remove all
  4722. * userland visible parts and start killing the percpu refcnts of
  4723. * css's. Set up so that the next stage will be kicked off once all
  4724. * the percpu refcnts are confirmed to be killed.
  4725. *
  4726. * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
  4727. * rest of destruction. Once all cgroup references are gone, the
  4728. * cgroup is RCU-freed.
  4729. *
  4730. * This function implements s1. After this step, @cgrp is gone as far as
  4731. * the userland is concerned and a new cgroup with the same name may be
  4732. * created. As cgroup doesn't care about the names internally, this
  4733. * doesn't cause any problem.
  4734. */
  4735. static int cgroup_destroy_locked(struct cgroup *cgrp)
  4736. __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
  4737. {
  4738. struct cgroup *tcgrp, *parent = cgroup_parent(cgrp);
  4739. struct cgroup_subsys_state *css;
  4740. struct cgrp_cset_link *link;
  4741. int ssid;
  4742. lockdep_assert_held(&cgroup_mutex);
  4743. /*
  4744. * Only migration can raise populated from zero and we're already
  4745. * holding cgroup_mutex.
  4746. */
  4747. if (cgroup_is_populated(cgrp))
  4748. return -EBUSY;
  4749. /*
  4750. * Make sure there's no live children. We can't test emptiness of
  4751. * ->self.children as dead children linger on it while being
  4752. * drained; otherwise, "rmdir parent/child parent" may fail.
  4753. */
  4754. if (css_has_online_children(&cgrp->self))
  4755. return -EBUSY;
  4756. /*
  4757. * Mark @cgrp and the associated csets dead. The former prevents
  4758. * further task migration and child creation by disabling
  4759. * cgroup_lock_live_group(). The latter makes the csets ignored by
  4760. * the migration path.
  4761. */
  4762. cgrp->self.flags &= ~CSS_ONLINE;
  4763. spin_lock_irq(&css_set_lock);
  4764. list_for_each_entry(link, &cgrp->cset_links, cset_link)
  4765. link->cset->dead = true;
  4766. spin_unlock_irq(&css_set_lock);
  4767. /* initiate massacre of all css's */
  4768. for_each_css(css, ssid, cgrp)
  4769. kill_css(css);
  4770. /* clear and remove @cgrp dir, @cgrp has an extra ref on its kn */
  4771. css_clear_dir(&cgrp->self);
  4772. kernfs_remove(cgrp->kn);
  4773. if (parent && cgroup_is_threaded(cgrp))
  4774. parent->nr_threaded_children--;
  4775. spin_lock_irq(&css_set_lock);
  4776. for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
  4777. tcgrp->nr_descendants--;
  4778. tcgrp->nr_dying_descendants++;
  4779. /*
  4780. * If the dying cgroup is frozen, decrease frozen descendants
  4781. * counters of ancestor cgroups.
  4782. */
  4783. if (test_bit(CGRP_FROZEN, &cgrp->flags))
  4784. tcgrp->freezer.nr_frozen_descendants--;
  4785. }
  4786. spin_unlock_irq(&css_set_lock);
  4787. cgroup1_check_for_release(parent);
  4788. cgroup_bpf_offline(cgrp);
  4789. /* put the base reference */
  4790. percpu_ref_kill(&cgrp->self.refcnt);
  4791. return 0;
4792. }
  4793. int cgroup_rmdir(struct kernfs_node *kn)
  4794. {
  4795. struct cgroup *cgrp;
  4796. int ret = 0;
  4797. cgrp = cgroup_kn_lock_live(kn, false);
  4798. if (!cgrp)
  4799. return 0;
  4800. ret = cgroup_destroy_locked(cgrp);
  4801. if (!ret)
  4802. TRACE_CGROUP_PATH(rmdir, cgrp);
  4803. cgroup_kn_unlock(kn);
  4804. return ret;
  4805. }
  4806. static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
  4807. .show_options = cgroup_show_options,
  4808. .mkdir = cgroup_mkdir,
  4809. .rmdir = cgroup_rmdir,
  4810. .show_path = cgroup_show_path,
  4811. };
  4812. static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
  4813. {
  4814. struct cgroup_subsys_state *css;
  4815. pr_debug("Initializing cgroup subsys %s\n", ss->name);
  4816. mutex_lock(&cgroup_mutex);
  4817. idr_init(&ss->css_idr);
  4818. INIT_LIST_HEAD(&ss->cfts);
  4819. /* Create the root cgroup state for this subsystem */
  4820. ss->root = &cgrp_dfl_root;
  4821. css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
  4822. /* We don't handle early failures gracefully */
  4823. BUG_ON(IS_ERR(css));
  4824. init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
  4825. /*
  4826. * Root csses are never destroyed and we can't initialize
  4827. * percpu_ref during early init. Disable refcnting.
  4828. */
  4829. css->flags |= CSS_NO_REF;
  4830. if (early) {
  4831. /* allocation can't be done safely during early init */
  4832. css->id = 1;
  4833. } else {
  4834. css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
  4835. BUG_ON(css->id < 0);
  4836. }
  4837. /* Update the init_css_set to contain a subsys
  4838. * pointer to this state - since the subsystem is
4839. * newly registered, all tasks and hence the
4840. * init_css_set are in the subsystem's root cgroup. */
  4841. init_css_set.subsys[ss->id] = css;
  4842. have_fork_callback |= (bool)ss->fork << ss->id;
  4843. have_exit_callback |= (bool)ss->exit << ss->id;
  4844. have_release_callback |= (bool)ss->release << ss->id;
  4845. have_canfork_callback |= (bool)ss->can_fork << ss->id;
  4846. /* At system boot, before all subsystems have been
  4847. * registered, no tasks have been forked, so we don't
  4848. * need to invoke fork callbacks here. */
  4849. BUG_ON(!list_empty(&init_task.tasks));
  4850. BUG_ON(online_css(css));
  4851. mutex_unlock(&cgroup_mutex);
  4852. }
  4853. /**
  4854. * cgroup_init_early - cgroup initialization at system boot
  4855. *
  4856. * Initialize cgroups at system boot, and initialize any
  4857. * subsystems that request early init.
  4858. */
  4859. int __init cgroup_init_early(void)
  4860. {
  4861. static struct cgroup_fs_context __initdata ctx;
  4862. struct cgroup_subsys *ss;
  4863. int i;
  4864. ctx.root = &cgrp_dfl_root;
  4865. init_cgroup_root(&ctx);
  4866. cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
  4867. RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
  4868. for_each_subsys(ss, i) {
  4869. WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
  4870. "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
  4871. i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
  4872. ss->id, ss->name);
  4873. WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
  4874. "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
  4875. ss->id = i;
  4876. ss->name = cgroup_subsys_name[i];
  4877. if (!ss->legacy_name)
  4878. ss->legacy_name = cgroup_subsys_name[i];
  4879. if (ss->early_init)
  4880. cgroup_init_subsys(ss, true);
  4881. }
  4882. return 0;
  4883. }
  4884. /**
  4885. * cgroup_init - cgroup initialization
  4886. *
  4887. * Register cgroup filesystem and /proc file, and initialize
  4888. * any subsystems that didn't request early init.
  4889. */
  4890. int __init cgroup_init(void)
  4891. {
  4892. struct cgroup_subsys *ss;
  4893. int ssid;
  4894. BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16);
  4895. BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
  4896. BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
  4897. cgroup_rstat_boot();
  4898. /*
4899. * The latency of the synchronize_rcu() is too high for cgroups, so
  4900. * avoid it at the cost of forcing all readers into the slow path.
  4901. */
  4902. rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
  4903. get_user_ns(init_cgroup_ns.user_ns);
  4904. mutex_lock(&cgroup_mutex);
  4905. /*
  4906. * Add init_css_set to the hash table so that dfl_root can link to
  4907. * it during init.
  4908. */
  4909. hash_add(css_set_table, &init_css_set.hlist,
  4910. css_set_hash(init_css_set.subsys));
  4911. BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
  4912. mutex_unlock(&cgroup_mutex);
  4913. for_each_subsys(ss, ssid) {
  4914. if (ss->early_init) {
  4915. struct cgroup_subsys_state *css =
  4916. init_css_set.subsys[ss->id];
  4917. css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
  4918. GFP_KERNEL);
  4919. BUG_ON(css->id < 0);
  4920. } else {
  4921. cgroup_init_subsys(ss, false);
  4922. }
  4923. list_add_tail(&init_css_set.e_cset_node[ssid],
  4924. &cgrp_dfl_root.cgrp.e_csets[ssid]);
  4925. /*
  4926. * Setting dfl_root subsys_mask needs to consider the
  4927. * disabled flag and cftype registration needs kmalloc,
  4928. * both of which aren't available during early_init.
  4929. */
  4930. if (!cgroup_ssid_enabled(ssid))
  4931. continue;
  4932. if (cgroup1_ssid_disabled(ssid))
  4933. printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
  4934. ss->name);
  4935. cgrp_dfl_root.subsys_mask |= 1 << ss->id;
  4936. /* implicit controllers must be threaded too */
  4937. WARN_ON(ss->implicit_on_dfl && !ss->threaded);
  4938. if (ss->implicit_on_dfl)
  4939. cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
  4940. else if (!ss->dfl_cftypes)
  4941. cgrp_dfl_inhibit_ss_mask |= 1 << ss->id;
  4942. if (ss->threaded)
  4943. cgrp_dfl_threaded_ss_mask |= 1 << ss->id;
  4944. if (ss->dfl_cftypes == ss->legacy_cftypes) {
  4945. WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
  4946. } else {
  4947. WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
  4948. WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
  4949. }
  4950. if (ss->bind)
  4951. ss->bind(init_css_set.subsys[ssid]);
  4952. mutex_lock(&cgroup_mutex);
  4953. css_populate_dir(init_css_set.subsys[ssid]);
  4954. mutex_unlock(&cgroup_mutex);
  4955. }
  4956. /* init_css_set.subsys[] has been updated, re-hash */
  4957. hash_del(&init_css_set.hlist);
  4958. hash_add(css_set_table, &init_css_set.hlist,
  4959. css_set_hash(init_css_set.subsys));
  4960. WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
  4961. WARN_ON(register_filesystem(&cgroup_fs_type));
  4962. WARN_ON(register_filesystem(&cgroup2_fs_type));
  4963. WARN_ON(!proc_create_single("cgroups", 0, NULL, proc_cgroupstats_show));
  4964. #ifdef CONFIG_CPUSETS
  4965. WARN_ON(register_filesystem(&cpuset_fs_type));
  4966. #endif
  4967. return 0;
  4968. }
  4969. static int __init cgroup_wq_init(void)
  4970. {
  4971. /*
4972. * There isn't much point in executing the destruction path in
4973. * parallel. A good chunk of it is serialized with cgroup_mutex anyway.
  4974. * Use 1 for @max_active.
  4975. *
  4976. * We would prefer to do this in cgroup_init() above, but that
  4977. * is called before init_workqueues(): so leave this until after.
  4978. */
  4979. cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
  4980. BUG_ON(!cgroup_destroy_wq);
  4981. return 0;
  4982. }
  4983. core_initcall(cgroup_wq_init);
  4984. void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
  4985. {
  4986. struct kernfs_node *kn;
  4987. kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
  4988. if (!kn)
  4989. return;
  4990. kernfs_path(kn, buf, buflen);
  4991. kernfs_put(kn);
  4992. }
  4993. /*
  4994. * proc_cgroup_show()
  4995. * - Print task's cgroup paths into seq_file, one line for each hierarchy
  4996. * - Used for /proc/<pid>/cgroup.
  4997. */
  4998. int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
  4999. struct pid *pid, struct task_struct *tsk)
  5000. {
  5001. char *buf;
  5002. int retval;
  5003. struct cgroup_root *root;
  5004. retval = -ENOMEM;
  5005. buf = kmalloc(PATH_MAX, GFP_KERNEL);
  5006. if (!buf)
  5007. goto out;
  5008. mutex_lock(&cgroup_mutex);
  5009. spin_lock_irq(&css_set_lock);
  5010. for_each_root(root) {
  5011. struct cgroup_subsys *ss;
  5012. struct cgroup *cgrp;
  5013. int ssid, count = 0;
  5014. if (root == &cgrp_dfl_root && !cgrp_dfl_visible)
  5015. continue;
  5016. seq_printf(m, "%d:", root->hierarchy_id);
  5017. if (root != &cgrp_dfl_root)
  5018. for_each_subsys(ss, ssid)
  5019. if (root->subsys_mask & (1 << ssid))
  5020. seq_printf(m, "%s%s", count++ ? "," : "",
  5021. ss->legacy_name);
  5022. if (strlen(root->name))
  5023. seq_printf(m, "%sname=%s", count ? "," : "",
  5024. root->name);
  5025. seq_putc(m, ':');
  5026. cgrp = task_cgroup_from_root(tsk, root);
  5027. /*
  5028. * On traditional hierarchies, all zombie tasks show up as
  5029. * belonging to the root cgroup. On the default hierarchy,
  5030. * while a zombie doesn't show up in "cgroup.procs" and
  5031. * thus can't be migrated, its /proc/PID/cgroup keeps
  5032. * reporting the cgroup it belonged to before exiting. If
  5033. * the cgroup is removed before the zombie is reaped,
  5034. * " (deleted)" is appended to the cgroup path.
  5035. */
  5036. if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
  5037. retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
  5038. current->nsproxy->cgroup_ns);
  5039. if (retval >= PATH_MAX)
  5040. retval = -ENAMETOOLONG;
  5041. if (retval < 0)
  5042. goto out_unlock;
  5043. seq_puts(m, buf);
  5044. } else {
  5045. seq_puts(m, "/");
  5046. }
  5047. if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
  5048. seq_puts(m, " (deleted)\n");
  5049. else
  5050. seq_putc(m, '\n');
  5051. }
  5052. retval = 0;
  5053. out_unlock:
  5054. spin_unlock_irq(&css_set_lock);
  5055. mutex_unlock(&cgroup_mutex);
  5056. kfree(buf);
  5057. out:
  5058. return retval;
  5059. }
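/*
 * Illustrative example, not part of the original file: each line emitted
 * above has the form "hierarchy-id:controller-list:path".  On the default
 * hierarchy the controller list is empty, e.g.
 *
 *	0::/user.slice/user-1000.slice/session-2.scope
 *
 * while a v1 hierarchy line might look like
 *
 *	4:cpu,cpuacct:/system.slice
 *
 * The hierarchy ids and paths shown here are invented for illustration.
 */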
  5060. /**
  5061. * cgroup_fork - initialize cgroup related fields during copy_process()
5062. * @child: pointer to task_struct of the child process.
  5063. *
  5064. * A task is associated with the init_css_set until cgroup_post_fork()
  5065. * attaches it to the target css_set.
  5066. */
  5067. void cgroup_fork(struct task_struct *child)
  5068. {
  5069. RCU_INIT_POINTER(child->cgroups, &init_css_set);
  5070. INIT_LIST_HEAD(&child->cg_list);
  5071. }
  5072. static struct cgroup *cgroup_get_from_file(struct file *f)
  5073. {
  5074. struct cgroup_subsys_state *css;
  5075. struct cgroup *cgrp;
  5076. css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
  5077. if (IS_ERR(css))
  5078. return ERR_CAST(css);
  5079. cgrp = css->cgroup;
  5080. if (!cgroup_on_dfl(cgrp)) {
  5081. cgroup_put(cgrp);
  5082. return ERR_PTR(-EBADF);
  5083. }
  5084. return cgrp;
  5085. }
  5086. /**
  5087. * cgroup_css_set_fork - find or create a css_set for a child process
  5088. * @kargs: the arguments passed to create the child process
  5089. *
5090. * This function finds or creates a new css_set which the child
  5091. * process will be attached to in cgroup_post_fork(). By default,
  5092. * the child process will be given the same css_set as its parent.
  5093. *
5094. * If CLONE_INTO_CGROUP is specified, this function will try to find an
5095. * existing css_set which includes the requested cgroup and, if none
5096. * exists, create a new css_set that the child will be attached to later.
5097. * If this function succeeds, it will hold cgroup_threadgroup_rwsem on
5098. * return. If CLONE_INTO_CGROUP is requested, this function will also grab
5099. * the cgroup mutex before grabbing cgroup_threadgroup_rwsem and will hold
5100. * a reference to the target cgroup.
  5101. */
  5102. static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
  5103. __acquires(&cgroup_mutex) __acquires(&cgroup_threadgroup_rwsem)
  5104. {
  5105. int ret;
  5106. struct cgroup *dst_cgrp = NULL;
  5107. struct css_set *cset;
  5108. struct super_block *sb;
  5109. struct file *f;
  5110. if (kargs->flags & CLONE_INTO_CGROUP)
  5111. mutex_lock(&cgroup_mutex);
  5112. cgroup_threadgroup_change_begin(current);
  5113. spin_lock_irq(&css_set_lock);
  5114. cset = task_css_set(current);
  5115. get_css_set(cset);
  5116. spin_unlock_irq(&css_set_lock);
  5117. if (!(kargs->flags & CLONE_INTO_CGROUP)) {
  5118. kargs->cset = cset;
  5119. return 0;
  5120. }
  5121. f = fget_raw(kargs->cgroup);
  5122. if (!f) {
  5123. ret = -EBADF;
  5124. goto err;
  5125. }
  5126. sb = f->f_path.dentry->d_sb;
  5127. dst_cgrp = cgroup_get_from_file(f);
  5128. if (IS_ERR(dst_cgrp)) {
  5129. ret = PTR_ERR(dst_cgrp);
  5130. dst_cgrp = NULL;
  5131. goto err;
  5132. }
  5133. if (cgroup_is_dead(dst_cgrp)) {
  5134. ret = -ENODEV;
  5135. goto err;
  5136. }
  5137. /*
5138. * Verify that the target cgroup is writable for us. This is
  5139. * usually done by the vfs layer but since we're not going through
  5140. * the vfs layer here we need to do it "manually".
  5141. */
  5142. ret = cgroup_may_write(dst_cgrp, sb);
  5143. if (ret)
  5144. goto err;
  5145. ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
  5146. !(kargs->flags & CLONE_THREAD),
  5147. current->nsproxy->cgroup_ns);
  5148. if (ret)
  5149. goto err;
  5150. kargs->cset = find_css_set(cset, dst_cgrp);
  5151. if (!kargs->cset) {
  5152. ret = -ENOMEM;
  5153. goto err;
  5154. }
  5155. put_css_set(cset);
  5156. fput(f);
  5157. kargs->cgrp = dst_cgrp;
  5158. return ret;
  5159. err:
  5160. cgroup_threadgroup_change_end(current);
  5161. mutex_unlock(&cgroup_mutex);
  5162. if (f)
  5163. fput(f);
  5164. if (dst_cgrp)
  5165. cgroup_put(dst_cgrp);
  5166. put_css_set(cset);
  5167. if (kargs->cset)
  5168. put_css_set(kargs->cset);
  5169. return ret;
  5170. }
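/*
 * Illustrative sketch, not part of the original file: from userspace,
 * CLONE_INTO_CGROUP is requested via clone3() by passing a file descriptor
 * for the target cgroup2 directory.  Error handling is omitted; the
 * kernel-side checks are the ones above (cgroup_may_write() and
 * cgroup_attach_permissions()).
 *
 *	struct clone_args args = {
 *		.flags		= CLONE_INTO_CGROUP,
 *		.exit_signal	= SIGCHLD,
 *		.cgroup		= (__u64)cgroup_fd,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *
 * where cgroup_fd was obtained by opening a directory under the cgroup2
 * mount.
 */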
  5171. /**
  5172. * cgroup_css_set_put_fork - drop references we took during fork
  5173. * @kargs: the arguments passed to create the child process
  5174. *
  5175. * Drop references to the prepared css_set and target cgroup if
  5176. * CLONE_INTO_CGROUP was requested.
  5177. */
  5178. static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
  5179. __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
  5180. {
  5181. cgroup_threadgroup_change_end(current);
  5182. if (kargs->flags & CLONE_INTO_CGROUP) {
  5183. struct cgroup *cgrp = kargs->cgrp;
  5184. struct css_set *cset = kargs->cset;
  5185. mutex_unlock(&cgroup_mutex);
  5186. if (cset) {
  5187. put_css_set(cset);
  5188. kargs->cset = NULL;
  5189. }
  5190. if (cgrp) {
  5191. cgroup_put(cgrp);
  5192. kargs->cgrp = NULL;
  5193. }
  5194. }
  5195. }
  5196. /**
  5197. * cgroup_can_fork - called on a new task before the process is exposed
5198. * @child: the child process
* @kargs: the arguments passed to create the child process
  5199. *
  5200. * This prepares a new css_set for the child process which the child will
  5201. * be attached to in cgroup_post_fork().
5202. * This calls the subsystem can_fork() callbacks. If a can_fork() callback
5203. * returns an error, the fork is aborted with that error code. This
5204. * allows a cgroup subsystem to conditionally allow or deny new forks.
  5205. */
  5206. int cgroup_can_fork(struct task_struct *child, struct kernel_clone_args *kargs)
  5207. {
  5208. struct cgroup_subsys *ss;
  5209. int i, j, ret;
  5210. ret = cgroup_css_set_fork(kargs);
  5211. if (ret)
  5212. return ret;
  5213. do_each_subsys_mask(ss, i, have_canfork_callback) {
  5214. ret = ss->can_fork(child, kargs->cset);
  5215. if (ret)
  5216. goto out_revert;
  5217. } while_each_subsys_mask();
  5218. return 0;
  5219. out_revert:
  5220. for_each_subsys(ss, j) {
  5221. if (j >= i)
  5222. break;
  5223. if (ss->cancel_fork)
  5224. ss->cancel_fork(child, kargs->cset);
  5225. }
  5226. cgroup_css_set_put_fork(kargs);
  5227. return ret;
  5228. }
  5229. /**
  5230. * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
  5231. * @child: the child process
  5232. * @kargs: the arguments passed to create the child process
  5233. *
  5234. * This calls the cancel_fork() callbacks if a fork failed *after*
5235. * cgroup_can_fork() succeeded and cleans up references we took to
  5236. * prepare a new css_set for the child process in cgroup_can_fork().
  5237. */
  5238. void cgroup_cancel_fork(struct task_struct *child,
  5239. struct kernel_clone_args *kargs)
  5240. {
  5241. struct cgroup_subsys *ss;
  5242. int i;
  5243. for_each_subsys(ss, i)
  5244. if (ss->cancel_fork)
  5245. ss->cancel_fork(child, kargs->cset);
  5246. cgroup_css_set_put_fork(kargs);
  5247. }
  5248. /**
  5249. * cgroup_post_fork - finalize cgroup setup for the child process
5250. * @child: the child process
* @kargs: the arguments passed to create the child process
  5251. *
  5252. * Attach the child process to its css_set calling the subsystem fork()
  5253. * callbacks.
  5254. */
  5255. void cgroup_post_fork(struct task_struct *child,
  5256. struct kernel_clone_args *kargs)
  5257. __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
  5258. {
  5259. struct cgroup_subsys *ss;
  5260. struct css_set *cset;
  5261. int i;
  5262. cset = kargs->cset;
  5263. kargs->cset = NULL;
  5264. spin_lock_irq(&css_set_lock);
  5265. /* init tasks are special, only link regular threads */
  5266. if (likely(child->pid)) {
  5267. WARN_ON_ONCE(!list_empty(&child->cg_list));
  5268. cset->nr_tasks++;
  5269. css_set_move_task(child, NULL, cset, false);
  5270. } else {
  5271. put_css_set(cset);
  5272. cset = NULL;
  5273. }
  5274. /*
5275. * If the cgroup has to be frozen, the new task has to be frozen too. Set
5276. * the JOBCTL_TRAP_FREEZE jobctl bit to get the task into the
  5277. * frozen state.
  5278. */
  5279. if (unlikely(cgroup_task_freeze(child))) {
  5280. spin_lock(&child->sighand->siglock);
  5281. WARN_ON_ONCE(child->frozen);
  5282. child->jobctl |= JOBCTL_TRAP_FREEZE;
  5283. spin_unlock(&child->sighand->siglock);
  5284. /*
  5285. * Calling cgroup_update_frozen() isn't required here,
  5286. * because it will be called anyway a bit later from
  5287. * do_freezer_trap(). So we avoid cgroup's transient switch
  5288. * from the frozen state and back.
  5289. */
  5290. }
  5291. spin_unlock_irq(&css_set_lock);
  5292. /*
  5293. * Call ss->fork(). This must happen after @child is linked on
  5294. * css_set; otherwise, @child might change state between ->fork()
  5295. * and addition to css_set.
  5296. */
  5297. do_each_subsys_mask(ss, i, have_fork_callback) {
  5298. ss->fork(child);
  5299. } while_each_subsys_mask();
  5300. /* Make the new cset the root_cset of the new cgroup namespace. */
  5301. if (kargs->flags & CLONE_NEWCGROUP) {
  5302. struct css_set *rcset = child->nsproxy->cgroup_ns->root_cset;
  5303. get_css_set(cset);
  5304. child->nsproxy->cgroup_ns->root_cset = cset;
  5305. put_css_set(rcset);
  5306. }
  5307. cgroup_css_set_put_fork(kargs);
  5308. }
/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	int i;

	spin_lock_irq(&css_set_lock);

	WARN_ON_ONCE(list_empty(&tsk->cg_list));
	cset = task_css_set(tsk);
	css_set_move_task(tsk, cset, NULL, false);
	list_add_tail(&tsk->cg_list, &cset->dying_tasks);
	cset->nr_tasks--;

	WARN_ON_ONCE(cgroup_task_frozen(tsk));
	if (unlikely(cgroup_task_freeze(tsk)))
		cgroup_update_frozen(task_dfl_cgroup(tsk));

	spin_unlock_irq(&css_set_lock);

	/* see cgroup_post_fork() for details */
	do_each_subsys_mask(ss, i, have_exit_callback) {
		ss->exit(tsk);
	} while_each_subsys_mask();
}

void cgroup_release(struct task_struct *task)
{
	struct cgroup_subsys *ss;
	int ssid;

	do_each_subsys_mask(ss, ssid, have_release_callback) {
		ss->release(task);
	} while_each_subsys_mask();

	spin_lock_irq(&css_set_lock);
	css_set_skip_task_iters(task_css_set(task), task);
	list_del_init(&task->cg_list);
	spin_unlock_irq(&css_set_lock);
}

void cgroup_free(struct task_struct *task)
{
	struct css_set *cset = task_css_set(task);

	put_css_set(cset);
}

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			static_branch_disable(cgroup_subsys_enabled_key[i]);
			pr_info("Disabling %s control group subsystem\n",
				ss->name);
		}

		for (i = 0; i < OPT_FEATURE_COUNT; i++) {
			if (strcmp(token, cgroup_opt_feature_names[i]))
				continue;

			cgroup_feature_disable_mask |= 1 << i;
			pr_info("Disabling %s control group feature\n",
				cgroup_opt_feature_names[i]);
			break;
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);

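/*
 * Illustrative note (not part of the original source): cgroup_disable= is a
 * boot parameter, so it is exercised from the kernel command line rather
 * than from C. For example, booting with
 *
 *	cgroup_disable=memory,pressure
 *
 * would match "memory" against ss->name / ss->legacy_name and disable that
 * controller's static branch, while "pressure" would be compared against
 * cgroup_opt_feature_names[] and set the corresponding bit in
 * cgroup_feature_disable_mask (assuming "pressure" is one of the optional
 * feature names built into this kernel).
 */
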
void __init __weak enable_debug_cgroup(void) { }

static int __init enable_cgroup_debug(char *str)
{
	cgroup_debug = true;
	enable_debug_cgroup();
	return 1;
}
__setup("cgroup_debug", enable_cgroup_debug);

/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it. If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct file_system_type *s_type = dentry->d_sb->s_type;
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) ||
	    !kn || kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point. @kn->priv is RCU
	 * protected for this access. See css_release_work_fn() for details.
	 */
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}

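/*
 * Illustrative sketch, not part of the original cgroup.c: how a caller that
 * holds an open file on a cgroup directory might resolve and pin the css of
 * a given subsystem. The function name is hypothetical; the pattern is
 * lookup, check for ERR_PTR, use the online css, then drop the reference
 * with css_put().
 */
static int __maybe_unused example_css_from_cgroup_file(struct file *f,
							struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	css = css_tryget_online_from_dir(f->f_path.dentry, ss);
	if (IS_ERR(css))
		return PTR_ERR(css);	/* -EBADF or -ENOENT from above */

	/* ... operate on the pinned, online css here ... */

	css_put(css);	/* drop the reference css_tryget_online() took */
	return 0;
}
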
/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&ss->css_idr, id);
}

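/*
 * Illustrative sketch, not part of the original cgroup.c: css_from_id() only
 * looks the css up in the subsystem's idr and takes no reference, so a
 * caller that wants to use the css beyond the RCU read section has to pin
 * it itself, e.g. with css_tryget_online(). The helper name below is
 * hypothetical.
 */
static bool __maybe_unused example_css_id_is_online(int id,
						    struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;
	bool online = false;

	rcu_read_lock();
	css = css_from_id(id, ss);
	if (css && css_tryget_online(css)) {
		online = true;
		css_put(css);	/* drop the temporary reference again */
	}
	rcu_read_unlock();

	return online;
}
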
/**
 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
 * @path: path on the default hierarchy
 *
 * Find the cgroup at @path on the default hierarchy, increment its
 * reference count and return it. Returns pointer to the found cgroup on
 * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR)
 * if @path points to a non-directory.
 */
struct cgroup *cgroup_get_from_path(const char *path)
{
	struct kernfs_node *kn;
	struct cgroup *cgrp;

	mutex_lock(&cgroup_mutex);

	kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
	if (kn) {
		if (kernfs_type(kn) == KERNFS_DIR) {
			cgrp = kn->priv;
			cgroup_get_live(cgrp);
		} else {
			cgrp = ERR_PTR(-ENOTDIR);
		}
		kernfs_put(kn);
	} else {
		cgrp = ERR_PTR(-ENOENT);
	}

	mutex_unlock(&cgroup_mutex);
	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_path);

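/*
 * Illustrative sketch, not part of the original cgroup.c: @path is walked
 * from the root of the default (v2) hierarchy, not from the mount point.
 * The reference taken on success must be dropped with cgroup_put(). The
 * function name and the "/foo/bar" path below are hypothetical.
 */
static int __maybe_unused example_lookup_cgroup_by_path(void)
{
	struct cgroup *cgrp;

	cgrp = cgroup_get_from_path("/foo/bar");
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);	/* -ENOENT or -ENOTDIR */

	/* ... use cgrp ... */

	cgroup_put(cgrp);	/* drop the reference the lookup took */
	return 0;
}
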
/**
 * cgroup_get_from_fd - get a cgroup pointer from a fd
 * @fd: fd obtained by open(cgroup2_dir)
 *
 * Find the cgroup from a fd which should be obtained
 * by opening a cgroup directory. Returns a pointer to the
 * cgroup on success. ERR_PTR is returned if the cgroup
 * cannot be found.
 */
struct cgroup *cgroup_get_from_fd(int fd)
{
	struct cgroup *cgrp;
	struct file *f;

	f = fget_raw(fd);
	if (!f)
		return ERR_PTR(-EBADF);

	cgrp = cgroup_get_from_file(f);
	fput(f);
	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_fd);

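/*
 * Illustrative sketch, not part of the original cgroup.c: resolving a
 * userspace-supplied cgroup directory fd. The fd is only needed for the
 * duration of the call; the returned cgroup carries its own reference,
 * which the caller drops with cgroup_put(). The function name below is
 * hypothetical.
 */
static int __maybe_unused example_resolve_cgroup_fd(int ufd)
{
	struct cgroup *cgrp;

	cgrp = cgroup_get_from_fd(ufd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);	/* e.g. -EBADF for a bad fd */

	/* ... use cgrp ... */

	cgroup_put(cgrp);
	return 0;
}
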
static u64 power_of_ten(int power)
{
	u64 v = 1;
	while (power--)
		v *= 10;
	return v;
}

/**
 * cgroup_parse_float - parse a floating number
 * @input: input string
 * @dec_shift: number of decimal digits to shift
 * @v: output
 *
 * Parse a decimal floating point number in @input and store the result in
 * @v with decimal point right shifted @dec_shift times. For example, if
 * @input is "12.3456" and @dec_shift is 3, *@v will be set to 12345.
 * Returns 0 on success, -errno otherwise.
 *
 * There's nothing cgroup specific about this function except that cgroup
 * is currently its only user.
 */
int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
{
	s64 whole, frac = 0;
	int fstart = 0, fend = 0, flen;

	if (!sscanf(input, "%lld.%n%lld%n", &whole, &fstart, &frac, &fend))
		return -EINVAL;
	if (frac < 0)
		return -EINVAL;

	flen = fend > fstart ? fend - fstart : 0;
	if (flen < dec_shift)
		frac *= power_of_ten(dec_shift - flen);
	else
		frac = DIV_ROUND_CLOSEST_ULL(frac, power_of_ten(flen - dec_shift));

	*v = whole * power_of_ten(dec_shift) + frac;
	return 0;
}

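/*
 * Illustrative sketch, not part of the original cgroup.c: with a @dec_shift
 * of 2, "1.5" parses to 150 (1.5 * 10^2); fractional digits beyond
 * @dec_shift are rounded via DIV_ROUND_CLOSEST_ULL() above. The helper name
 * and the pr_info() are only here to show the result.
 */
static void __maybe_unused example_parse_float(void)
{
	s64 v;

	if (!cgroup_parse_float("1.5", 2, &v))
		pr_info("parsed value: %lld\n", v);	/* v == 150 */
}
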
/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)

DEFINE_SPINLOCK(cgroup_sk_update_lock);
static bool cgroup_sk_alloc_disabled __read_mostly;

void cgroup_sk_alloc_disable(void)
{
	if (cgroup_sk_alloc_disabled)
		return;
	pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
	cgroup_sk_alloc_disabled = true;
}

#else

#define cgroup_sk_alloc_disabled	false

#endif

void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
	if (cgroup_sk_alloc_disabled) {
		skcd->no_refcnt = 1;
		return;
	}

	/* Don't associate the sock with unrelated interrupted task's cgroup. */
	if (in_interrupt())
		return;

	rcu_read_lock();

	while (true) {
		struct css_set *cset;

		cset = task_css_set(current);
		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
			skcd->val = (unsigned long)cset->dfl_cgrp;
			cgroup_bpf_get(cset->dfl_cgrp);
			break;
		}
		cpu_relax();
	}

	rcu_read_unlock();
}

void cgroup_sk_clone(struct sock_cgroup_data *skcd)
{
	if (skcd->val) {
		if (skcd->no_refcnt)
			return;
		/*
		 * We might be cloning a socket which is left in an empty
		 * cgroup and the cgroup might have already been rmdir'd.
		 * Don't use cgroup_get_live().
		 */
		cgroup_get(sock_cgroup_ptr(skcd));
		cgroup_bpf_get(sock_cgroup_ptr(skcd));
	}
}

void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
	struct cgroup *cgrp = sock_cgroup_ptr(skcd);

	if (skcd->no_refcnt)
		return;
	cgroup_bpf_put(cgrp);
	cgroup_put(cgrp);
}

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#ifdef CONFIG_CGROUP_BPF
int cgroup_bpf_attach(struct cgroup *cgrp,
		      struct bpf_prog *prog, struct bpf_prog *replace_prog,
		      struct bpf_cgroup_link *link,
		      enum bpf_attach_type type,
		      u32 flags)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_query(cgrp, attr, uattr);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

#ifdef CONFIG_SYSFS
static ssize_t show_delegatable_files(struct cftype *files, char *buf,
				      ssize_t size, const char *prefix)
{
	struct cftype *cft;
	ssize_t ret = 0;

	for (cft = files; cft && cft->name[0] != '\0'; cft++) {
		if (!(cft->flags & CFTYPE_NS_DELEGATABLE))
			continue;

		if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled())
			continue;

		if (prefix)
			ret += snprintf(buf + ret, size - ret, "%s.", prefix);

		ret += snprintf(buf + ret, size - ret, "%s\n", cft->name);

		if (WARN_ON(ret >= size))
			break;
	}

	return ret;
}

static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	struct cgroup_subsys *ss;
	int ssid;
	ssize_t ret = 0;

	ret = show_delegatable_files(cgroup_base_files, buf, PAGE_SIZE - ret,
				     NULL);

	for_each_subsys(ss, ssid)
		ret += show_delegatable_files(ss->dfl_cftypes, buf + ret,
					      PAGE_SIZE - ret,
					      cgroup_subsys_name[ssid]);

	return ret;
}
static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);

static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE,
			"nsdelegate\n"
			"memory_localevents\n"
			"memory_recursiveprot\n");
}
static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);

static struct attribute *cgroup_sysfs_attrs[] = {
	&cgroup_delegate_attr.attr,
	&cgroup_features_attr.attr,
	NULL,
};

static const struct attribute_group cgroup_sysfs_attr_group = {
	.attrs = cgroup_sysfs_attrs,
	.name = "cgroup",
};

static int __init cgroup_sysfs_init(void)
{
	return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group);
}
subsys_initcall(cgroup_sysfs_init);
#endif /* CONFIG_SYSFS */
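
/*
 * Illustrative note (not part of the original source): since the attribute
 * group above is registered on kernel_kobj with the name "cgroup", the two
 * attributes appear as /sys/kernel/cgroup/delegate and
 * /sys/kernel/cgroup/features. Given features_show() above, reading
 * "features" yields the three lines nsdelegate, memory_localevents and
 * memory_recursiveprot. "delegate" lists the CFTYPE_NS_DELEGATABLE base
 * files (e.g. cgroup.procs) without a prefix, followed by each subsystem's
 * delegatable files prefixed with the subsystem name.
 */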