io_uring.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
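/*
 * Illustrative sketch (not part of the kernel source): one way a userspace
 * consumer could pair with the barriers described above, written with C11
 * atomics standing in for the kernel's smp_load_acquire()/smp_store_release().
 * The cq_head/cq_tail/cq_mask/cqes names are assumptions for the pointers an
 * application obtains by mmap()ing the CQ ring; real programs would normally
 * use the liburing helpers instead.
 *
 *	#include <stddef.h>
 *	#include <stdatomic.h>
 *	#include <linux/io_uring.h>
 *
 *	static struct io_uring_cqe *peek_cqe(_Atomic unsigned int *cq_head,
 *					     _Atomic unsigned int *cq_tail,
 *					     unsigned int cq_mask,
 *					     struct io_uring_cqe *cqes)
 *	{
 *		unsigned int head = atomic_load_explicit(cq_head, memory_order_relaxed);
 *		// acquire pairs with the kernel's release store of the CQ tail
 *		unsigned int tail = atomic_load_explicit(cq_tail, memory_order_acquire);
 *
 *		if (head == tail)
 *			return NULL;			// ring is empty
 *		return &cqes[head & cq_mask];		// entry is now safe to read
 *	}
 *
 *	static void pop_cqe(_Atomic unsigned int *cq_head)
 *	{
 *		unsigned int head = atomic_load_explicit(cq_head, memory_order_relaxed);
 *		// release pairs with the kernel's head read in io_get_cqring()
 *		atomic_store_explicit(cq_head, head + 1, memory_order_release);
 *	}
 */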
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)
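/*
 * Illustrative arithmetic for the constants above (not part of the source):
 * fixed files live in a two-level table, so a registered-file index splits
 * into a table number and a slot within that table. Roughly, using the
 * fixed_file_* structures defined further down:
 *
 *	unsigned int table = index >> IORING_FILE_TABLE_SHIFT;	// which 512-entry table
 *	unsigned int slot  = index & IORING_FILE_TABLE_MASK;	// offset inside it
 *	struct file *f     = ctx->file_data->table[table].files[slot];
 *
 * Each fixed_file_table then holds 512 pointers, i.e. 512 * 8 bytes = one
 * 4 KiB page on 64-bit (the "shift of 9" comment above), and the upper bound
 * works out to 64 * 512 = 32768 fixed files per ring.
 */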
struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32 sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
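/*
 * Illustrative note on the head/tail encoding above (not part of the source):
 * both counters are free-running u32 values and are only masked when indexing
 * the entry arrays, so fill level and fullness fall out of plain unsigned
 * subtraction even across wraparound. For the CQ side, roughly:
 *
 *	unsigned int queued = rings->cq.tail - rings->cq.head;		// entries not yet reaped
 *	bool full           = queued == rings->cq_ring_entries;	// no room for another CQE
 *	unsigned int idx    = rings->cq.tail & rings->cq_ring_mask;	// slot for the next CQE
 *
 * The SQ side works the same way with the sq_* fields.
 */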
struct io_mapped_ubuf {
	u64 ubuf;
	size_t len;
	struct bio_vec *bvec;
	unsigned int nr_bvecs;
	unsigned long acct_pages;
};

struct fixed_file_table {
	struct file **files;
};

struct fixed_file_ref_node {
	struct percpu_ref refs;
	struct list_head node;
	struct list_head file_list;
	struct fixed_file_data *file_data;
	struct llist_node llist;
	bool done;
};

struct fixed_file_data {
	struct fixed_file_table *table;
	struct io_ring_ctx *ctx;
	struct fixed_file_ref_node *node;
	struct percpu_ref refs;
	struct completion done;
	struct list_head ref_list;
	spinlock_t lock;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_sq_data {
	refcount_t refs;
	struct mutex lock;

	/* ctx's that are using this sqd */
	struct list_head ctx_list;
	struct list_head ctx_new_list;
	struct mutex ctx_lock;

	struct task_struct *thread;
	struct wait_queue_head wait;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int flags;
		unsigned int compat: 1;
		unsigned int limit_mem: 1;
		unsigned int cq_overflow_flushed: 1;
		unsigned int drain_next: 1;
		unsigned int eventfd_async: 1;
		unsigned int restricted: 1;
		unsigned int sqo_dead: 1;
		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array. (An illustrative lookup sketch follows this struct.)
		 */
		u32 *sq_array;
		unsigned cached_sq_head;
		unsigned sq_entries;
		unsigned sq_mask;
		unsigned sq_thread_idle;
		unsigned cached_sq_dropped;
		unsigned cached_cq_overflow;
		unsigned long sq_check_overflow;

		struct list_head defer_list;
		struct list_head timeout_list;
		struct list_head cq_overflow_list;

		struct io_uring_sqe *sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings *rings;

	/* IO offload */
	struct io_wq *io_wq;

	/*
	 * For SQPOLL usage - we hold a reference to the parent task, so we
	 * have access to the ->files
	 */
	struct task_struct *sqo_task;

	/* Only used for accounting purposes */
	struct mm_struct *mm_account;

#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *sqo_blkcg_css;
#endif

	struct io_sq_data *sq_data;	/* if using sq thread polling */
	struct wait_queue_head sqo_sq_wait;
	struct wait_queue_entry sqo_wait_entry;
	struct list_head sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data *file_data;
	unsigned nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned nr_user_bufs;
	struct io_mapped_ubuf *user_bufs;

	struct user_struct *user;

	const struct cred *creds;

#ifdef CONFIG_AUDIT
	kuid_t loginuid;
	unsigned int sessionid;
#endif

	struct completion ref_comp;
	struct completion sq_thread_comp;

	/* if all else fails... */
	struct io_kiocb *fallback_req;

#if defined(CONFIG_UNIX)
	struct socket *ring_sock;
#endif

	struct xarray io_buffers;

	struct xarray personalities;
	u32 pers_next;

	struct {
		unsigned cached_cq_tail;
		unsigned cq_entries;
		unsigned cq_mask;
		atomic_t cq_timeouts;
		unsigned cq_last_tm_flush;
		unsigned long cq_check_overflow;
		struct wait_queue_head cq_wait;
		struct fasync_struct *cq_fasync;
		struct eventfd_ctx *cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex uring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head iopoll_list;
		struct hlist_head *cancel_hash;
		unsigned cancel_hash_bits;
		bool poll_multi_file;

		spinlock_t inflight_lock;
		struct list_head inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work file_put_work;
	struct llist_head file_put_llist;

	struct work_struct exit_work;
	struct io_restriction restrictions;
};
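/*
 * Illustrative sketch of the sq_array indirection noted above (not part of
 * the source): the kernel consumes one submission per cached_sq_head tick and
 * goes through the application-written index array to find the actual SQE,
 * roughly:
 *
 *	unsigned int head = ctx->cached_sq_head++ & ctx->sq_mask;	// ring slot
 *	unsigned int idx  = READ_ONCE(ctx->sq_array[head]);		// app-chosen SQE index
 *
 *	if (idx < ctx->sq_entries)
 *		sqe = &ctx->sq_sqes[idx];				// the SQE to submit
 *	// otherwise the entry is invalid: it is skipped and the dropped
 *	// counters (cached_sq_dropped / rings->sq_dropped) are bumped
 *
 * The in-tree lookup (io_get_sqring(), referenced in the header comment) does
 * this plus the dropped-entry accounting described in struct io_rings.
 */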
/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file *file;
	union {
		struct wait_queue_head *head;
		u64 addr;
	};
	__poll_t events;
	bool done;
	bool canceled;
	struct wait_queue_entry wait;
};

struct io_close {
	struct file *file;
	struct file *put_file;
	int fd;
};

struct io_timeout_data {
	struct io_kiocb *req;
	struct hrtimer timer;
	struct timespec64 ts;
	enum hrtimer_mode mode;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	unsigned long nofile;
};

struct io_sync {
	struct file *file;
	loff_t len;
	loff_t off;
	int flags;
	int mode;
};

struct io_cancel {
	struct file *file;
	u64 addr;
};

struct io_timeout {
	struct file *file;
	u32 off;
	u32 target_seq;
	struct list_head list;
};

struct io_timeout_rem {
	struct file *file;
	u64 addr;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb kiocb;
	u64 addr;
	u64 len;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	int msg_flags;
	int bgid;
	size_t len;
	struct io_buffer *kbuf;
};

struct io_open {
	struct file *file;
	int dfd;
	bool ignore_nonblock;
	struct filename *filename;
	struct open_how how;
	unsigned long nofile;
};

struct io_files_update {
	struct file *file;
	u64 arg;
	u32 nr_args;
	u32 offset;
};

struct io_fadvise {
	struct file *file;
	u64 offset;
	u32 len;
	u32 advice;
};

struct io_madvise {
	struct file *file;
	u64 addr;
	u32 len;
	u32 advice;
};

struct io_epoll {
	struct file *file;
	int epfd;
	int op;
	int fd;
	struct epoll_event event;
};

struct io_splice {
	struct file *file_out;
	struct file *file_in;
	loff_t off_out;
	loff_t off_in;
	u64 len;
	unsigned int flags;
};

struct io_provide_buf {
	struct file *file;
	__u64 addr;
	__u32 len;
	__u32 bgid;
	__u16 nbufs;
	__u16 bid;
};

struct io_statx {
	struct file *file;
	int dfd;
	unsigned int mask;
	unsigned int flags;
	const char __user *filename;
	struct statx __user *buffer;
};

struct io_completion {
	struct file *file;
	struct list_head list;
	u32 cflags;
};

struct io_async_connect {
	struct sockaddr_storage address;
};

struct io_async_msghdr {
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov;
	struct sockaddr __user *uaddr;
	struct msghdr msg;
	struct sockaddr_storage addr;
};

struct io_async_rw {
	struct iovec fast_iov[UIO_FASTIOV];
	const struct iovec *free_iovec;
	struct iov_iter iter;
	size_t bytes_done;
	struct wait_page_queue wpq;
};
enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_HEAD_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_WORK_INITIALIZED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),

	/* head of a link */
	REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* io_wq_work is initialized */
	REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
};
  564. struct async_poll {
  565. struct io_poll_iocb poll;
  566. struct io_poll_iocb *double_poll;
  567. };
  568. /*
  569. * NOTE! Each of the iocb union members has the file pointer
  570. * as the first entry in their struct definition. So you can
  571. * access the file pointer through any of the sub-structs,
  572. * or directly as just 'ki_filp' in this struct.
  573. */
  574. struct io_kiocb {
  575. union {
  576. struct file *file;
  577. struct io_rw rw;
  578. struct io_poll_iocb poll;
  579. struct io_accept accept;
  580. struct io_sync sync;
  581. struct io_cancel cancel;
  582. struct io_timeout timeout;
  583. struct io_timeout_rem timeout_rem;
  584. struct io_connect connect;
  585. struct io_sr_msg sr_msg;
  586. struct io_open open;
  587. struct io_close close;
  588. struct io_files_update files_update;
  589. struct io_fadvise fadvise;
  590. struct io_madvise madvise;
  591. struct io_epoll epoll;
  592. struct io_splice splice;
  593. struct io_provide_buf pbuf;
  594. struct io_statx statx;
  595. /* use only after cleaning per-op data, see io_clean_op() */
  596. struct io_completion compl;
  597. };
  598. /* opcode allocated if it needs to store data for async defer */
  599. void *async_data;
  600. u8 opcode;
  601. /* polled IO has completed */
  602. u8 iopoll_completed;
  603. u16 buf_index;
  604. u32 result;
  605. struct io_ring_ctx *ctx;
  606. unsigned int flags;
  607. refcount_t refs;
  608. struct task_struct *task;
  609. u64 user_data;
  610. struct list_head link_list;
  611. /*
  612. * 1. used with ctx->iopoll_list with reads/writes
  613. * 2. to track reqs with ->files (see io_op_def::file_table)
  614. */
  615. struct list_head inflight_entry;
  616. struct percpu_ref *fixed_file_refs;
  617. struct callback_head task_work;
  618. /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
  619. struct hlist_node hash_node;
  620. struct async_poll *apoll;
  621. struct io_wq_work work;
  622. };
  623. struct io_defer_entry {
  624. struct list_head list;
  625. struct io_kiocb *req;
  626. u32 seq;
  627. };
  628. #define IO_IOPOLL_BATCH 8
  629. struct io_comp_state {
  630. unsigned int nr;
  631. struct list_head list;
  632. struct io_ring_ctx *ctx;
  633. };
  634. struct io_submit_state {
  635. struct blk_plug plug;
  636. /*
  637. * io_kiocb alloc cache
  638. */
  639. void *reqs[IO_IOPOLL_BATCH];
  640. unsigned int free_reqs;
  641. /*
  642. * Batch completion logic
  643. */
  644. struct io_comp_state comp;
  645. /*
  646. * File reference cache
  647. */
  648. struct file *file;
  649. unsigned int fd;
  650. unsigned int has_refs;
  651. unsigned int ios_left;
  652. };
  653. struct io_op_def {
  654. /* needs req->file assigned */
  655. unsigned needs_file : 1;
  656. /* don't fail if file grab fails */
  657. unsigned needs_file_no_error : 1;
  658. /* hash wq insertion if file is a regular file */
  659. unsigned hash_reg_file : 1;
  660. /* unbound wq insertion if file is a non-regular file */
  661. unsigned unbound_nonreg_file : 1;
  662. /* opcode is not supported by this kernel */
  663. unsigned not_supported : 1;
  664. /* set if opcode supports polled "wait" */
  665. unsigned pollin : 1;
  666. unsigned pollout : 1;
  667. /* op supports buffer selection */
  668. unsigned buffer_select : 1;
  669. /* must always have async data allocated */
  670. unsigned needs_async_data : 1;
  671. /* size of async data needed, if any */
  672. unsigned short async_size;
  673. unsigned work_flags;
  674. };
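/*
 * Per-opcode issue properties: file requirements, poll direction, async
 * data size, and the io-wq work_flags (mm, creds, blkcg, fs, files, fsize)
 * each opcode needs grabbed when it is punted to a worker.
 */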
  675. static const struct io_op_def io_op_defs[] = {
  676. [IORING_OP_NOP] = {},
  677. [IORING_OP_READV] = {
  678. .needs_file = 1,
  679. .unbound_nonreg_file = 1,
  680. .pollin = 1,
  681. .buffer_select = 1,
  682. .needs_async_data = 1,
  683. .async_size = sizeof(struct io_async_rw),
  684. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
  685. },
  686. [IORING_OP_WRITEV] = {
  687. .needs_file = 1,
  688. .hash_reg_file = 1,
  689. .unbound_nonreg_file = 1,
  690. .pollout = 1,
  691. .needs_async_data = 1,
  692. .async_size = sizeof(struct io_async_rw),
  693. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
  694. IO_WQ_WORK_FSIZE,
  695. },
  696. [IORING_OP_FSYNC] = {
  697. .needs_file = 1,
  698. .work_flags = IO_WQ_WORK_BLKCG,
  699. },
  700. [IORING_OP_READ_FIXED] = {
  701. .needs_file = 1,
  702. .unbound_nonreg_file = 1,
  703. .pollin = 1,
  704. .async_size = sizeof(struct io_async_rw),
  705. .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
  706. },
  707. [IORING_OP_WRITE_FIXED] = {
  708. .needs_file = 1,
  709. .hash_reg_file = 1,
  710. .unbound_nonreg_file = 1,
  711. .pollout = 1,
  712. .async_size = sizeof(struct io_async_rw),
  713. .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
  714. IO_WQ_WORK_MM,
  715. },
  716. [IORING_OP_POLL_ADD] = {
  717. .needs_file = 1,
  718. .unbound_nonreg_file = 1,
  719. },
  720. [IORING_OP_POLL_REMOVE] = {},
  721. [IORING_OP_SYNC_FILE_RANGE] = {
  722. .needs_file = 1,
  723. .work_flags = IO_WQ_WORK_BLKCG,
  724. },
  725. [IORING_OP_SENDMSG] = {
  726. .needs_file = 1,
  727. .unbound_nonreg_file = 1,
  728. .pollout = 1,
  729. .needs_async_data = 1,
  730. .async_size = sizeof(struct io_async_msghdr),
  731. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
  732. IO_WQ_WORK_FS,
  733. },
  734. [IORING_OP_RECVMSG] = {
  735. .needs_file = 1,
  736. .unbound_nonreg_file = 1,
  737. .pollin = 1,
  738. .buffer_select = 1,
  739. .needs_async_data = 1,
  740. .async_size = sizeof(struct io_async_msghdr),
  741. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
  742. IO_WQ_WORK_FS,
  743. },
  744. [IORING_OP_TIMEOUT] = {
  745. .needs_async_data = 1,
  746. .async_size = sizeof(struct io_timeout_data),
  747. .work_flags = IO_WQ_WORK_MM,
  748. },
  749. [IORING_OP_TIMEOUT_REMOVE] = {},
  750. [IORING_OP_ACCEPT] = {
  751. .needs_file = 1,
  752. .unbound_nonreg_file = 1,
  753. .pollin = 1,
  754. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
  755. },
  756. [IORING_OP_ASYNC_CANCEL] = {},
  757. [IORING_OP_LINK_TIMEOUT] = {
  758. .needs_async_data = 1,
  759. .async_size = sizeof(struct io_timeout_data),
  760. .work_flags = IO_WQ_WORK_MM,
  761. },
  762. [IORING_OP_CONNECT] = {
  763. .needs_file = 1,
  764. .unbound_nonreg_file = 1,
  765. .pollout = 1,
  766. .needs_async_data = 1,
  767. .async_size = sizeof(struct io_async_connect),
  768. .work_flags = IO_WQ_WORK_MM,
  769. },
  770. [IORING_OP_FALLOCATE] = {
  771. .needs_file = 1,
  772. .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
  773. },
  774. [IORING_OP_OPENAT] = {
  775. .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
  776. IO_WQ_WORK_FS,
  777. },
  778. [IORING_OP_CLOSE] = {
  779. .needs_file = 1,
  780. .needs_file_no_error = 1,
  781. .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
  782. },
  783. [IORING_OP_FILES_UPDATE] = {
  784. .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
  785. },
  786. [IORING_OP_STATX] = {
  787. .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
  788. IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
  789. },
  790. [IORING_OP_READ] = {
  791. .needs_file = 1,
  792. .unbound_nonreg_file = 1,
  793. .pollin = 1,
  794. .buffer_select = 1,
  795. .async_size = sizeof(struct io_async_rw),
  796. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
  797. },
  798. [IORING_OP_WRITE] = {
  799. .needs_file = 1,
  800. .hash_reg_file = 1,
  801. .unbound_nonreg_file = 1,
  802. .pollout = 1,
  803. .async_size = sizeof(struct io_async_rw),
  804. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
  805. IO_WQ_WORK_FSIZE,
  806. },
  807. [IORING_OP_FADVISE] = {
  808. .needs_file = 1,
  809. .work_flags = IO_WQ_WORK_BLKCG,
  810. },
  811. [IORING_OP_MADVISE] = {
  812. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
  813. },
  814. [IORING_OP_SEND] = {
  815. .needs_file = 1,
  816. .unbound_nonreg_file = 1,
  817. .pollout = 1,
  818. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
  819. },
  820. [IORING_OP_RECV] = {
  821. .needs_file = 1,
  822. .unbound_nonreg_file = 1,
  823. .pollin = 1,
  824. .buffer_select = 1,
  825. .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
  826. },
  827. [IORING_OP_OPENAT2] = {
  828. .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
  829. IO_WQ_WORK_BLKCG,
  830. },
  831. [IORING_OP_EPOLL_CTL] = {
  832. .unbound_nonreg_file = 1,
  833. .work_flags = IO_WQ_WORK_FILES,
  834. },
  835. [IORING_OP_SPLICE] = {
  836. .needs_file = 1,
  837. .hash_reg_file = 1,
  838. .unbound_nonreg_file = 1,
  839. .work_flags = IO_WQ_WORK_BLKCG,
  840. },
  841. [IORING_OP_PROVIDE_BUFFERS] = {},
  842. [IORING_OP_REMOVE_BUFFERS] = {},
  843. [IORING_OP_TEE] = {
  844. .needs_file = 1,
  845. .hash_reg_file = 1,
  846. .unbound_nonreg_file = 1,
  847. },
  848. };
  849. enum io_mem_account {
  850. ACCT_LOCKED,
  851. ACCT_PINNED,
  852. };
  853. static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
  854. static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
  855. struct io_ring_ctx *ctx);
  856. static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
  857. struct io_comp_state *cs);
  858. static void io_cqring_fill_event(struct io_kiocb *req, long res);
  859. static void io_put_req(struct io_kiocb *req);
  860. static void io_put_req_deferred(struct io_kiocb *req, int nr);
  861. static void io_double_put_req(struct io_kiocb *req);
  862. static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
  863. static void __io_queue_linked_timeout(struct io_kiocb *req);
  864. static void io_queue_linked_timeout(struct io_kiocb *req);
  865. static int __io_sqe_files_update(struct io_ring_ctx *ctx,
  866. struct io_uring_files_update *ip,
  867. unsigned nr_args);
  868. static void __io_clean_op(struct io_kiocb *req);
  869. static struct file *io_file_get(struct io_submit_state *state,
  870. struct io_kiocb *req, int fd, bool fixed);
  871. static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
  872. static void io_file_put_work(struct work_struct *work);
  873. static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
  874. struct iovec **iovec, struct iov_iter *iter,
  875. bool needs_lock);
  876. static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
  877. const struct iovec *fast_iov,
  878. struct iov_iter *iter, bool force);
  879. static void io_req_drop_files(struct io_kiocb *req);
  880. static void io_req_task_queue(struct io_kiocb *req);
  881. static struct kmem_cache *req_cachep;
  882. static const struct file_operations io_uring_fops;
  883. struct sock *io_uring_get_socket(struct file *file)
  884. {
  885. #if defined(CONFIG_UNIX)
  886. if (file->f_op == &io_uring_fops) {
  887. struct io_ring_ctx *ctx = file->private_data;
  888. return ctx->ring_sock->sk;
  889. }
  890. #endif
  891. return NULL;
  892. }
  893. EXPORT_SYMBOL(io_uring_get_socket);
  894. static inline void io_clean_op(struct io_kiocb *req)
  895. {
  896. if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
  897. __io_clean_op(req);
  898. }
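/*
 * Returns true if the request is tied to the given files_struct, either
 * implicitly (the request targets an io_uring file) or through the files
 * captured in its work identity.
 */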
  899. static inline bool __io_match_files(struct io_kiocb *req,
  900. struct files_struct *files)
  901. {
  902. if (req->file && req->file->f_op == &io_uring_fops)
  903. return true;
  904. return ((req->flags & REQ_F_WORK_INITIALIZED) &&
  905. (req->work.flags & IO_WQ_WORK_FILES)) &&
  906. req->work.identity->files == files;
  907. }
  908. static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
  909. {
  910. bool got = percpu_ref_tryget(ref);
  911. /* already at zero, wait for ->release() */
  912. if (!got)
  913. wait_for_completion(compl);
  914. percpu_ref_resurrect(ref);
  915. if (got)
  916. percpu_ref_put(ref);
  917. }
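/*
 * Cancelation matching: decides whether a request (including any requests
 * linked to it) should be canceled on behalf of the given task and,
 * optionally, files_struct.
 */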
  918. static bool io_match_task(struct io_kiocb *head,
  919. struct task_struct *task,
  920. struct files_struct *files)
  921. {
  922. struct io_kiocb *link;
  923. if (task && head->task != task) {
  924. /* in terms of cancelation, always match if req task is dead */
  925. if (head->task->flags & PF_EXITING)
  926. return true;
  927. return false;
  928. }
  929. if (!files)
  930. return true;
  931. if (__io_match_files(head, files))
  932. return true;
  933. if (head->flags & REQ_F_LINK_HEAD) {
  934. list_for_each_entry(link, &head->link_list, link_list) {
  935. if (__io_match_files(link, files))
  936. return true;
  937. }
  938. }
  939. return false;
  940. }
  941. static void io_sq_thread_drop_mm(void)
  942. {
  943. struct mm_struct *mm = current->mm;
  944. if (mm) {
  945. kthread_unuse_mm(mm);
  946. mmput(mm);
  947. current->mm = NULL;
  948. }
  949. }
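/*
 * The SQPOLL kernel thread has no mm of its own; borrow the submitter's mm
 * (ctx->sqo_task) so that user memory can be accessed during issue.
 */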
  950. static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
  951. {
  952. struct mm_struct *mm;
  953. if (current->flags & PF_EXITING)
  954. return -EFAULT;
  955. if (current->mm)
  956. return 0;
  957. /* Should never happen */
  958. if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
  959. return -EFAULT;
  960. task_lock(ctx->sqo_task);
  961. mm = ctx->sqo_task->mm;
  962. if (unlikely(!mm || !mmget_not_zero(mm)))
  963. mm = NULL;
  964. task_unlock(ctx->sqo_task);
  965. if (mm) {
  966. kthread_use_mm(mm);
  967. return 0;
  968. }
  969. return -EFAULT;
  970. }
  971. static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
  972. struct io_kiocb *req)
  973. {
  974. if (!(io_op_defs[req->opcode].work_flags & IO_WQ_WORK_MM))
  975. return 0;
  976. return __io_sq_thread_acquire_mm(ctx);
  977. }
  978. static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
  979. struct cgroup_subsys_state **cur_css)
  980. {
  981. #ifdef CONFIG_BLK_CGROUP
  982. /* puts the old one when swapping */
  983. if (*cur_css != ctx->sqo_blkcg_css) {
  984. kthread_associate_blkcg(ctx->sqo_blkcg_css);
  985. *cur_css = ctx->sqo_blkcg_css;
  986. }
  987. #endif
  988. }
  989. static void io_sq_thread_unassociate_blkcg(void)
  990. {
  991. #ifdef CONFIG_BLK_CGROUP
  992. kthread_associate_blkcg(NULL);
  993. #endif
  994. }
  995. static inline void req_set_fail_links(struct io_kiocb *req)
  996. {
  997. if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
  998. req->flags |= REQ_F_FAIL_LINK;
  999. }
  1000. /*
1001. * None of these are dereferenced; they are simply used to check whether any
1002. * of them have changed. If we're running as current and they are still the
1003. * same, we're fine to grab references to them for actual out-of-line use.
  1004. */
  1005. static void io_init_identity(struct io_identity *id)
  1006. {
  1007. id->files = current->files;
  1008. id->mm = current->mm;
  1009. #ifdef CONFIG_BLK_CGROUP
  1010. rcu_read_lock();
  1011. id->blkcg_css = blkcg_css();
  1012. rcu_read_unlock();
  1013. #endif
  1014. id->creds = current_cred();
  1015. id->nsproxy = current->nsproxy;
  1016. id->fs = current->fs;
  1017. id->fsize = rlimit(RLIMIT_FSIZE);
  1018. #ifdef CONFIG_AUDIT
  1019. id->loginuid = current->loginuid;
  1020. id->sessionid = current->sessionid;
  1021. #endif
  1022. refcount_set(&id->count, 1);
  1023. }
  1024. static inline void __io_req_init_async(struct io_kiocb *req)
  1025. {
  1026. memset(&req->work, 0, sizeof(req->work));
  1027. req->flags |= REQ_F_WORK_INITIALIZED;
  1028. }
  1029. /*
  1030. * Note: must call io_req_init_async() for the first time you
  1031. * touch any members of io_wq_work.
  1032. */
  1033. static inline void io_req_init_async(struct io_kiocb *req)
  1034. {
  1035. struct io_uring_task *tctx = current->io_uring;
  1036. if (req->flags & REQ_F_WORK_INITIALIZED)
  1037. return;
  1038. __io_req_init_async(req);
  1039. /* Grab a ref if this isn't our static identity */
  1040. req->work.identity = tctx->identity;
  1041. if (tctx->identity != &tctx->__identity)
  1042. refcount_inc(&req->work.identity->count);
  1043. }
  1044. static inline bool io_async_submit(struct io_ring_ctx *ctx)
  1045. {
  1046. return ctx->flags & IORING_SETUP_SQPOLL;
  1047. }
  1048. static void io_ring_ctx_ref_free(struct percpu_ref *ref)
  1049. {
  1050. struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
  1051. complete(&ctx->ref_comp);
  1052. }
  1053. static inline bool io_is_timeout_noseq(struct io_kiocb *req)
  1054. {
  1055. return !req->timeout.off;
  1056. }
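/*
 * Allocate and initialize a ring context: fallback request, cancel hash,
 * percpu refs, and the lists/locks used for submission, completion,
 * polling and deferral.
 */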
  1057. static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
  1058. {
  1059. struct io_ring_ctx *ctx;
  1060. int hash_bits;
  1061. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  1062. if (!ctx)
  1063. return NULL;
  1064. ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
  1065. if (!ctx->fallback_req)
  1066. goto err;
  1067. /*
  1068. * Use 5 bits less than the max cq entries, that should give us around
  1069. * 32 entries per hash list if totally full and uniformly spread.
  1070. */
  1071. hash_bits = ilog2(p->cq_entries);
  1072. hash_bits -= 5;
  1073. if (hash_bits <= 0)
  1074. hash_bits = 1;
  1075. ctx->cancel_hash_bits = hash_bits;
  1076. ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
  1077. GFP_KERNEL);
  1078. if (!ctx->cancel_hash)
  1079. goto err;
  1080. __hash_init(ctx->cancel_hash, 1U << hash_bits);
  1081. if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
  1082. PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
  1083. goto err;
  1084. ctx->flags = p->flags;
  1085. init_waitqueue_head(&ctx->sqo_sq_wait);
  1086. INIT_LIST_HEAD(&ctx->sqd_list);
  1087. init_waitqueue_head(&ctx->cq_wait);
  1088. INIT_LIST_HEAD(&ctx->cq_overflow_list);
  1089. init_completion(&ctx->ref_comp);
  1090. init_completion(&ctx->sq_thread_comp);
  1091. xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
  1092. xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
  1093. mutex_init(&ctx->uring_lock);
  1094. init_waitqueue_head(&ctx->wait);
  1095. spin_lock_init(&ctx->completion_lock);
  1096. INIT_LIST_HEAD(&ctx->iopoll_list);
  1097. INIT_LIST_HEAD(&ctx->defer_list);
  1098. INIT_LIST_HEAD(&ctx->timeout_list);
  1099. spin_lock_init(&ctx->inflight_lock);
  1100. INIT_LIST_HEAD(&ctx->inflight_list);
  1101. INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
  1102. init_llist_head(&ctx->file_put_llist);
  1103. return ctx;
  1104. err:
  1105. if (ctx->fallback_req)
  1106. kmem_cache_free(req_cachep, ctx->fallback_req);
  1107. kfree(ctx->cancel_hash);
  1108. kfree(ctx);
  1109. return NULL;
  1110. }
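/*
 * A drain request must wait for all previously submitted requests to
 * complete: compare its recorded sequence against the current CQ tail plus
 * overflowed completions.
 */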
  1111. static bool req_need_defer(struct io_kiocb *req, u32 seq)
  1112. {
  1113. if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
  1114. struct io_ring_ctx *ctx = req->ctx;
  1115. return seq != ctx->cached_cq_tail
  1116. + READ_ONCE(ctx->cached_cq_overflow);
  1117. }
  1118. return false;
  1119. }
  1120. static void __io_commit_cqring(struct io_ring_ctx *ctx)
  1121. {
  1122. struct io_rings *rings = ctx->rings;
  1123. /* order cqe stores with ring update */
  1124. smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
  1125. }
  1126. static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
  1127. {
  1128. if (req->work.identity == &tctx->__identity)
  1129. return;
  1130. if (refcount_dec_and_test(&req->work.identity->count))
  1131. kfree(req->work.identity);
  1132. }
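/*
 * Drop the context references (mm, blkcg, creds, fs, files) that were
 * grabbed for async execution, then release the work identity.
 */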
  1133. static void io_req_clean_work(struct io_kiocb *req)
  1134. {
  1135. if (!(req->flags & REQ_F_WORK_INITIALIZED))
  1136. return;
  1137. req->flags &= ~REQ_F_WORK_INITIALIZED;
  1138. if (req->work.flags & IO_WQ_WORK_MM) {
  1139. mmdrop(req->work.identity->mm);
  1140. req->work.flags &= ~IO_WQ_WORK_MM;
  1141. }
  1142. #ifdef CONFIG_BLK_CGROUP
  1143. if (req->work.flags & IO_WQ_WORK_BLKCG) {
  1144. css_put(req->work.identity->blkcg_css);
  1145. req->work.flags &= ~IO_WQ_WORK_BLKCG;
  1146. }
  1147. #endif
  1148. if (req->work.flags & IO_WQ_WORK_CREDS) {
  1149. put_cred(req->work.identity->creds);
  1150. req->work.flags &= ~IO_WQ_WORK_CREDS;
  1151. }
  1152. if (req->work.flags & IO_WQ_WORK_FS) {
  1153. struct fs_struct *fs = req->work.identity->fs;
  1154. spin_lock(&req->work.identity->fs->lock);
  1155. if (--fs->users)
  1156. fs = NULL;
  1157. spin_unlock(&req->work.identity->fs->lock);
  1158. if (fs)
  1159. free_fs_struct(fs);
  1160. req->work.flags &= ~IO_WQ_WORK_FS;
  1161. }
  1162. if (req->flags & REQ_F_INFLIGHT)
  1163. io_req_drop_files(req);
  1164. io_put_identity(req->task->io_uring, req);
  1165. }
  1166. /*
  1167. * Create a private copy of io_identity, since some fields don't match
  1168. * the current context.
  1169. */
  1170. static bool io_identity_cow(struct io_kiocb *req)
  1171. {
  1172. struct io_uring_task *tctx = current->io_uring;
  1173. const struct cred *creds = NULL;
  1174. struct io_identity *id;
  1175. if (req->work.flags & IO_WQ_WORK_CREDS)
  1176. creds = req->work.identity->creds;
  1177. id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
  1178. if (unlikely(!id)) {
  1179. req->work.flags |= IO_WQ_WORK_CANCEL;
  1180. return false;
  1181. }
  1182. /*
1183. * We can safely just re-init the creds we copied. Either the field
  1184. * matches the current one, or we haven't grabbed it yet. The only
  1185. * exception is ->creds, through registered personalities, so handle
  1186. * that one separately.
  1187. */
  1188. io_init_identity(id);
  1189. if (creds)
  1190. id->creds = creds;
  1191. /* add one for this request */
  1192. refcount_inc(&id->count);
  1193. /* drop tctx and req identity references, if needed */
  1194. if (tctx->identity != &tctx->__identity &&
  1195. refcount_dec_and_test(&tctx->identity->count))
  1196. kfree(tctx->identity);
  1197. if (req->work.identity != &tctx->__identity &&
  1198. refcount_dec_and_test(&req->work.identity->count))
  1199. kfree(req->work.identity);
  1200. req->work.identity = id;
  1201. tctx->identity = id;
  1202. return true;
  1203. }
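/*
 * Verify that the cached identity still matches the current task context
 * and take references on everything this opcode needs for async execution.
 * Returns false if any field has changed, in which case the caller must
 * COW a fresh identity and retry.
 */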
  1204. static bool io_grab_identity(struct io_kiocb *req)
  1205. {
  1206. const struct io_op_def *def = &io_op_defs[req->opcode];
  1207. struct io_identity *id = req->work.identity;
  1208. struct io_ring_ctx *ctx = req->ctx;
  1209. if (def->work_flags & IO_WQ_WORK_FSIZE) {
  1210. if (id->fsize != rlimit(RLIMIT_FSIZE))
  1211. return false;
  1212. req->work.flags |= IO_WQ_WORK_FSIZE;
  1213. }
  1214. #ifdef CONFIG_BLK_CGROUP
  1215. if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
  1216. (def->work_flags & IO_WQ_WORK_BLKCG)) {
  1217. rcu_read_lock();
  1218. if (id->blkcg_css != blkcg_css()) {
  1219. rcu_read_unlock();
  1220. return false;
  1221. }
  1222. /*
  1223. * This should be rare, either the cgroup is dying or the task
  1224. * is moving cgroups. Just punt to root for the handful of ios.
  1225. */
  1226. if (css_tryget_online(id->blkcg_css))
  1227. req->work.flags |= IO_WQ_WORK_BLKCG;
  1228. rcu_read_unlock();
  1229. }
  1230. #endif
  1231. if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
  1232. if (id->creds != current_cred())
  1233. return false;
  1234. get_cred(id->creds);
  1235. req->work.flags |= IO_WQ_WORK_CREDS;
  1236. }
  1237. #ifdef CONFIG_AUDIT
  1238. if (!uid_eq(current->loginuid, id->loginuid) ||
  1239. current->sessionid != id->sessionid)
  1240. return false;
  1241. #endif
  1242. if (!(req->work.flags & IO_WQ_WORK_FS) &&
  1243. (def->work_flags & IO_WQ_WORK_FS)) {
  1244. if (current->fs != id->fs)
  1245. return false;
  1246. spin_lock(&id->fs->lock);
  1247. if (!id->fs->in_exec) {
  1248. id->fs->users++;
  1249. req->work.flags |= IO_WQ_WORK_FS;
  1250. } else {
  1251. req->work.flags |= IO_WQ_WORK_CANCEL;
  1252. }
  1253. spin_unlock(&current->fs->lock);
  1254. }
  1255. if (!(req->work.flags & IO_WQ_WORK_FILES) &&
  1256. (def->work_flags & IO_WQ_WORK_FILES) &&
  1257. !(req->flags & REQ_F_NO_FILE_TABLE)) {
  1258. if (id->files != current->files ||
  1259. id->nsproxy != current->nsproxy)
  1260. return false;
  1261. atomic_inc(&id->files->count);
  1262. get_nsproxy(id->nsproxy);
  1263. if (!(req->flags & REQ_F_INFLIGHT)) {
  1264. req->flags |= REQ_F_INFLIGHT;
  1265. spin_lock_irq(&ctx->inflight_lock);
  1266. list_add(&req->inflight_entry, &ctx->inflight_list);
  1267. spin_unlock_irq(&ctx->inflight_lock);
  1268. }
  1269. req->work.flags |= IO_WQ_WORK_FILES;
  1270. }
  1271. if (!(req->work.flags & IO_WQ_WORK_MM) &&
  1272. (def->work_flags & IO_WQ_WORK_MM)) {
  1273. if (id->mm != current->mm)
  1274. return false;
  1275. mmgrab(id->mm);
  1276. req->work.flags |= IO_WQ_WORK_MM;
  1277. }
  1278. return true;
  1279. }
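/*
 * Prepare a request for punting to io-wq: set hashing/unbound flags based
 * on the file type, then grab (or COW and re-grab) the identity the work
 * will run under.
 */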
  1280. static void io_prep_async_work(struct io_kiocb *req)
  1281. {
  1282. const struct io_op_def *def = &io_op_defs[req->opcode];
  1283. struct io_ring_ctx *ctx = req->ctx;
  1284. struct io_identity *id;
  1285. io_req_init_async(req);
  1286. id = req->work.identity;
  1287. if (req->flags & REQ_F_FORCE_ASYNC)
  1288. req->work.flags |= IO_WQ_WORK_CONCURRENT;
  1289. if (req->flags & REQ_F_ISREG) {
  1290. if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
  1291. io_wq_hash_work(&req->work, file_inode(req->file));
  1292. } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
  1293. if (def->unbound_nonreg_file)
  1294. req->work.flags |= IO_WQ_WORK_UNBOUND;
  1295. }
  1296. /* if we fail grabbing identity, we must COW, regrab, and retry */
  1297. if (io_grab_identity(req))
  1298. return;
  1299. if (!io_identity_cow(req))
  1300. return;
  1301. /* can't fail at this point */
  1302. if (!io_grab_identity(req))
  1303. WARN_ON(1);
  1304. }
  1305. static void io_prep_async_link(struct io_kiocb *req)
  1306. {
  1307. struct io_kiocb *cur;
  1308. io_prep_async_work(req);
  1309. if (req->flags & REQ_F_LINK_HEAD)
  1310. list_for_each_entry(cur, &req->link_list, link_list)
  1311. io_prep_async_work(cur);
  1312. }
  1313. static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
  1314. {
  1315. struct io_ring_ctx *ctx = req->ctx;
  1316. struct io_kiocb *link = io_prep_linked_timeout(req);
  1317. trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
  1318. &req->work, req->flags);
  1319. io_wq_enqueue(ctx->io_wq, &req->work);
  1320. return link;
  1321. }
  1322. static void io_queue_async_work(struct io_kiocb *req)
  1323. {
  1324. struct io_kiocb *link;
  1325. /* init ->work of the whole link before punting */
  1326. io_prep_async_link(req);
  1327. link = __io_queue_async_work(req);
  1328. if (link)
  1329. io_queue_linked_timeout(link);
  1330. }
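/*
 * Cancel a pending timeout and post its completion with the given status,
 * provided the hrtimer callback isn't already running.
 */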
  1331. static void io_kill_timeout(struct io_kiocb *req, int status)
  1332. {
  1333. struct io_timeout_data *io = req->async_data;
  1334. int ret;
  1335. ret = hrtimer_try_to_cancel(&io->timer);
  1336. if (ret != -1) {
  1337. if (status)
  1338. req_set_fail_links(req);
  1339. atomic_set(&req->ctx->cq_timeouts,
  1340. atomic_read(&req->ctx->cq_timeouts) + 1);
  1341. list_del_init(&req->timeout.list);
  1342. io_cqring_fill_event(req, status);
  1343. io_put_req_deferred(req, 1);
  1344. }
  1345. }
  1346. /*
  1347. * Returns true if we found and killed one or more timeouts
  1348. */
  1349. static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
  1350. struct files_struct *files)
  1351. {
  1352. struct io_kiocb *req, *tmp;
  1353. int canceled = 0;
  1354. spin_lock_irq(&ctx->completion_lock);
  1355. list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
  1356. if (io_match_task(req, tsk, files)) {
  1357. io_kill_timeout(req, -ECANCELED);
  1358. canceled++;
  1359. }
  1360. }
  1361. spin_unlock_irq(&ctx->completion_lock);
  1362. return canceled != 0;
  1363. }
  1364. static void __io_queue_deferred(struct io_ring_ctx *ctx)
  1365. {
  1366. do {
  1367. struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
  1368. struct io_defer_entry, list);
  1369. if (req_need_defer(de->req, de->seq))
  1370. break;
  1371. list_del_init(&de->list);
  1372. io_req_task_queue(de->req);
  1373. kfree(de);
  1374. } while (!list_empty(&ctx->defer_list));
  1375. }
  1376. static void io_flush_timeouts(struct io_ring_ctx *ctx)
  1377. {
  1378. struct io_kiocb *req, *tmp;
  1379. u32 seq;
  1380. if (list_empty(&ctx->timeout_list))
  1381. return;
  1382. seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
  1383. list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
  1384. u32 events_needed, events_got;
  1385. if (io_is_timeout_noseq(req))
  1386. break;
  1387. /*
  1388. * Since seq can easily wrap around over time, subtract
  1389. * the last seq at which timeouts were flushed before comparing.
  1390. * Assuming not more than 2^31-1 events have happened since,
  1391. * these subtractions won't have wrapped, so we can check if
  1392. * target is in [last_seq, current_seq] by comparing the two.
  1393. */
  1394. events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
  1395. events_got = seq - ctx->cq_last_tm_flush;
  1396. if (events_got < events_needed)
  1397. break;
  1398. io_kill_timeout(req, 0);
  1399. }
  1400. ctx->cq_last_tm_flush = seq;
  1401. }
  1402. static void io_commit_cqring(struct io_ring_ctx *ctx)
  1403. {
  1404. io_flush_timeouts(ctx);
  1405. __io_commit_cqring(ctx);
  1406. if (unlikely(!list_empty(&ctx->defer_list)))
  1407. __io_queue_deferred(ctx);
  1408. }
  1409. static inline bool io_sqring_full(struct io_ring_ctx *ctx)
  1410. {
  1411. struct io_rings *r = ctx->rings;
  1412. return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
  1413. }
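/*
 * Reserve the next CQE slot, or return NULL if the CQ ring is full; in
 * that case callers fall back to the overflow handling.
 */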
  1414. static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
  1415. {
  1416. struct io_rings *rings = ctx->rings;
  1417. unsigned tail;
  1418. tail = ctx->cached_cq_tail;
  1419. /*
  1420. * writes to the cq entry need to come after reading head; the
  1421. * control dependency is enough as we're using WRITE_ONCE to
  1422. * fill the cq entry
  1423. */
  1424. if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
  1425. return NULL;
  1426. ctx->cached_cq_tail++;
  1427. return &rings->cqes[tail & ctx->cq_mask];
  1428. }
  1429. static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
  1430. {
  1431. if (!ctx->cq_ev_fd)
  1432. return false;
  1433. if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
  1434. return false;
  1435. if (!ctx->eventfd_async)
  1436. return true;
  1437. return io_wq_current_is_worker();
  1438. }
  1439. static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
  1440. {
  1441. if (wq_has_sleeper(&ctx->cq_wait)) {
  1442. wake_up_interruptible(&ctx->cq_wait);
  1443. kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
  1444. }
  1445. if (waitqueue_active(&ctx->wait))
  1446. wake_up(&ctx->wait);
  1447. if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
  1448. wake_up(&ctx->sq_data->wait);
  1449. if (io_should_trigger_evfd(ctx))
  1450. eventfd_signal(ctx->cq_ev_fd, 1);
  1451. }
  1452. static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
  1453. {
  1454. if (list_empty(&ctx->cq_overflow_list)) {
  1455. clear_bit(0, &ctx->sq_check_overflow);
  1456. clear_bit(0, &ctx->cq_check_overflow);
  1457. ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
  1458. }
  1459. }
  1460. /* Returns true if there are no backlogged entries after the flush */
  1461. static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
  1462. struct task_struct *tsk,
  1463. struct files_struct *files)
  1464. {
  1465. struct io_rings *rings = ctx->rings;
  1466. struct io_kiocb *req, *tmp;
  1467. struct io_uring_cqe *cqe;
  1468. unsigned long flags;
  1469. LIST_HEAD(list);
  1470. if (!force) {
  1471. if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
  1472. rings->cq_ring_entries))
  1473. return false;
  1474. }
  1475. spin_lock_irqsave(&ctx->completion_lock, flags);
  1476. cqe = NULL;
  1477. list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
  1478. if (!io_match_task(req, tsk, files))
  1479. continue;
  1480. cqe = io_get_cqring(ctx);
  1481. if (!cqe && !force)
  1482. break;
  1483. list_move(&req->compl.list, &list);
  1484. if (cqe) {
  1485. WRITE_ONCE(cqe->user_data, req->user_data);
  1486. WRITE_ONCE(cqe->res, req->result);
  1487. WRITE_ONCE(cqe->flags, req->compl.cflags);
  1488. } else {
  1489. ctx->cached_cq_overflow++;
  1490. WRITE_ONCE(ctx->rings->cq_overflow,
  1491. ctx->cached_cq_overflow);
  1492. }
  1493. }
  1494. io_commit_cqring(ctx);
  1495. io_cqring_mark_overflow(ctx);
  1496. spin_unlock_irqrestore(&ctx->completion_lock, flags);
  1497. io_cqring_ev_posted(ctx);
  1498. while (!list_empty(&list)) {
  1499. req = list_first_entry(&list, struct io_kiocb, compl.list);
  1500. list_del(&req->compl.list);
  1501. io_put_req(req);
  1502. }
  1503. return cqe != NULL;
  1504. }
  1505. static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
  1506. struct task_struct *tsk,
  1507. struct files_struct *files)
  1508. {
  1509. if (test_bit(0, &ctx->cq_check_overflow)) {
  1510. /* iopoll syncs against uring_lock, not completion_lock */
  1511. if (ctx->flags & IORING_SETUP_IOPOLL)
  1512. mutex_lock(&ctx->uring_lock);
  1513. __io_cqring_overflow_flush(ctx, force, tsk, files);
  1514. if (ctx->flags & IORING_SETUP_IOPOLL)
  1515. mutex_unlock(&ctx->uring_lock);
  1516. }
  1517. }
  1518. static void __io_cqring_fill_event(struct io_kiocb *req, long res,
  1519. unsigned int cflags)
  1520. {
  1521. struct io_ring_ctx *ctx = req->ctx;
  1522. struct io_uring_cqe *cqe;
  1523. trace_io_uring_complete(ctx, req->user_data, res);
  1524. /*
  1525. * If we can't get a cq entry, userspace overflowed the
  1526. * submission (by quite a lot). Increment the overflow count in
  1527. * the ring.
  1528. */
  1529. cqe = io_get_cqring(ctx);
  1530. if (likely(cqe)) {
  1531. WRITE_ONCE(cqe->user_data, req->user_data);
  1532. WRITE_ONCE(cqe->res, res);
  1533. WRITE_ONCE(cqe->flags, cflags);
  1534. } else if (ctx->cq_overflow_flushed ||
  1535. atomic_read(&req->task->io_uring->in_idle)) {
  1536. /*
  1537. * If we're in ring overflow flush mode, or in task cancel mode,
  1538. * then we cannot store the request for later flushing, we need
  1539. * to drop it on the floor.
  1540. */
  1541. ctx->cached_cq_overflow++;
  1542. WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
  1543. } else {
  1544. if (list_empty(&ctx->cq_overflow_list)) {
  1545. set_bit(0, &ctx->sq_check_overflow);
  1546. set_bit(0, &ctx->cq_check_overflow);
  1547. ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
  1548. }
  1549. io_clean_op(req);
  1550. req->result = res;
  1551. req->compl.cflags = cflags;
  1552. refcount_inc(&req->refs);
  1553. list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
  1554. }
  1555. }
  1556. static void io_cqring_fill_event(struct io_kiocb *req, long res)
  1557. {
  1558. __io_cqring_fill_event(req, res, 0);
  1559. }
  1560. static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
  1561. {
  1562. struct io_ring_ctx *ctx = req->ctx;
  1563. unsigned long flags;
  1564. spin_lock_irqsave(&ctx->completion_lock, flags);
  1565. __io_cqring_fill_event(req, res, cflags);
  1566. io_commit_cqring(ctx);
  1567. spin_unlock_irqrestore(&ctx->completion_lock, flags);
  1568. io_cqring_ev_posted(ctx);
  1569. }
  1570. static void io_submit_flush_completions(struct io_comp_state *cs)
  1571. {
  1572. struct io_ring_ctx *ctx = cs->ctx;
  1573. spin_lock_irq(&ctx->completion_lock);
  1574. while (!list_empty(&cs->list)) {
  1575. struct io_kiocb *req;
  1576. req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
  1577. list_del(&req->compl.list);
  1578. __io_cqring_fill_event(req, req->result, req->compl.cflags);
  1579. /*
  1580. * io_free_req() doesn't care about completion_lock unless one
  1581. * of these flags is set. REQ_F_WORK_INITIALIZED is in the list
  1582. * because of a potential deadlock with req->work.fs->lock
  1583. */
  1584. if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT
  1585. |REQ_F_WORK_INITIALIZED)) {
  1586. spin_unlock_irq(&ctx->completion_lock);
  1587. io_put_req(req);
  1588. spin_lock_irq(&ctx->completion_lock);
  1589. } else {
  1590. io_put_req(req);
  1591. }
  1592. }
  1593. io_commit_cqring(ctx);
  1594. spin_unlock_irq(&ctx->completion_lock);
  1595. io_cqring_ev_posted(ctx);
  1596. cs->nr = 0;
  1597. }
  1598. static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
  1599. struct io_comp_state *cs)
  1600. {
  1601. if (!cs) {
  1602. io_cqring_add_event(req, res, cflags);
  1603. io_put_req(req);
  1604. } else {
  1605. io_clean_op(req);
  1606. req->result = res;
  1607. req->compl.cflags = cflags;
  1608. list_add_tail(&req->compl.list, &cs->list);
  1609. if (++cs->nr >= 32)
  1610. io_submit_flush_completions(cs);
  1611. }
  1612. }
  1613. static void io_req_complete(struct io_kiocb *req, long res)
  1614. {
  1615. __io_req_complete(req, res, 0, NULL);
  1616. }
  1617. static inline bool io_is_fallback_req(struct io_kiocb *req)
  1618. {
  1619. return req == (struct io_kiocb *)
  1620. ((unsigned long) req->ctx->fallback_req & ~1UL);
  1621. }
  1622. static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
  1623. {
  1624. struct io_kiocb *req;
  1625. req = ctx->fallback_req;
  1626. if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
  1627. return req;
  1628. return NULL;
  1629. }
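/*
 * Allocate a request, preferring the per-submit bulk cache; fall back to a
 * single slab allocation and, as a last resort, the ctx fallback request.
 */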
  1630. static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
  1631. struct io_submit_state *state)
  1632. {
  1633. if (!state->free_reqs) {
  1634. gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
  1635. size_t sz;
  1636. int ret;
  1637. sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
  1638. ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
  1639. /*
  1640. * Bulk alloc is all-or-nothing. If we fail to get a batch,
  1641. * retry single alloc to be on the safe side.
  1642. */
  1643. if (unlikely(ret <= 0)) {
  1644. state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
  1645. if (!state->reqs[0])
  1646. goto fallback;
  1647. ret = 1;
  1648. }
  1649. state->free_reqs = ret;
  1650. }
  1651. state->free_reqs--;
  1652. return state->reqs[state->free_reqs];
  1653. fallback:
  1654. return io_get_fallback_req(ctx);
  1655. }
  1656. static inline void io_put_file(struct io_kiocb *req, struct file *file,
  1657. bool fixed)
  1658. {
  1659. if (fixed)
  1660. percpu_ref_put(req->fixed_file_refs);
  1661. else
  1662. fput(file);
  1663. }
  1664. static void io_dismantle_req(struct io_kiocb *req)
  1665. {
  1666. io_clean_op(req);
  1667. if (req->async_data)
  1668. kfree(req->async_data);
  1669. if (req->file)
  1670. io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
  1671. io_req_clean_work(req);
  1672. }
  1673. static void __io_free_req(struct io_kiocb *req)
  1674. {
  1675. struct io_uring_task *tctx = req->task->io_uring;
  1676. struct io_ring_ctx *ctx = req->ctx;
  1677. io_dismantle_req(req);
  1678. percpu_counter_dec(&tctx->inflight);
  1679. if (atomic_read(&tctx->in_idle))
  1680. wake_up(&tctx->wait);
  1681. put_task_struct(req->task);
  1682. if (likely(!io_is_fallback_req(req)))
  1683. kmem_cache_free(req_cachep, req);
  1684. else
  1685. clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
  1686. percpu_ref_put(&ctx->refs);
  1687. }
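/*
 * If the request armed a linked timeout, try to cancel its hrtimer and
 * complete it with -ECANCELED so it can't fire after the request is gone.
 */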
  1688. static void io_kill_linked_timeout(struct io_kiocb *req)
  1689. {
  1690. struct io_ring_ctx *ctx = req->ctx;
  1691. struct io_kiocb *link;
  1692. bool cancelled = false;
  1693. unsigned long flags;
  1694. spin_lock_irqsave(&ctx->completion_lock, flags);
  1695. link = list_first_entry_or_null(&req->link_list, struct io_kiocb,
  1696. link_list);
  1697. /*
  1698. * Can happen if a linked timeout fired and link had been like
  1699. * req -> link t-out -> link t-out [-> ...]
  1700. */
  1701. if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
  1702. struct io_timeout_data *io = link->async_data;
  1703. int ret;
  1704. list_del_init(&link->link_list);
  1705. ret = hrtimer_try_to_cancel(&io->timer);
  1706. if (ret != -1) {
  1707. io_cqring_fill_event(link, -ECANCELED);
  1708. io_commit_cqring(ctx);
  1709. cancelled = true;
  1710. }
  1711. }
  1712. req->flags &= ~REQ_F_LINK_TIMEOUT;
  1713. spin_unlock_irqrestore(&ctx->completion_lock, flags);
  1714. if (cancelled) {
  1715. io_cqring_ev_posted(ctx);
  1716. io_put_req(link);
  1717. }
  1718. }
  1719. static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
  1720. {
  1721. struct io_kiocb *nxt;
  1722. /*
1723. * The list should never be empty when we are called here. But it could
1724. * potentially happen if the chain is messed up, so check to be on the
1725. * safe side.
  1726. */
  1727. if (unlikely(list_empty(&req->link_list)))
  1728. return NULL;
  1729. nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
  1730. list_del_init(&req->link_list);
  1731. if (!list_empty(&nxt->link_list))
  1732. nxt->flags |= REQ_F_LINK_HEAD;
  1733. return nxt;
  1734. }
  1735. /*
  1736. * Called if REQ_F_LINK_HEAD is set, and we fail the head request
  1737. */
  1738. static void io_fail_links(struct io_kiocb *req)
  1739. {
  1740. struct io_ring_ctx *ctx = req->ctx;
  1741. unsigned long flags;
  1742. spin_lock_irqsave(&ctx->completion_lock, flags);
  1743. while (!list_empty(&req->link_list)) {
  1744. struct io_kiocb *link = list_first_entry(&req->link_list,
  1745. struct io_kiocb, link_list);
  1746. list_del_init(&link->link_list);
  1747. trace_io_uring_fail_link(req, link);
  1748. io_cqring_fill_event(link, -ECANCELED);
  1749. /*
  1750. * It's ok to free under spinlock as they're not linked anymore,
  1751. * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
  1752. * work.fs->lock.
  1753. */
  1754. if (link->flags & REQ_F_WORK_INITIALIZED)
  1755. io_put_req_deferred(link, 2);
  1756. else
  1757. io_double_put_req(link);
  1758. }
  1759. io_commit_cqring(ctx);
  1760. spin_unlock_irqrestore(&ctx->completion_lock, flags);
  1761. io_cqring_ev_posted(ctx);
  1762. }
  1763. static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
  1764. {
  1765. req->flags &= ~REQ_F_LINK_HEAD;
  1766. if (req->flags & REQ_F_LINK_TIMEOUT)
  1767. io_kill_linked_timeout(req);
  1768. /*
  1769. * If LINK is set, we have dependent requests in this chain. If we
  1770. * didn't fail this request, queue the first one up, moving any other
  1771. * dependencies to the next request. In case of failure, fail the rest
  1772. * of the chain.
  1773. */
  1774. if (likely(!(req->flags & REQ_F_FAIL_LINK)))
  1775. return io_req_link_next(req);
  1776. io_fail_links(req);
  1777. return NULL;
  1778. }
  1779. static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
  1780. {
  1781. if (likely(!(req->flags & REQ_F_LINK_HEAD)))
  1782. return NULL;
  1783. return __io_req_find_next(req);
  1784. }
  1785. static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
  1786. {
  1787. struct task_struct *tsk = req->task;
  1788. struct io_ring_ctx *ctx = req->ctx;
  1789. enum task_work_notify_mode notify;
  1790. int ret;
  1791. if (tsk->flags & PF_EXITING)
  1792. return -ESRCH;
  1793. /*
  1794. * SQPOLL kernel thread doesn't need notification, just a wakeup. For
  1795. * all other cases, use TWA_SIGNAL unconditionally to ensure we're
  1796. * processing task_work. There's no reliable way to tell if TWA_RESUME
  1797. * will do the job.
  1798. */
  1799. notify = TWA_NONE;
  1800. if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
  1801. notify = TWA_SIGNAL;
  1802. ret = task_work_add(tsk, &req->task_work, notify);
  1803. if (!ret)
  1804. wake_up_process(tsk);
  1805. return ret;
  1806. }
  1807. static void __io_req_task_cancel(struct io_kiocb *req, int error)
  1808. {
  1809. struct io_ring_ctx *ctx = req->ctx;
  1810. spin_lock_irq(&ctx->completion_lock);
  1811. io_cqring_fill_event(req, error);
  1812. io_commit_cqring(ctx);
  1813. spin_unlock_irq(&ctx->completion_lock);
  1814. io_cqring_ev_posted(ctx);
  1815. req_set_fail_links(req);
  1816. io_double_put_req(req);
  1817. }
  1818. static void io_req_task_cancel(struct callback_head *cb)
  1819. {
  1820. struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
  1821. struct io_ring_ctx *ctx = req->ctx;
  1822. mutex_lock(&ctx->uring_lock);
  1823. __io_req_task_cancel(req, -ECANCELED);
  1824. mutex_unlock(&ctx->uring_lock);
  1825. percpu_ref_put(&ctx->refs);
  1826. }
  1827. static void __io_req_task_submit(struct io_kiocb *req)
  1828. {
  1829. struct io_ring_ctx *ctx = req->ctx;
  1830. mutex_lock(&ctx->uring_lock);
  1831. if (!ctx->sqo_dead && !__io_sq_thread_acquire_mm(ctx))
  1832. __io_queue_sqe(req, NULL);
  1833. else
  1834. __io_req_task_cancel(req, -EFAULT);
  1835. mutex_unlock(&ctx->uring_lock);
  1836. if (ctx->flags & IORING_SETUP_SQPOLL)
  1837. io_sq_thread_drop_mm();
  1838. }
  1839. static void io_req_task_submit(struct callback_head *cb)
  1840. {
  1841. struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
  1842. struct io_ring_ctx *ctx = req->ctx;
  1843. __io_req_task_submit(req);
  1844. percpu_ref_put(&ctx->refs);
  1845. }
  1846. static void io_req_task_queue(struct io_kiocb *req)
  1847. {
  1848. int ret;
  1849. init_task_work(&req->task_work, io_req_task_submit);
  1850. percpu_ref_get(&req->ctx->refs);
  1851. ret = io_req_task_work_add(req, true);
  1852. if (unlikely(ret)) {
  1853. struct task_struct *tsk;
  1854. init_task_work(&req->task_work, io_req_task_cancel);
  1855. tsk = io_wq_get_task(req->ctx->io_wq);
  1856. task_work_add(tsk, &req->task_work, TWA_NONE);
  1857. wake_up_process(tsk);
  1858. }
  1859. }
  1860. static void io_queue_next(struct io_kiocb *req)
  1861. {
  1862. struct io_kiocb *nxt = io_req_find_next(req);
  1863. if (nxt)
  1864. io_req_task_queue(nxt);
  1865. }
  1866. static void io_free_req(struct io_kiocb *req)
  1867. {
  1868. io_queue_next(req);
  1869. __io_free_req(req);
  1870. }
  1871. struct req_batch {
  1872. void *reqs[IO_IOPOLL_BATCH];
  1873. int to_free;
  1874. struct task_struct *task;
  1875. int task_refs;
  1876. };
  1877. static inline void io_init_req_batch(struct req_batch *rb)
  1878. {
  1879. rb->to_free = 0;
  1880. rb->task_refs = 0;
  1881. rb->task = NULL;
  1882. }
  1883. static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
  1884. struct req_batch *rb)
  1885. {
  1886. kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
  1887. percpu_ref_put_many(&ctx->refs, rb->to_free);
  1888. rb->to_free = 0;
  1889. }
  1890. static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
  1891. struct req_batch *rb)
  1892. {
  1893. if (rb->to_free)
  1894. __io_req_free_batch_flush(ctx, rb);
  1895. if (rb->task) {
  1896. struct io_uring_task *tctx = rb->task->io_uring;
  1897. percpu_counter_sub(&tctx->inflight, rb->task_refs);
  1898. if (atomic_read(&tctx->in_idle))
  1899. wake_up(&tctx->wait);
  1900. put_task_struct_many(rb->task, rb->task_refs);
  1901. rb->task = NULL;
  1902. }
  1903. }
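/*
 * Queue a request for batched freeing: accumulate slab objects and per-task
 * refs, flushing via kmem_cache_free_bulk() once the batch is full.
 */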
  1904. static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
  1905. {
  1906. if (unlikely(io_is_fallback_req(req))) {
  1907. io_free_req(req);
  1908. return;
  1909. }
  1910. if (req->flags & REQ_F_LINK_HEAD)
  1911. io_queue_next(req);
  1912. if (req->task != rb->task) {
  1913. if (rb->task) {
  1914. struct io_uring_task *tctx = rb->task->io_uring;
  1915. percpu_counter_sub(&tctx->inflight, rb->task_refs);
  1916. if (atomic_read(&tctx->in_idle))
  1917. wake_up(&tctx->wait);
  1918. put_task_struct_many(rb->task, rb->task_refs);
  1919. }
  1920. rb->task = req->task;
  1921. rb->task_refs = 0;
  1922. }
  1923. rb->task_refs++;
  1924. io_dismantle_req(req);
  1925. rb->reqs[rb->to_free++] = req;
  1926. if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
  1927. __io_req_free_batch_flush(req->ctx, rb);
  1928. }
  1929. /*
  1930. * Drop reference to request, return next in chain (if there is one) if this
  1931. * was the last reference to this request.
  1932. */
  1933. static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
  1934. {
  1935. struct io_kiocb *nxt = NULL;
  1936. if (refcount_dec_and_test(&req->refs)) {
  1937. nxt = io_req_find_next(req);
  1938. __io_free_req(req);
  1939. }
  1940. return nxt;
  1941. }
  1942. static void io_put_req(struct io_kiocb *req)
  1943. {
  1944. if (refcount_dec_and_test(&req->refs))
  1945. io_free_req(req);
  1946. }
  1947. static void io_put_req_deferred_cb(struct callback_head *cb)
  1948. {
  1949. struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
  1950. io_free_req(req);
  1951. }
  1952. static void io_free_req_deferred(struct io_kiocb *req)
  1953. {
  1954. int ret;
  1955. init_task_work(&req->task_work, io_put_req_deferred_cb);
  1956. ret = io_req_task_work_add(req, true);
  1957. if (unlikely(ret)) {
  1958. struct task_struct *tsk;
  1959. tsk = io_wq_get_task(req->ctx->io_wq);
  1960. task_work_add(tsk, &req->task_work, TWA_NONE);
  1961. wake_up_process(tsk);
  1962. }
  1963. }
  1964. static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
  1965. {
  1966. if (refcount_sub_and_test(refs, &req->refs))
  1967. io_free_req_deferred(req);
  1968. }
  1969. static struct io_wq_work *io_steal_work(struct io_kiocb *req)
  1970. {
  1971. struct io_kiocb *nxt;
  1972. /*
1973. * A ref is owned by io-wq, in whose context we're running. So, if that's
1974. * the last one, it's safe to steal the next work. False negatives are OK;
1975. * the work will just be re-punted async in io_put_work().
  1976. */
  1977. if (refcount_read(&req->refs) != 1)
  1978. return NULL;
  1979. nxt = io_req_find_next(req);
  1980. return nxt ? &nxt->work : NULL;
  1981. }
  1982. static void io_double_put_req(struct io_kiocb *req)
  1983. {
  1984. /* drop both submit and complete references */
  1985. if (refcount_sub_and_test(2, &req->refs))
  1986. io_free_req(req);
  1987. }
  1988. static unsigned io_cqring_events(struct io_ring_ctx *ctx)
  1989. {
  1990. struct io_rings *rings = ctx->rings;
  1991. /* See comment at the top of this file */
  1992. smp_rmb();
  1993. return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
  1994. }
  1995. static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
  1996. {
  1997. struct io_rings *rings = ctx->rings;
  1998. /* make sure SQ entry isn't read before tail */
  1999. return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
  2000. }
  2001. static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
  2002. {
  2003. unsigned int cflags;
  2004. cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
  2005. cflags |= IORING_CQE_F_BUFFER;
  2006. req->flags &= ~REQ_F_BUFFER_SELECTED;
  2007. kfree(kbuf);
  2008. return cflags;
  2009. }
  2010. static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
  2011. {
  2012. struct io_buffer *kbuf;
  2013. kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
  2014. return io_put_kbuf(req, kbuf);
  2015. }
  2016. static inline bool io_run_task_work(void)
  2017. {
  2018. /*
  2019. * Not safe to run on exiting task, and the task_work handling will
  2020. * not add work to such a task.
  2021. */
  2022. if (unlikely(current->flags & PF_EXITING))
  2023. return false;
  2024. if (current->task_works) {
  2025. __set_current_state(TASK_RUNNING);
  2026. task_work_run();
  2027. return true;
  2028. }
  2029. return false;
  2030. }
  2031. static void io_iopoll_queue(struct list_head *again)
  2032. {
  2033. struct io_kiocb *req;
  2034. do {
  2035. req = list_first_entry(again, struct io_kiocb, inflight_entry);
  2036. list_del(&req->inflight_entry);
  2037. __io_complete_rw(req, -EAGAIN, 0, NULL);
  2038. } while (!list_empty(again));
  2039. }
  2040. /*
  2041. * Find and free completed poll iocbs
  2042. */
  2043. static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
  2044. struct list_head *done)
  2045. {
  2046. struct req_batch rb;
  2047. struct io_kiocb *req;
  2048. LIST_HEAD(again);
  2049. /* order with ->result store in io_complete_rw_iopoll() */
  2050. smp_rmb();
  2051. io_init_req_batch(&rb);
  2052. while (!list_empty(done)) {
  2053. int cflags = 0;
  2054. req = list_first_entry(done, struct io_kiocb, inflight_entry);
  2055. if (READ_ONCE(req->result) == -EAGAIN) {
  2056. req->result = 0;
  2057. req->iopoll_completed = 0;
  2058. list_move_tail(&req->inflight_entry, &again);
  2059. continue;
  2060. }
  2061. list_del(&req->inflight_entry);
  2062. if (req->flags & REQ_F_BUFFER_SELECTED)
  2063. cflags = io_put_rw_kbuf(req);
  2064. __io_cqring_fill_event(req, req->result, cflags);
  2065. (*nr_events)++;
  2066. if (refcount_dec_and_test(&req->refs))
  2067. io_req_free_batch(&rb, req);
  2068. }
  2069. io_commit_cqring(ctx);
  2070. if (ctx->flags & IORING_SETUP_SQPOLL)
  2071. io_cqring_ev_posted(ctx);
  2072. io_req_free_batch_finish(ctx, &rb);
  2073. if (!list_empty(&again))
  2074. io_iopoll_queue(&again);
  2075. }
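/*
 * Run one pass of ->iopoll() over the pending IOPOLL requests, moving
 * completed entries to a local list and reaping them. Spinning is only
 * allowed with a single device on the list and while under the requested
 * minimum number of events.
 */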
  2076. static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
  2077. long min)
  2078. {
  2079. struct io_kiocb *req, *tmp;
  2080. LIST_HEAD(done);
  2081. bool spin;
  2082. int ret;
  2083. /*
  2084. * Only spin for completions if we don't have multiple devices hanging
  2085. * off our complete list, and we're under the requested amount.
  2086. */
  2087. spin = !ctx->poll_multi_file && *nr_events < min;
  2088. ret = 0;
  2089. list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
  2090. struct kiocb *kiocb = &req->rw.kiocb;
  2091. /*
  2092. * Move completed and retryable entries to our local lists.
  2093. * If we find a request that requires polling, break out
  2094. * and complete those lists first, if we have entries there.
  2095. */
  2096. if (READ_ONCE(req->iopoll_completed)) {
  2097. list_move_tail(&req->inflight_entry, &done);
  2098. continue;
  2099. }
  2100. if (!list_empty(&done))
  2101. break;
  2102. ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
  2103. if (ret < 0)
  2104. break;
  2105. /* iopoll may have completed current req */
  2106. if (READ_ONCE(req->iopoll_completed))
  2107. list_move_tail(&req->inflight_entry, &done);
  2108. if (ret && spin)
  2109. spin = false;
  2110. ret = 0;
  2111. }
  2112. if (!list_empty(&done))
  2113. io_iopoll_complete(ctx, nr_events, &done);
  2114. return ret;
  2115. }
  2116. /*
  2117. * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
  2118. * non-spinning poll check - we'll still enter the driver poll loop, but only
  2119. * as a non-spinning completion check.
  2120. */
  2121. static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
  2122. long min)
  2123. {
  2124. while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
  2125. int ret;
  2126. ret = io_do_iopoll(ctx, nr_events, min);
  2127. if (ret < 0)
  2128. return ret;
  2129. if (*nr_events >= min)
  2130. return 0;
  2131. }
  2132. return 1;
  2133. }
  2134. /*
  2135. * We can't just wait for polled events to come to us, we have to actively
  2136. * find and complete them.
  2137. */
  2138. static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
  2139. {
  2140. if (!(ctx->flags & IORING_SETUP_IOPOLL))
  2141. return;
  2142. mutex_lock(&ctx->uring_lock);
  2143. while (!list_empty(&ctx->iopoll_list)) {
  2144. unsigned int nr_events = 0;
  2145. io_do_iopoll(ctx, &nr_events, 0);
  2146. /* let it sleep and repeat later if can't complete a request */
  2147. if (nr_events == 0)
  2148. break;
  2149. /*
2150. * Ensure we allow local-to-the-cpu processing to take place; in this
2151. * case we need to ensure that we reap all events. Also let task_work,
2152. * etc. make progress by releasing the mutex.
  2153. */
  2154. if (need_resched()) {
  2155. mutex_unlock(&ctx->uring_lock);
  2156. cond_resched();
  2157. mutex_lock(&ctx->uring_lock);
  2158. }
  2159. }
  2160. mutex_unlock(&ctx->uring_lock);
  2161. }
  2162. static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
  2163. {
  2164. unsigned int nr_events = 0;
  2165. int iters = 0, ret = 0;
  2166. /*
  2167. * We disallow the app entering submit/complete with polling, but we
  2168. * still need to lock the ring to prevent racing with polled issue
  2169. * that got punted to a workqueue.
  2170. */
  2171. mutex_lock(&ctx->uring_lock);
  2172. do {
  2173. /*
  2174. * Don't enter poll loop if we already have events pending.
  2175. * If we do, we can potentially be spinning for commands that
  2176. * already triggered a CQE (eg in error).
  2177. */
  2178. if (test_bit(0, &ctx->cq_check_overflow))
  2179. __io_cqring_overflow_flush(ctx, false, NULL, NULL);
  2180. if (io_cqring_events(ctx))
  2181. break;
  2182. /*
  2183. * If a submit got punted to a workqueue, we can have the
  2184. * application entering polling for a command before it gets
  2185. * issued. That app will hold the uring_lock for the duration
  2186. * of the poll right here, so we need to take a breather every
  2187. * now and then to ensure that the issue has a chance to add
  2188. * the poll to the issued list. Otherwise we can spin here
  2189. * forever, while the workqueue is stuck trying to acquire the
  2190. * very same mutex.
  2191. */
  2192. if (!(++iters & 7)) {
  2193. mutex_unlock(&ctx->uring_lock);
  2194. io_run_task_work();
  2195. mutex_lock(&ctx->uring_lock);
  2196. }
  2197. ret = io_iopoll_getevents(ctx, &nr_events, min);
  2198. if (ret <= 0)
  2199. break;
  2200. ret = 0;
  2201. } while (min && !nr_events && !need_resched());
  2202. mutex_unlock(&ctx->uring_lock);
  2203. return ret;
  2204. }
  2205. static void kiocb_end_write(struct io_kiocb *req)
  2206. {
  2207. /*
  2208. * Tell lockdep we inherited freeze protection from submission
  2209. * thread.
  2210. */
  2211. if (req->flags & REQ_F_ISREG) {
  2212. struct inode *inode = file_inode(req->file);
  2213. __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
  2214. }
  2215. file_end_write(req->file);
  2216. }
  2217. static void io_complete_rw_common(struct kiocb *kiocb, long res,
  2218. struct io_comp_state *cs)
  2219. {
  2220. struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
  2221. int cflags = 0;
  2222. if (kiocb->ki_flags & IOCB_WRITE)
  2223. kiocb_end_write(req);
  2224. if (res != req->result)
  2225. req_set_fail_links(req);
  2226. if (req->flags & REQ_F_BUFFER_SELECTED)
  2227. cflags = io_put_rw_kbuf(req);
  2228. __io_req_complete(req, res, cflags, cs);
  2229. }
  2230. #ifdef CONFIG_BLOCK
  2231. static bool io_resubmit_prep(struct io_kiocb *req, int error)
  2232. {
  2233. struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
  2234. ssize_t ret = -ECANCELED;
  2235. struct iov_iter iter;
  2236. int rw;
  2237. if (error) {
  2238. ret = error;
  2239. goto end_req;
  2240. }
  2241. switch (req->opcode) {
  2242. case IORING_OP_READV:
  2243. case IORING_OP_READ_FIXED:
  2244. case IORING_OP_READ:
  2245. rw = READ;
  2246. break;
  2247. case IORING_OP_WRITEV:
  2248. case IORING_OP_WRITE_FIXED:
  2249. case IORING_OP_WRITE:
  2250. rw = WRITE;
  2251. break;
  2252. default:
  2253. printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
  2254. req->opcode);
  2255. goto end_req;
  2256. }
  2257. if (!req->async_data) {
  2258. ret = io_import_iovec(rw, req, &iovec, &iter, false);
  2259. if (ret < 0)
  2260. goto end_req;
  2261. ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
  2262. if (!ret)
  2263. return true;
  2264. kfree(iovec);
  2265. } else {
  2266. return true;
  2267. }
  2268. end_req:
  2269. req_set_fail_links(req);
  2270. return false;
  2271. }
  2272. #endif
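/*
 * Descriptive note for the function below: if a read/write on a block device
 * or regular file failed with -EAGAIN or -EOPNOTSUPP, try to re-prepare the
 * request and punt it to async work rather than failing it. Returns true if
 * the reissue was queued.
 */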
  2273. static bool io_rw_reissue(struct io_kiocb *req, long res)
  2274. {
  2275. #ifdef CONFIG_BLOCK
  2276. umode_t mode = file_inode(req->file)->i_mode;
  2277. int ret;
  2278. if (!S_ISBLK(mode) && !S_ISREG(mode))
  2279. return false;
  2280. if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
  2281. return false;
  2282. /*
  2283. * If ref is dying, we might be running poll reap from the exit work.
  2284. * Don't attempt to reissue from that path, just let it fail with
  2285. * -EAGAIN.
  2286. */
  2287. if (percpu_ref_is_dying(&req->ctx->refs))
  2288. return false;
  2289. ret = io_sq_thread_acquire_mm(req->ctx, req);
  2290. if (io_resubmit_prep(req, ret)) {
  2291. refcount_inc(&req->refs);
  2292. io_queue_async_work(req);
  2293. return true;
  2294. }
  2295. #endif
  2296. return false;
  2297. }
  2298. static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
  2299. struct io_comp_state *cs)
  2300. {
  2301. if (!io_rw_reissue(req, res))
  2302. io_complete_rw_common(&req->rw.kiocb, res, cs);
  2303. }
  2304. static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
  2305. {
  2306. struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
  2307. __io_complete_rw(req, res, res2, NULL);
  2308. }
  2309. static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
  2310. {
  2311. struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
  2312. if (kiocb->ki_flags & IOCB_WRITE)
  2313. kiocb_end_write(req);
  2314. if (res != -EAGAIN && res != req->result)
  2315. req_set_fail_links(req);
  2316. WRITE_ONCE(req->result, res);
  2317. /* order with io_poll_complete() checking ->result */
  2318. smp_wmb();
  2319. WRITE_ONCE(req->iopoll_completed, 1);
  2320. }
/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
  2327. static void io_iopoll_req_issued(struct io_kiocb *req)
  2328. {
  2329. struct io_ring_ctx *ctx = req->ctx;
  2330. /*
  2331. * Track whether we have multiple files in our lists. This will impact
  2332. * how we do polling eventually, not spinning if we're on potentially
  2333. * different devices.
  2334. */
  2335. if (list_empty(&ctx->iopoll_list)) {
  2336. ctx->poll_multi_file = false;
  2337. } else if (!ctx->poll_multi_file) {
  2338. struct io_kiocb *list_req;
  2339. list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
  2340. inflight_entry);
  2341. if (list_req->file != req->file)
  2342. ctx->poll_multi_file = true;
  2343. }
  2344. /*
  2345. * For fast devices, IO may have already completed. If it has, add
  2346. * it to the front so we find it first.
  2347. */
  2348. if (READ_ONCE(req->iopoll_completed))
  2349. list_add(&req->inflight_entry, &ctx->iopoll_list);
  2350. else
  2351. list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
  2352. if ((ctx->flags & IORING_SETUP_SQPOLL) &&
  2353. wq_has_sleeper(&ctx->sq_data->wait))
  2354. wake_up(&ctx->sq_data->wait);
  2355. }
static void __io_state_file_put(struct io_submit_state *state)
{
	if (state->has_refs)
		fput_many(state->file, state->has_refs);
	state->file = NULL;
}

static inline void io_state_file_put(struct io_submit_state *state)
{
	if (state->file)
		__io_state_file_put(state);
}
  2367. /*
  2368. * Get as many references to a file as we have IOs left in this submission,
  2369. * assuming most submissions are for one file, or at least that each file
  2370. * has more than one submission.
  2371. */
  2372. static struct file *__io_file_get(struct io_submit_state *state, int fd)
  2373. {
  2374. if (!state)
  2375. return fget(fd);
  2376. if (state->file) {
  2377. if (state->fd == fd) {
  2378. state->has_refs--;
  2379. return state->file;
  2380. }
  2381. __io_state_file_put(state);
  2382. }
  2383. state->file = fget_many(fd, state->ios_left);
  2384. if (!state->file)
  2385. return NULL;
  2386. state->fd = fd;
  2387. state->has_refs = state->ios_left - 1;
  2388. return state->file;
  2389. }
static bool io_bdev_nowait(struct block_device *bdev)
{
#ifdef CONFIG_BLOCK
	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
#else
	return true;
#endif
}
  2398. /*
  2399. * If we tracked the file through the SCM inflight mechanism, we could support
  2400. * any file. For now, just ensure that anything potentially problematic is done
  2401. * inline.
  2402. */
  2403. static bool io_file_supports_async(struct file *file, int rw)
  2404. {
  2405. umode_t mode = file_inode(file)->i_mode;
  2406. if (S_ISBLK(mode)) {
  2407. if (io_bdev_nowait(file->f_inode->i_bdev))
  2408. return true;
  2409. return false;
  2410. }
  2411. if (S_ISSOCK(mode))
  2412. return true;
  2413. if (S_ISREG(mode)) {
  2414. if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
  2415. file->f_op != &io_uring_fops)
  2416. return true;
  2417. return false;
  2418. }
  2419. /* any ->read/write should understand O_NONBLOCK */
  2420. if (file->f_flags & O_NONBLOCK)
  2421. return true;
  2422. if (!(file->f_mode & FMODE_NOWAIT))
  2423. return false;
  2424. if (rw == READ)
  2425. return file->f_op->read_iter != NULL;
  2426. return file->f_op->write_iter != NULL;
  2427. }
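/*
 * Descriptive note for the function below: prepare the kiocb from the sqe -
 * file position, rw flags, ioprio and the completion handler (iopoll vs.
 * regular), plus the user address/length that the later iovec import uses.
 */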
  2428. static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  2429. {
  2430. struct io_ring_ctx *ctx = req->ctx;
  2431. struct kiocb *kiocb = &req->rw.kiocb;
  2432. unsigned ioprio;
  2433. int ret;
  2434. if (S_ISREG(file_inode(req->file)->i_mode))
  2435. req->flags |= REQ_F_ISREG;
  2436. kiocb->ki_pos = READ_ONCE(sqe->off);
  2437. if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
  2438. req->flags |= REQ_F_CUR_POS;
  2439. kiocb->ki_pos = req->file->f_pos;
  2440. }
  2441. kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
  2442. kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
  2443. ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
  2444. if (unlikely(ret))
  2445. return ret;
  2446. ioprio = READ_ONCE(sqe->ioprio);
  2447. if (ioprio) {
  2448. ret = ioprio_check_cap(ioprio);
  2449. if (ret)
  2450. return ret;
  2451. kiocb->ki_ioprio = ioprio;
  2452. } else
  2453. kiocb->ki_ioprio = get_current_ioprio();
  2454. /* don't allow async punt if RWF_NOWAIT was requested */
  2455. if (kiocb->ki_flags & IOCB_NOWAIT)
  2456. req->flags |= REQ_F_NOWAIT;
  2457. if (ctx->flags & IORING_SETUP_IOPOLL) {
  2458. if (!(kiocb->ki_flags & IOCB_DIRECT) ||
  2459. !kiocb->ki_filp->f_op->iopoll)
  2460. return -EOPNOTSUPP;
  2461. kiocb->ki_flags |= IOCB_HIPRI;
  2462. kiocb->ki_complete = io_complete_rw_iopoll;
  2463. req->iopoll_completed = 0;
  2464. } else {
  2465. if (kiocb->ki_flags & IOCB_HIPRI)
  2466. return -EINVAL;
  2467. kiocb->ki_complete = io_complete_rw;
  2468. }
  2469. req->rw.addr = READ_ONCE(sqe->addr);
  2470. req->rw.len = READ_ONCE(sqe->len);
  2471. req->buf_index = READ_ONCE(sqe->buf_index);
  2472. return 0;
  2473. }
static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}
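/*
 * Descriptive note for the function below: final bookkeeping for a read/write
 * attempt - fold in bytes completed by an earlier partial attempt, update
 * ->f_pos for REQ_F_CUR_POS requests, then either complete the request inline
 * or hand off to ->ki_complete() via io_rw_done().
 */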
  2494. static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
  2495. struct io_comp_state *cs)
  2496. {
  2497. struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
  2498. struct io_async_rw *io = req->async_data;
  2499. /* add previously done IO, if any */
  2500. if (io && io->bytes_done > 0) {
  2501. if (ret < 0)
  2502. ret = io->bytes_done;
  2503. else
  2504. ret += io->bytes_done;
  2505. }
  2506. if (req->flags & REQ_F_CUR_POS)
  2507. req->file->f_pos = kiocb->ki_pos;
  2508. if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
  2509. __io_complete_rw(req, ret, 0, cs);
  2510. else
  2511. io_rw_done(kiocb, ret);
  2512. }
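/*
 * Descriptive note for the function below: set up the iterator for a
 * registered (fixed) buffer - validate that the request's address range lies
 * inside the mapped region, then point the bvec iterator at the correct
 * starting segment and offset.
 */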
  2513. static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
  2514. struct iov_iter *iter)
  2515. {
  2516. struct io_ring_ctx *ctx = req->ctx;
  2517. size_t len = req->rw.len;
  2518. struct io_mapped_ubuf *imu;
  2519. u16 index, buf_index = req->buf_index;
  2520. size_t offset;
  2521. u64 buf_addr;
  2522. if (unlikely(buf_index >= ctx->nr_user_bufs))
  2523. return -EFAULT;
  2524. index = array_index_nospec(buf_index, ctx->nr_user_bufs);
  2525. imu = &ctx->user_bufs[index];
  2526. buf_addr = req->rw.addr;
  2527. /* overflow */
  2528. if (buf_addr + len < buf_addr)
  2529. return -EFAULT;
  2530. /* not inside the mapped region */
  2531. if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
  2532. return -EFAULT;
  2533. /*
  2534. * May not be a start of buffer, set size appropriately
  2535. * and advance us to the beginning.
  2536. */
  2537. offset = buf_addr - imu->ubuf;
  2538. iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
  2539. if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
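		/*
		 * Worked example (hypothetical numbers): with a 1024-byte head
		 * bvec, 4096-byte pages and offset == 10240, we drop the head
		 * (offset becomes 9216), seg_skip = 1 + (9216 >> 12) = 3 bvecs
		 * are skipped in total, and iov_offset = 9216 & 4095 = 1024;
		 * that is 1024 + 2 * 4096 + 1024 = 10240 bytes skipped overall.
		 */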
  2556. const struct bio_vec *bvec = imu->bvec;
  2557. if (offset <= bvec->bv_len) {
  2558. iov_iter_advance(iter, offset);
  2559. } else {
  2560. unsigned long seg_skip;
  2561. /* skip first vec */
  2562. offset -= bvec->bv_len;
  2563. seg_skip = 1 + (offset >> PAGE_SHIFT);
  2564. iter->bvec = bvec + seg_skip;
  2565. iter->nr_segs -= seg_skip;
  2566. iter->count -= bvec->bv_len + offset;
  2567. iter->iov_offset = offset & ~PAGE_MASK;
  2568. }
  2569. }
  2570. return len;
  2571. }
static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);
}
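/*
 * Descriptive note for the function below: pick a buffer from provided-buffer
 * group 'bgid' (under the ring lock), clamping *len to the buffer's size.
 * Returns the previously selected buffer if one is already attached to the
 * request, or ERR_PTR(-ENOBUFS) if the group has nothing left.
 */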
  2588. static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
  2589. int bgid, struct io_buffer *kbuf,
  2590. bool needs_lock)
  2591. {
  2592. struct io_buffer *head;
  2593. if (req->flags & REQ_F_BUFFER_SELECTED)
  2594. return kbuf;
  2595. io_ring_submit_lock(req->ctx, needs_lock);
  2596. lockdep_assert_held(&req->ctx->uring_lock);
  2597. head = xa_load(&req->ctx->io_buffers, bgid);
  2598. if (head) {
  2599. if (!list_empty(&head->list)) {
  2600. kbuf = list_last_entry(&head->list, struct io_buffer,
  2601. list);
  2602. list_del(&kbuf->list);
  2603. } else {
  2604. kbuf = head;
  2605. xa_erase(&req->ctx->io_buffers, bgid);
  2606. }
  2607. if (*len > kbuf->len)
  2608. *len = kbuf->len;
  2609. } else {
  2610. kbuf = ERR_PTR(-ENOBUFS);
  2611. }
  2612. io_ring_submit_unlock(req->ctx, needs_lock);
  2613. return kbuf;
  2614. }
  2615. static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
  2616. bool needs_lock)
  2617. {
  2618. struct io_buffer *kbuf;
  2619. u16 bgid;
  2620. kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
  2621. bgid = req->buf_index;
  2622. kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
  2623. if (IS_ERR(kbuf))
  2624. return kbuf;
  2625. req->rw.addr = (u64) (unsigned long) kbuf;
  2626. req->flags |= REQ_F_BUFFER_SELECTED;
  2627. return u64_to_user_ptr(kbuf->addr);
  2628. }
  2629. #ifdef CONFIG_COMPAT
  2630. static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
  2631. bool needs_lock)
  2632. {
  2633. struct compat_iovec __user *uiov;
  2634. compat_ssize_t clen;
  2635. void __user *buf;
  2636. ssize_t len;
  2637. uiov = u64_to_user_ptr(req->rw.addr);
  2638. if (!access_ok(uiov, sizeof(*uiov)))
  2639. return -EFAULT;
  2640. if (__get_user(clen, &uiov->iov_len))
  2641. return -EFAULT;
  2642. if (clen < 0)
  2643. return -EINVAL;
  2644. len = clen;
  2645. buf = io_rw_buffer_select(req, &len, needs_lock);
  2646. if (IS_ERR(buf))
  2647. return PTR_ERR(buf);
  2648. iov[0].iov_base = buf;
  2649. iov[0].iov_len = (compat_size_t) len;
  2650. return 0;
  2651. }
  2652. #endif
  2653. static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
  2654. bool needs_lock)
  2655. {
  2656. struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
  2657. void __user *buf;
  2658. ssize_t len;
  2659. if (copy_from_user(iov, uiov, sizeof(*uiov)))
  2660. return -EFAULT;
  2661. len = iov[0].iov_len;
  2662. if (len < 0)
  2663. return -EINVAL;
  2664. buf = io_rw_buffer_select(req, &len, needs_lock);
  2665. if (IS_ERR(buf))
  2666. return PTR_ERR(buf);
  2667. iov[0].iov_base = buf;
  2668. iov[0].iov_len = len;
  2669. return 0;
  2670. }
  2671. static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
  2672. bool needs_lock)
  2673. {
  2674. if (req->flags & REQ_F_BUFFER_SELECTED) {
  2675. struct io_buffer *kbuf;
  2676. kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
  2677. iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
  2678. iov[0].iov_len = kbuf->len;
  2679. return 0;
  2680. }
  2681. if (req->rw.len != 1)
  2682. return -EINVAL;
  2683. #ifdef CONFIG_COMPAT
  2684. if (req->ctx->compat)
  2685. return io_compat_import(req, iov, needs_lock);
  2686. #endif
  2687. return __io_iov_buffer_select(req, iov, needs_lock);
  2688. }
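/*
 * Descriptive note for the function below: build the iov_iter for a
 * read/write request - fixed buffers go through io_import_fixed(),
 * non-vectored READ/WRITE (and buffer-select requests) become a single range,
 * and everything else is imported from the user iovec array.
 */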
  2689. static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
  2690. struct iovec **iovec, struct iov_iter *iter,
  2691. bool needs_lock)
  2692. {
  2693. void __user *buf = u64_to_user_ptr(req->rw.addr);
  2694. size_t sqe_len = req->rw.len;
  2695. ssize_t ret;
  2696. u8 opcode;
  2697. opcode = req->opcode;
  2698. if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
  2699. *iovec = NULL;
  2700. return io_import_fixed(req, rw, iter);
  2701. }
  2702. /* buffer index only valid with fixed read/write, or buffer select */
  2703. if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
  2704. return -EINVAL;
  2705. if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
  2706. if (req->flags & REQ_F_BUFFER_SELECT) {
  2707. buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
  2708. if (IS_ERR(buf))
  2709. return PTR_ERR(buf);
  2710. req->rw.len = sqe_len;
  2711. }
  2712. ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
  2713. *iovec = NULL;
  2714. return ret;
  2715. }
  2716. if (req->flags & REQ_F_BUFFER_SELECT) {
  2717. ret = io_iov_buffer_select(req, *iovec, needs_lock);
  2718. if (!ret) {
  2719. ret = (*iovec)->iov_len;
  2720. iov_iter_init(iter, rw, *iovec, 1, ret);
  2721. }
  2722. *iovec = NULL;
  2723. return ret;
  2724. }
  2725. return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
  2726. req->ctx->compat);
  2727. }
static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
			       struct iovec **iovec, struct iov_iter *iter,
			       bool needs_lock)
{
	struct io_async_rw *iorw = req->async_data;

	if (!iorw)
		return __io_import_iovec(rw, req, iovec, iter, needs_lock);
	*iovec = NULL;
	return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}
  2742. /*
  2743. * For files that don't have ->read_iter() and ->write_iter(), handle them
  2744. * by looping over ->read() or ->write() manually.
  2745. */
  2746. static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
  2747. {
  2748. struct kiocb *kiocb = &req->rw.kiocb;
  2749. struct file *file = req->file;
  2750. ssize_t ret = 0;
  2751. /*
  2752. * Don't support polled IO through this interface, and we can't
  2753. * support non-blocking either. For the latter, this just causes
  2754. * the kiocb to be handled from an async context.
  2755. */
  2756. if (kiocb->ki_flags & IOCB_HIPRI)
  2757. return -EOPNOTSUPP;
  2758. if (kiocb->ki_flags & IOCB_NOWAIT)
  2759. return -EAGAIN;
  2760. while (iov_iter_count(iter)) {
  2761. struct iovec iovec;
  2762. ssize_t nr;
  2763. if (!iov_iter_is_bvec(iter)) {
  2764. iovec = iov_iter_iovec(iter);
  2765. } else {
  2766. iovec.iov_base = u64_to_user_ptr(req->rw.addr);
  2767. iovec.iov_len = req->rw.len;
  2768. }
  2769. if (rw == READ) {
  2770. nr = file->f_op->read(file, iovec.iov_base,
  2771. iovec.iov_len, io_kiocb_ppos(kiocb));
  2772. } else {
  2773. nr = file->f_op->write(file, iovec.iov_base,
  2774. iovec.iov_len, io_kiocb_ppos(kiocb));
  2775. }
  2776. if (nr < 0) {
  2777. if (!ret)
  2778. ret = nr;
  2779. break;
  2780. }
  2781. ret += nr;
  2782. if (!iov_iter_is_bvec(iter)) {
  2783. iov_iter_advance(iter, nr);
  2784. } else {
  2785. req->rw.addr += nr;
  2786. req->rw.len -= nr;
  2787. if (!req->rw.len)
  2788. break;
  2789. }
  2790. if (nr != iovec.iov_len)
  2791. break;
  2792. }
  2793. return ret;
  2794. }
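/*
 * Descriptive note for the function below: stash the current iterator state
 * (and the iovec backing it, if one was allocated) in the request's async
 * data so the I/O can be retried later from a different context.
 */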
  2795. static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
  2796. const struct iovec *fast_iov, struct iov_iter *iter)
  2797. {
  2798. struct io_async_rw *rw = req->async_data;
  2799. memcpy(&rw->iter, iter, sizeof(*iter));
  2800. rw->free_iovec = iovec;
  2801. rw->bytes_done = 0;
  2802. /* can only be fixed buffers, no need to do anything */
  2803. if (iov_iter_is_bvec(iter))
  2804. return;
  2805. if (!iovec) {
  2806. unsigned iov_off = 0;
  2807. rw->iter.iov = rw->fast_iov;
  2808. if (iter->iov != fast_iov) {
  2809. iov_off = iter->iov - fast_iov;
  2810. rw->iter.iov += iov_off;
  2811. }
  2812. if (rw->fast_iov != fast_iov)
  2813. memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
  2814. sizeof(struct iovec) * iter->nr_segs);
  2815. } else {
  2816. req->flags |= REQ_F_NEED_CLEANUP;
  2817. }
  2818. }
static inline int __io_alloc_async_data(struct io_kiocb *req)
{
	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
	return req->async_data == NULL;
}

static int io_alloc_async_data(struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].needs_async_data)
		return 0;

	return __io_alloc_async_data(req);
}
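/*
 * Descriptive note for the function below: ensure the request carries async
 * data with a snapshot of the current iterator (unless the opcode never needs
 * it and 'force' isn't set), so a punted retry can pick up where this attempt
 * left off.
 */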
  2831. static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
  2832. const struct iovec *fast_iov,
  2833. struct iov_iter *iter, bool force)
  2834. {
  2835. if (!force && !io_op_defs[req->opcode].needs_async_data)
  2836. return 0;
  2837. if (!req->async_data) {
  2838. if (__io_alloc_async_data(req))
  2839. return -ENOMEM;
  2840. io_req_map_rw(req, iovec, fast_iov, iter);
  2841. }
  2842. return 0;
  2843. }
  2844. static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
  2845. {
  2846. struct io_async_rw *iorw = req->async_data;
  2847. struct iovec *iov = iorw->fast_iov;
  2848. ssize_t ret;
  2849. ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false);
  2850. if (unlikely(ret < 0))
  2851. return ret;
  2852. iorw->bytes_done = 0;
  2853. iorw->free_iovec = iov;
  2854. if (iov)
  2855. req->flags |= REQ_F_NEED_CLEANUP;
  2856. return 0;
  2857. }
  2858. static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  2859. {
  2860. ssize_t ret;
  2861. ret = io_prep_rw(req, sqe);
  2862. if (ret)
  2863. return ret;
  2864. if (unlikely(!(req->file->f_mode & FMODE_READ)))
  2865. return -EBADF;
  2866. /* either don't need iovec imported or already have it */
  2867. if (!req->async_data)
  2868. return 0;
  2869. return io_rw_prep_async(req, READ);
  2870. }
/*
 * This is our waitqueue callback handler, registered through lock_page_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
  2881. static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
  2882. int sync, void *arg)
  2883. {
  2884. struct wait_page_queue *wpq;
  2885. struct io_kiocb *req = wait->private;
  2886. struct wait_page_key *key = arg;
  2887. int ret;
  2888. wpq = container_of(wait, struct wait_page_queue, wait);
  2889. if (!wake_page_match(wpq, key))
  2890. return 0;
  2891. req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
  2892. list_del_init(&wait->entry);
  2893. init_task_work(&req->task_work, io_req_task_submit);
  2894. percpu_ref_get(&req->ctx->refs);
  2895. /* submit ref gets dropped, acquire a new one */
  2896. refcount_inc(&req->refs);
  2897. ret = io_req_task_work_add(req, true);
  2898. if (unlikely(ret)) {
  2899. struct task_struct *tsk;
  2900. /* queue just for cancelation */
  2901. init_task_work(&req->task_work, io_req_task_cancel);
  2902. tsk = io_wq_get_task(req->ctx->io_wq);
  2903. task_work_add(tsk, &req->task_work, TWA_NONE);
  2904. wake_up_process(tsk);
  2905. }
  2906. return 1;
  2907. }
  2908. /*
  2909. * This controls whether a given IO request should be armed for async page
  2910. * based retry. If we return false here, the request is handed to the async
  2911. * worker threads for retry. If we're doing buffered reads on a regular file,
  2912. * we prepare a private wait_page_queue entry and retry the operation. This
  2913. * will either succeed because the page is now uptodate and unlocked, or it
  2914. * will register a callback when the page is unlocked at IO completion. Through
  2915. * that callback, io_uring uses task_work to setup a retry of the operation.
  2916. * That retry will attempt the buffered read again. The retry will generally
  2917. * succeed, or in rare cases where it fails, we then fall back to using the
  2918. * async worker threads for a blocking retry.
  2919. */
  2920. static bool io_rw_should_retry(struct io_kiocb *req)
  2921. {
  2922. struct io_async_rw *rw = req->async_data;
  2923. struct wait_page_queue *wait = &rw->wpq;
  2924. struct kiocb *kiocb = &req->rw.kiocb;
  2925. /* never retry for NOWAIT, we just complete with -EAGAIN */
  2926. if (req->flags & REQ_F_NOWAIT)
  2927. return false;
  2928. /* Only for buffered IO */
  2929. if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
  2930. return false;
  2931. /*
  2932. * just use poll if we can, and don't attempt if the fs doesn't
  2933. * support callback based unlocks
  2934. */
  2935. if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
  2936. return false;
  2937. wait->wait.func = io_async_buf_func;
  2938. wait->wait.private = req;
  2939. wait->wait.flags = 0;
  2940. INIT_LIST_HEAD(&wait->wait.entry);
  2941. kiocb->ki_flags |= IOCB_WAITQ;
  2942. kiocb->ki_flags &= ~IOCB_NOWAIT;
  2943. kiocb->ki_waitq = wait;
  2944. return true;
  2945. }
static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
{
	if (req->file->f_op->read_iter)
		return call_read_iter(req->file, &req->rw.kiocb, iter);
	else if (req->file->f_op->read)
		return loop_rw_iter(READ, req, iter);
	else
		return -EINVAL;
}
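/*
 * Descriptive note for the function below: issue a read. On -EAGAIN we either
 * punt to the async workers (after saving the iterator state), or - for
 * buffered reads on files that support it - arm the page-unlock callback and
 * retry inline, looping on partial reads until the request is done.
 */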
  2955. static int io_read(struct io_kiocb *req, bool force_nonblock,
  2956. struct io_comp_state *cs)
  2957. {
  2958. struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
  2959. struct kiocb *kiocb = &req->rw.kiocb;
  2960. struct iov_iter __iter, *iter = &__iter;
  2961. struct io_async_rw *rw = req->async_data;
  2962. ssize_t io_size, ret, ret2;
  2963. bool no_async;
  2964. if (rw)
  2965. iter = &rw->iter;
  2966. ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
  2967. if (ret < 0)
  2968. return ret;
  2969. io_size = iov_iter_count(iter);
  2970. req->result = io_size;
  2971. ret = 0;
  2972. /* Ensure we clear previously set non-block flag */
  2973. if (!force_nonblock)
  2974. kiocb->ki_flags &= ~IOCB_NOWAIT;
  2975. else
  2976. kiocb->ki_flags |= IOCB_NOWAIT;
  2977. /* If the file doesn't support async, just async punt */
  2978. no_async = force_nonblock && !io_file_supports_async(req->file, READ);
  2979. if (no_async)
  2980. goto copy_iov;
  2981. ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
  2982. if (unlikely(ret))
  2983. goto out_free;
  2984. ret = io_iter_do_read(req, iter);
  2985. if (!ret) {
  2986. goto done;
  2987. } else if (ret == -EIOCBQUEUED) {
  2988. ret = 0;
  2989. goto out_free;
  2990. } else if (ret == -EAGAIN) {
  2991. /* IOPOLL retry should happen for io-wq threads */
  2992. if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
  2993. goto done;
  2994. /* no retry on NONBLOCK marked file */
  2995. if (req->file->f_flags & O_NONBLOCK)
  2996. goto done;
  2997. /* some cases will consume bytes even on error returns */
  2998. iov_iter_revert(iter, io_size - iov_iter_count(iter));
  2999. ret = 0;
  3000. goto copy_iov;
  3001. } else if (ret < 0) {
  3002. /* make sure -ERESTARTSYS -> -EINTR is done */
  3003. goto done;
  3004. }
	/* read it all, or we did a blocking attempt; no retry */
  3006. if (!iov_iter_count(iter) || !force_nonblock ||
  3007. (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
  3008. goto done;
  3009. io_size -= ret;
  3010. copy_iov:
  3011. ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
  3012. if (ret2) {
  3013. ret = ret2;
  3014. goto out_free;
  3015. }
  3016. if (no_async)
  3017. return -EAGAIN;
  3018. rw = req->async_data;
  3019. /* it's copied and will be cleaned with ->io */
  3020. iovec = NULL;
  3021. /* now use our persistent iterator, if we aren't already */
  3022. iter = &rw->iter;
  3023. retry:
  3024. rw->bytes_done += ret;
  3025. /* if we can retry, do so with the callbacks armed */
  3026. if (!io_rw_should_retry(req)) {
  3027. kiocb->ki_flags &= ~IOCB_WAITQ;
  3028. return -EAGAIN;
  3029. }
  3030. /*
  3031. * Now retry read with the IOCB_WAITQ parts set in the iocb. If we
  3032. * get -EIOCBQUEUED, then we'll get a notification when the desired
  3033. * page gets unlocked. We can also get a partial read here, and if we
  3034. * do, then just retry at the new offset.
  3035. */
  3036. ret = io_iter_do_read(req, iter);
  3037. if (ret == -EIOCBQUEUED) {
  3038. ret = 0;
  3039. goto out_free;
  3040. } else if (ret > 0 && ret < io_size) {
  3041. /* we got some bytes, but not all. retry. */
  3042. kiocb->ki_flags &= ~IOCB_WAITQ;
  3043. goto retry;
  3044. }
  3045. done:
  3046. kiocb_done(kiocb, ret, cs);
  3047. ret = 0;
  3048. out_free:
  3049. /* it's reportedly faster than delegating the null check to kfree() */
  3050. if (iovec)
  3051. kfree(iovec);
  3052. return ret;
  3053. }
  3054. static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3055. {
  3056. ssize_t ret;
  3057. ret = io_prep_rw(req, sqe);
  3058. if (ret)
  3059. return ret;
  3060. if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
  3061. return -EBADF;
  3062. /* either don't need iovec imported or already have it */
  3063. if (!req->async_data)
  3064. return 0;
  3065. return io_rw_prep_async(req, WRITE);
  3066. }
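/*
 * Descriptive note for the function below: issue a write. Non-blocking
 * attempts that can't proceed are punted to the async workers with the
 * iterator state saved; regular files also take superblock freeze protection
 * here, released again in kiocb_end_write().
 */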
  3067. static int io_write(struct io_kiocb *req, bool force_nonblock,
  3068. struct io_comp_state *cs)
  3069. {
  3070. struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
  3071. struct kiocb *kiocb = &req->rw.kiocb;
  3072. struct iov_iter __iter, *iter = &__iter;
  3073. struct io_async_rw *rw = req->async_data;
  3074. ssize_t ret, ret2, io_size;
  3075. if (rw)
  3076. iter = &rw->iter;
  3077. ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
  3078. if (ret < 0)
  3079. return ret;
  3080. io_size = iov_iter_count(iter);
  3081. req->result = io_size;
  3082. /* Ensure we clear previously set non-block flag */
  3083. if (!force_nonblock)
  3084. kiocb->ki_flags &= ~IOCB_NOWAIT;
  3085. else
  3086. kiocb->ki_flags |= IOCB_NOWAIT;
  3087. /* If the file doesn't support async, just async punt */
  3088. if (force_nonblock && !io_file_supports_async(req->file, WRITE))
  3089. goto copy_iov;
  3090. /* file path doesn't support NOWAIT for non-direct_IO */
  3091. if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
  3092. (req->flags & REQ_F_ISREG))
  3093. goto copy_iov;
  3094. ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
  3095. if (unlikely(ret))
  3096. goto out_free;
  3097. /*
  3098. * Open-code file_start_write here to grab freeze protection,
  3099. * which will be released by another thread in
  3100. * io_complete_rw(). Fool lockdep by telling it the lock got
  3101. * released so that it doesn't complain about the held lock when
  3102. * we return to userspace.
  3103. */
  3104. if (req->flags & REQ_F_ISREG) {
  3105. sb_start_write(file_inode(req->file)->i_sb);
  3106. __sb_writers_release(file_inode(req->file)->i_sb,
  3107. SB_FREEZE_WRITE);
  3108. }
  3109. kiocb->ki_flags |= IOCB_WRITE;
  3110. if (req->file->f_op->write_iter)
  3111. ret2 = call_write_iter(req->file, kiocb, iter);
  3112. else if (req->file->f_op->write)
  3113. ret2 = loop_rw_iter(WRITE, req, iter);
  3114. else
  3115. ret2 = -EINVAL;
  3116. /*
  3117. * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
  3118. * retry them without IOCB_NOWAIT.
  3119. */
  3120. if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
  3121. ret2 = -EAGAIN;
  3122. /* no retry on NONBLOCK marked file */
  3123. if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
  3124. goto done;
  3125. if (!force_nonblock || ret2 != -EAGAIN) {
  3126. /* IOPOLL retry should happen for io-wq threads */
  3127. if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
  3128. goto copy_iov;
  3129. done:
  3130. kiocb_done(kiocb, ret2, cs);
  3131. } else {
  3132. copy_iov:
  3133. /* some cases will consume bytes even on error returns */
  3134. iov_iter_revert(iter, io_size - iov_iter_count(iter));
  3135. ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
  3136. if (!ret)
  3137. return -EAGAIN;
  3138. }
  3139. out_free:
  3140. /* it's reportedly faster than delegating the null check to kfree() */
  3141. if (iovec)
  3142. kfree(iovec);
  3143. return ret;
  3144. }
  3145. static int __io_splice_prep(struct io_kiocb *req,
  3146. const struct io_uring_sqe *sqe)
  3147. {
	struct io_splice *sp = &req->splice;
  3149. unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
  3150. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  3151. return -EINVAL;
  3152. sp->file_in = NULL;
  3153. sp->len = READ_ONCE(sqe->len);
  3154. sp->flags = READ_ONCE(sqe->splice_flags);
  3155. if (unlikely(sp->flags & ~valid_flags))
  3156. return -EINVAL;
  3157. sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
  3158. (sp->flags & SPLICE_F_FD_IN_FIXED));
  3159. if (!sp->file_in)
  3160. return -EBADF;
  3161. req->flags |= REQ_F_NEED_CLEANUP;
  3162. if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
		/*
		 * Splice operations will be punted async, and here we need to
		 * modify io_wq_work.flags, so initialize io_wq_work first.
		 */
  3167. io_req_init_async(req);
  3168. req->work.flags |= IO_WQ_WORK_UNBOUND;
  3169. }
  3170. return 0;
  3171. }
  3172. static int io_tee_prep(struct io_kiocb *req,
  3173. const struct io_uring_sqe *sqe)
  3174. {
  3175. if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
  3176. return -EINVAL;
  3177. return __io_splice_prep(req, sqe);
  3178. }
  3179. static int io_tee(struct io_kiocb *req, bool force_nonblock)
  3180. {
  3181. struct io_splice *sp = &req->splice;
  3182. struct file *in = sp->file_in;
  3183. struct file *out = sp->file_out;
  3184. unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
  3185. long ret = 0;
  3186. if (force_nonblock)
  3187. return -EAGAIN;
  3188. if (sp->len)
  3189. ret = do_tee(in, out, sp->len, flags);
  3190. io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
  3191. req->flags &= ~REQ_F_NEED_CLEANUP;
  3192. if (ret != sp->len)
  3193. req_set_fail_links(req);
  3194. io_req_complete(req, ret);
  3195. return 0;
  3196. }
  3197. static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3198. {
	struct io_splice *sp = &req->splice;
  3200. sp->off_in = READ_ONCE(sqe->splice_off_in);
  3201. sp->off_out = READ_ONCE(sqe->off);
  3202. return __io_splice_prep(req, sqe);
  3203. }
  3204. static int io_splice(struct io_kiocb *req, bool force_nonblock)
  3205. {
  3206. struct io_splice *sp = &req->splice;
  3207. struct file *in = sp->file_in;
  3208. struct file *out = sp->file_out;
  3209. unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
  3210. loff_t *poff_in, *poff_out;
  3211. long ret = 0;
  3212. if (force_nonblock)
  3213. return -EAGAIN;
  3214. poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
  3215. poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
  3216. if (sp->len)
  3217. ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
  3218. io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
  3219. req->flags &= ~REQ_F_NEED_CLEANUP;
  3220. if (ret != sp->len)
  3221. req_set_fail_links(req);
  3222. io_req_complete(req, ret);
  3223. return 0;
  3224. }
/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	__io_req_complete(req, 0, 0, cs);
	return 0;
}
  3236. static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3237. {
  3238. struct io_ring_ctx *ctx = req->ctx;
  3239. if (!req->file)
  3240. return -EBADF;
  3241. if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
  3242. return -EINVAL;
  3243. if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
  3244. sqe->splice_fd_in))
  3245. return -EINVAL;
  3246. req->sync.flags = READ_ONCE(sqe->fsync_flags);
  3247. if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
  3248. return -EINVAL;
  3249. req->sync.off = READ_ONCE(sqe->off);
  3250. req->sync.len = READ_ONCE(sqe->len);
  3251. return 0;
  3252. }
  3253. static int io_fsync(struct io_kiocb *req, bool force_nonblock)
  3254. {
  3255. loff_t end = req->sync.off + req->sync.len;
  3256. int ret;
  3257. /* fsync always requires a blocking context */
  3258. if (force_nonblock)
  3259. return -EAGAIN;
  3260. ret = vfs_fsync_range(req->file, req->sync.off,
  3261. end > 0 ? end : LLONG_MAX,
  3262. req->sync.flags & IORING_FSYNC_DATASYNC);
  3263. if (ret < 0)
  3264. req_set_fail_links(req);
  3265. io_req_complete(req, ret);
  3266. return 0;
  3267. }
  3268. static int io_fallocate_prep(struct io_kiocb *req,
  3269. const struct io_uring_sqe *sqe)
  3270. {
  3271. if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
  3272. sqe->splice_fd_in)
  3273. return -EINVAL;
  3274. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  3275. return -EINVAL;
  3276. req->sync.off = READ_ONCE(sqe->off);
  3277. req->sync.len = READ_ONCE(sqe->addr);
  3278. req->sync.mode = READ_ONCE(sqe->len);
  3279. return 0;
  3280. }
  3281. static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
  3282. {
  3283. int ret;
	/* fallocate always requires a blocking context */
  3285. if (force_nonblock)
  3286. return -EAGAIN;
  3287. ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
  3288. req->sync.len);
  3289. if (ret < 0)
  3290. req_set_fail_links(req);
  3291. io_req_complete(req, ret);
  3292. return 0;
  3293. }
  3294. static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3295. {
  3296. const char __user *fname;
  3297. int ret;
  3298. if (unlikely(sqe->ioprio || sqe->buf_index || sqe->splice_fd_in))
  3299. return -EINVAL;
  3300. if (unlikely(req->flags & REQ_F_FIXED_FILE))
  3301. return -EBADF;
  3302. /* open.how should be already initialised */
  3303. if (!(req->open.how.flags & O_PATH) && force_o_largefile())
  3304. req->open.how.flags |= O_LARGEFILE;
  3305. req->open.dfd = READ_ONCE(sqe->fd);
  3306. fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
  3307. req->open.filename = getname(fname);
  3308. if (IS_ERR(req->open.filename)) {
  3309. ret = PTR_ERR(req->open.filename);
  3310. req->open.filename = NULL;
  3311. return ret;
  3312. }
  3313. req->open.nofile = rlimit(RLIMIT_NOFILE);
  3314. req->open.ignore_nonblock = false;
  3315. req->flags |= REQ_F_NEED_CLEANUP;
  3316. return 0;
  3317. }
  3318. static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3319. {
  3320. u64 flags, mode;
  3321. if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
  3322. return -EINVAL;
  3323. mode = READ_ONCE(sqe->len);
  3324. flags = READ_ONCE(sqe->open_flags);
  3325. req->open.how = build_open_how(flags, mode);
  3326. return __io_openat_prep(req, sqe);
  3327. }
  3328. static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3329. {
  3330. struct open_how __user *how;
  3331. size_t len;
  3332. int ret;
  3333. if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
  3334. return -EINVAL;
  3335. how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
  3336. len = READ_ONCE(sqe->len);
  3337. if (len < OPEN_HOW_SIZE_VER0)
  3338. return -EINVAL;
  3339. ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
  3340. len);
  3341. if (ret)
  3342. return ret;
  3343. return __io_openat_prep(req, sqe);
  3344. }
  3345. static int io_openat2(struct io_kiocb *req, bool force_nonblock)
  3346. {
  3347. struct open_flags op;
  3348. struct file *file;
  3349. int ret;
  3350. if (force_nonblock && !req->open.ignore_nonblock)
  3351. return -EAGAIN;
  3352. ret = build_open_flags(&req->open.how, &op);
  3353. if (ret)
  3354. goto err;
  3355. ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
  3356. if (ret < 0)
  3357. goto err;
  3358. file = do_filp_open(req->open.dfd, req->open.filename, &op);
  3359. if (IS_ERR(file)) {
  3360. put_unused_fd(ret);
  3361. ret = PTR_ERR(file);
		/*
		 * A work-around to ensure that /proc/self works the way that
		 * it should - if we get -EOPNOTSUPP back, then assume that
		 * proc_self_get_link() failed us because we're in async
		 * context. We should be safe to retry this from the task
		 * itself with force_nonblock == false set, as it should not
		 * block on lookup. Would be nice to know this upfront and
		 * avoid the async dance, but it doesn't seem feasible.
		 */
  3371. if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) {
  3372. req->open.ignore_nonblock = true;
  3373. refcount_inc(&req->refs);
  3374. io_req_task_queue(req);
  3375. return 0;
  3376. }
  3377. } else {
  3378. fsnotify_open(file);
  3379. fd_install(ret, file);
  3380. }
  3381. err:
  3382. putname(req->open.filename);
  3383. req->flags &= ~REQ_F_NEED_CLEANUP;
  3384. if (ret < 0)
  3385. req_set_fail_links(req);
  3386. io_req_complete(req, ret);
  3387. return 0;
  3388. }
  3389. static int io_openat(struct io_kiocb *req, bool force_nonblock)
  3390. {
  3391. return io_openat2(req, force_nonblock);
  3392. }
  3393. static int io_remove_buffers_prep(struct io_kiocb *req,
  3394. const struct io_uring_sqe *sqe)
  3395. {
  3396. struct io_provide_buf *p = &req->pbuf;
  3397. u64 tmp;
  3398. if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
  3399. sqe->splice_fd_in)
  3400. return -EINVAL;
  3401. tmp = READ_ONCE(sqe->fd);
  3402. if (!tmp || tmp > USHRT_MAX)
  3403. return -EINVAL;
  3404. memset(p, 0, sizeof(*p));
  3405. p->nbufs = tmp;
  3406. p->bgid = READ_ONCE(sqe->buf_group);
  3407. return 0;
  3408. }
  3409. static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
  3410. int bgid, unsigned nbufs)
  3411. {
  3412. unsigned i = 0;
  3413. /* shouldn't happen */
  3414. if (!nbufs)
  3415. return 0;
  3416. /* the head kbuf is the list itself */
  3417. while (!list_empty(&buf->list)) {
  3418. struct io_buffer *nxt;
  3419. nxt = list_first_entry(&buf->list, struct io_buffer, list);
  3420. list_del(&nxt->list);
  3421. kfree(nxt);
  3422. if (++i == nbufs)
  3423. return i;
  3424. }
  3425. i++;
  3426. kfree(buf);
  3427. xa_erase(&ctx->io_buffers, bgid);
  3428. return i;
  3429. }
  3430. static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
  3431. struct io_comp_state *cs)
  3432. {
  3433. struct io_provide_buf *p = &req->pbuf;
  3434. struct io_ring_ctx *ctx = req->ctx;
  3435. struct io_buffer *head;
  3436. int ret = 0;
  3437. io_ring_submit_lock(ctx, !force_nonblock);
  3438. lockdep_assert_held(&ctx->uring_lock);
  3439. ret = -ENOENT;
  3440. head = xa_load(&ctx->io_buffers, p->bgid);
  3441. if (head)
  3442. ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
  3443. if (ret < 0)
  3444. req_set_fail_links(req);
  3445. /* need to hold the lock to complete IOPOLL requests */
  3446. if (ctx->flags & IORING_SETUP_IOPOLL) {
  3447. __io_req_complete(req, ret, 0, cs);
  3448. io_ring_submit_unlock(ctx, !force_nonblock);
  3449. } else {
  3450. io_ring_submit_unlock(ctx, !force_nonblock);
  3451. __io_req_complete(req, ret, 0, cs);
  3452. }
  3453. return 0;
  3454. }
  3455. static int io_provide_buffers_prep(struct io_kiocb *req,
  3456. const struct io_uring_sqe *sqe)
  3457. {
  3458. unsigned long size, tmp_check;
  3459. struct io_provide_buf *p = &req->pbuf;
  3460. u64 tmp;
  3461. if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
  3462. return -EINVAL;
  3463. tmp = READ_ONCE(sqe->fd);
  3464. if (!tmp || tmp > USHRT_MAX)
  3465. return -E2BIG;
  3466. p->nbufs = tmp;
  3467. p->addr = READ_ONCE(sqe->addr);
  3468. p->len = READ_ONCE(sqe->len);
  3469. if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
  3470. &size))
  3471. return -EOVERFLOW;
  3472. if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
  3473. return -EOVERFLOW;
  3474. size = (unsigned long)p->len * p->nbufs;
  3475. if (!access_ok(u64_to_user_ptr(p->addr), size))
  3476. return -EFAULT;
  3477. p->bgid = READ_ONCE(sqe->buf_group);
  3478. tmp = READ_ONCE(sqe->off);
  3479. if (tmp > USHRT_MAX)
  3480. return -E2BIG;
  3481. p->bid = tmp;
  3482. return 0;
  3483. }
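/*
 * Descriptive note for the function below: allocate pbuf->nbufs buffer
 * descriptors covering consecutive chunks of the user-provided range and
 * chain them onto *head. Returns how many were added, or -ENOMEM if not even
 * one could be allocated.
 */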
  3484. static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
  3485. {
  3486. struct io_buffer *buf;
  3487. u64 addr = pbuf->addr;
  3488. int i, bid = pbuf->bid;
  3489. for (i = 0; i < pbuf->nbufs; i++) {
  3490. buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
  3491. if (!buf)
  3492. break;
  3493. buf->addr = addr;
  3494. buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
  3495. buf->bid = bid;
  3496. addr += pbuf->len;
  3497. bid++;
  3498. if (!*head) {
  3499. INIT_LIST_HEAD(&buf->list);
  3500. *head = buf;
  3501. } else {
  3502. list_add_tail(&buf->list, &(*head)->list);
  3503. }
  3504. cond_resched();
  3505. }
  3506. return i ? i : -ENOMEM;
  3507. }
  3508. static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
  3509. struct io_comp_state *cs)
  3510. {
  3511. struct io_provide_buf *p = &req->pbuf;
  3512. struct io_ring_ctx *ctx = req->ctx;
  3513. struct io_buffer *head, *list;
  3514. int ret = 0;
  3515. io_ring_submit_lock(ctx, !force_nonblock);
  3516. lockdep_assert_held(&ctx->uring_lock);
  3517. list = head = xa_load(&ctx->io_buffers, p->bgid);
  3518. ret = io_add_buffers(p, &head);
  3519. if (ret >= 0 && !list) {
  3520. ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
  3521. if (ret < 0)
  3522. __io_remove_buffers(ctx, head, p->bgid, -1U);
  3523. }
  3524. if (ret < 0)
  3525. req_set_fail_links(req);
  3526. /* need to hold the lock to complete IOPOLL requests */
  3527. if (ctx->flags & IORING_SETUP_IOPOLL) {
  3528. __io_req_complete(req, ret, 0, cs);
  3529. io_ring_submit_unlock(ctx, !force_nonblock);
  3530. } else {
  3531. io_ring_submit_unlock(ctx, !force_nonblock);
  3532. __io_req_complete(req, ret, 0, cs);
  3533. }
  3534. return 0;
  3535. }
  3536. static int io_epoll_ctl_prep(struct io_kiocb *req,
  3537. const struct io_uring_sqe *sqe)
  3538. {
  3539. #if defined(CONFIG_EPOLL)
  3540. if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
  3541. return -EINVAL;
  3542. if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
  3543. return -EINVAL;
  3544. req->epoll.epfd = READ_ONCE(sqe->fd);
  3545. req->epoll.op = READ_ONCE(sqe->len);
  3546. req->epoll.fd = READ_ONCE(sqe->off);
  3547. if (ep_op_has_event(req->epoll.op)) {
  3548. struct epoll_event __user *ev;
  3549. ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
  3550. if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
  3551. return -EFAULT;
  3552. }
  3553. return 0;
  3554. #else
  3555. return -EOPNOTSUPP;
  3556. #endif
  3557. }
  3558. static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
  3559. struct io_comp_state *cs)
  3560. {
  3561. #if defined(CONFIG_EPOLL)
  3562. struct io_epoll *ie = &req->epoll;
  3563. int ret;
  3564. ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
  3565. if (force_nonblock && ret == -EAGAIN)
  3566. return -EAGAIN;
  3567. if (ret < 0)
  3568. req_set_fail_links(req);
  3569. __io_req_complete(req, ret, 0, cs);
  3570. return 0;
  3571. #else
  3572. return -EOPNOTSUPP;
  3573. #endif
  3574. }
  3575. static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3576. {
  3577. #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
  3578. if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
  3579. return -EINVAL;
  3580. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  3581. return -EINVAL;
  3582. req->madvise.addr = READ_ONCE(sqe->addr);
  3583. req->madvise.len = READ_ONCE(sqe->len);
  3584. req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
  3585. return 0;
  3586. #else
  3587. return -EOPNOTSUPP;
  3588. #endif
  3589. }
  3590. static int io_madvise(struct io_kiocb *req, bool force_nonblock)
  3591. {
  3592. #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
  3593. struct io_madvise *ma = &req->madvise;
  3594. int ret;
  3595. if (force_nonblock)
  3596. return -EAGAIN;
  3597. ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
  3598. if (ret < 0)
  3599. req_set_fail_links(req);
  3600. io_req_complete(req, ret);
  3601. return 0;
  3602. #else
  3603. return -EOPNOTSUPP;
  3604. #endif
  3605. }
  3606. static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3607. {
  3608. if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
  3609. return -EINVAL;
  3610. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  3611. return -EINVAL;
  3612. req->fadvise.offset = READ_ONCE(sqe->off);
  3613. req->fadvise.len = READ_ONCE(sqe->len);
  3614. req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
  3615. return 0;
  3616. }
  3617. static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
  3618. {
  3619. struct io_fadvise *fa = &req->fadvise;
  3620. int ret;
  3621. if (force_nonblock) {
  3622. switch (fa->advice) {
  3623. case POSIX_FADV_NORMAL:
  3624. case POSIX_FADV_RANDOM:
  3625. case POSIX_FADV_SEQUENTIAL:
  3626. break;
  3627. default:
  3628. return -EAGAIN;
  3629. }
  3630. }
  3631. ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
  3632. if (ret < 0)
  3633. req_set_fail_links(req);
  3634. io_req_complete(req, ret);
  3635. return 0;
  3636. }
  3637. static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3638. {
  3639. if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
  3640. return -EINVAL;
  3641. if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
  3642. return -EINVAL;
  3643. if (req->flags & REQ_F_FIXED_FILE)
  3644. return -EBADF;
  3645. req->statx.dfd = READ_ONCE(sqe->fd);
  3646. req->statx.mask = READ_ONCE(sqe->len);
  3647. req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
  3648. req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
  3649. req->statx.flags = READ_ONCE(sqe->statx_flags);
  3650. return 0;
  3651. }
  3652. static int io_statx(struct io_kiocb *req, bool force_nonblock)
  3653. {
  3654. struct io_statx *ctx = &req->statx;
  3655. int ret;
  3656. if (force_nonblock) {
  3657. /* only need file table for an actual valid fd */
  3658. if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
  3659. req->flags |= REQ_F_NO_FILE_TABLE;
  3660. return -EAGAIN;
  3661. }
  3662. ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
  3663. ctx->buffer);
  3664. if (ret < 0)
  3665. req_set_fail_links(req);
  3666. io_req_complete(req, ret);
  3667. return 0;
  3668. }
  3669. static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3670. {
	/*
	 * If we queue this for async, it must not be cancellable. That would
	 * leave the 'file' in an indeterminate state, and here we need to
	 * modify io_wq_work.flags, so initialize io_wq_work first.
	 */
  3676. io_req_init_async(req);
  3677. if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
  3678. return -EINVAL;
  3679. if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
  3680. sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
  3681. return -EINVAL;
  3682. if (req->flags & REQ_F_FIXED_FILE)
  3683. return -EBADF;
  3684. req->close.fd = READ_ONCE(sqe->fd);
  3685. if ((req->file && req->file->f_op == &io_uring_fops))
  3686. return -EBADF;
  3687. req->close.put_file = NULL;
  3688. return 0;
  3689. }
  3690. static int io_close(struct io_kiocb *req, bool force_nonblock,
  3691. struct io_comp_state *cs)
  3692. {
  3693. struct io_close *close = &req->close;
  3694. int ret;
	/* might already be done during nonblock submission */
  3696. if (!close->put_file) {
  3697. ret = __close_fd_get_file(close->fd, &close->put_file);
  3698. if (ret < 0)
  3699. return (ret == -ENOENT) ? -EBADF : ret;
  3700. }
  3701. /* if the file has a flush method, be safe and punt to async */
  3702. if (close->put_file->f_op->flush && force_nonblock) {
  3703. /* not safe to cancel at this point */
  3704. req->work.flags |= IO_WQ_WORK_NO_CANCEL;
  3705. /* was never set, but play safe */
  3706. req->flags &= ~REQ_F_NOWAIT;
  3707. /* avoid grabbing files - we don't need the files */
  3708. req->flags |= REQ_F_NO_FILE_TABLE;
  3709. return -EAGAIN;
  3710. }
  3711. /* No ->flush() or already async, safely close from here */
  3712. ret = filp_close(close->put_file, req->work.identity->files);
  3713. if (ret < 0)
  3714. req_set_fail_links(req);
  3715. fput(close->put_file);
  3716. close->put_file = NULL;
  3717. __io_req_complete(req, ret, 0, cs);
  3718. return 0;
  3719. }
  3720. static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3721. {
  3722. struct io_ring_ctx *ctx = req->ctx;
  3723. if (!req->file)
  3724. return -EBADF;
  3725. if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
  3726. return -EINVAL;
  3727. if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
  3728. sqe->splice_fd_in))
  3729. return -EINVAL;
  3730. req->sync.off = READ_ONCE(sqe->off);
  3731. req->sync.len = READ_ONCE(sqe->len);
  3732. req->sync.flags = READ_ONCE(sqe->sync_range_flags);
  3733. return 0;
  3734. }
  3735. static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
  3736. {
  3737. int ret;
  3738. /* sync_file_range always requires a blocking context */
  3739. if (force_nonblock)
  3740. return -EAGAIN;
  3741. ret = sync_file_range(req->file, req->sync.off, req->sync.len,
  3742. req->sync.flags);
  3743. if (ret < 0)
  3744. req_set_fail_links(req);
  3745. io_req_complete(req, ret);
  3746. return 0;
  3747. }
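/*
 * Editorial sketch (not part of the kernel source): io_sync_file_range()
 * is always punted to the async worker, as the force_nonblock check above
 * shows. A liburing sketch assuming its io_uring_prep_sync_file_range()
 * helper with (fd, len, offset, flags) argument order; fd, the 4096-byte
 * length and the flags are example values:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_sync_file_range(sqe, fd, 4096, 0,
 *				      SYNC_FILE_RANGE_WRITE);
 *	io_uring_submit(&ring);
 */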
  3748. #if defined(CONFIG_NET)
  3749. static int io_setup_async_msg(struct io_kiocb *req,
  3750. struct io_async_msghdr *kmsg)
  3751. {
  3752. struct io_async_msghdr *async_msg = req->async_data;
  3753. if (async_msg)
  3754. return -EAGAIN;
  3755. if (io_alloc_async_data(req)) {
  3756. if (kmsg->iov != kmsg->fast_iov)
  3757. kfree(kmsg->iov);
  3758. return -ENOMEM;
  3759. }
  3760. async_msg = req->async_data;
  3761. req->flags |= REQ_F_NEED_CLEANUP;
  3762. memcpy(async_msg, kmsg, sizeof(*kmsg));
  3763. return -EAGAIN;
  3764. }
  3765. static int io_sendmsg_copy_hdr(struct io_kiocb *req,
  3766. struct io_async_msghdr *iomsg)
  3767. {
  3768. iomsg->iov = iomsg->fast_iov;
  3769. iomsg->msg.msg_name = &iomsg->addr;
  3770. return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
  3771. req->sr_msg.msg_flags, &iomsg->iov);
  3772. }
  3773. static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  3774. {
  3775. struct io_async_msghdr *async_msg = req->async_data;
  3776. struct io_sr_msg *sr = &req->sr_msg;
  3777. int ret;
  3778. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  3779. return -EINVAL;
  3780. sr->msg_flags = READ_ONCE(sqe->msg_flags);
  3781. sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
  3782. sr->len = READ_ONCE(sqe->len);
  3783. #ifdef CONFIG_COMPAT
  3784. if (req->ctx->compat)
  3785. sr->msg_flags |= MSG_CMSG_COMPAT;
  3786. #endif
  3787. if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
  3788. return 0;
  3789. ret = io_sendmsg_copy_hdr(req, async_msg);
  3790. if (!ret)
  3791. req->flags |= REQ_F_NEED_CLEANUP;
  3792. return ret;
  3793. }
  3794. static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
  3795. struct io_comp_state *cs)
  3796. {
  3797. struct io_async_msghdr iomsg, *kmsg;
  3798. struct socket *sock;
  3799. unsigned flags;
  3800. int min_ret = 0;
  3801. int ret;
  3802. sock = sock_from_file(req->file, &ret);
  3803. if (unlikely(!sock))
  3804. return ret;
  3805. if (req->async_data) {
  3806. kmsg = req->async_data;
  3807. kmsg->msg.msg_name = &kmsg->addr;
  3808. /* if iov is set, it's allocated already */
  3809. if (!kmsg->iov)
  3810. kmsg->iov = kmsg->fast_iov;
  3811. kmsg->msg.msg_iter.iov = kmsg->iov;
  3812. } else {
  3813. ret = io_sendmsg_copy_hdr(req, &iomsg);
  3814. if (ret)
  3815. return ret;
  3816. kmsg = &iomsg;
  3817. }
  3818. flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
  3819. if (flags & MSG_DONTWAIT)
  3820. req->flags |= REQ_F_NOWAIT;
  3821. else if (force_nonblock)
  3822. flags |= MSG_DONTWAIT;
  3823. if (flags & MSG_WAITALL)
  3824. min_ret = iov_iter_count(&kmsg->msg.msg_iter);
  3825. ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
  3826. if (force_nonblock && ret == -EAGAIN)
  3827. return io_setup_async_msg(req, kmsg);
  3828. if (ret == -ERESTARTSYS)
  3829. ret = -EINTR;
  3830. if (kmsg->iov != kmsg->fast_iov)
  3831. kfree(kmsg->iov);
  3832. req->flags &= ~REQ_F_NEED_CLEANUP;
  3833. if (ret < min_ret)
  3834. req_set_fail_links(req);
  3835. __io_req_complete(req, ret, 0, cs);
  3836. return 0;
  3837. }
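/*
 * Editorial sketch (not part of the kernel source): the retry logic above
 * (copy the msghdr into async_data on -EAGAIN and retry from io-wq) is
 * what lets a blocking-style sendmsg run over io_uring. A minimal liburing
 * sketch, assuming an initialized ring and a connected socket sockfd:
 *
 *	struct iovec vec = { .iov_base = (void *)"hello", .iov_len = 5 };
 *	struct msghdr mh = { 0 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	mh.msg_iov = &vec;
 *	mh.msg_iovlen = 1;
 *	io_uring_prep_sendmsg(sqe, sockfd, &mh, 0);
 *	io_uring_submit(&ring);
 *	(the completion's cqe->res is the byte count returned by
 *	 __sys_sendmsg_sock() above, or -errno)
 */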
  3838. static int io_send(struct io_kiocb *req, bool force_nonblock,
  3839. struct io_comp_state *cs)
  3840. {
  3841. struct io_sr_msg *sr = &req->sr_msg;
  3842. struct msghdr msg;
  3843. struct iovec iov;
  3844. struct socket *sock;
  3845. unsigned flags;
  3846. int min_ret = 0;
  3847. int ret;
  3848. sock = sock_from_file(req->file, &ret);
  3849. if (unlikely(!sock))
  3850. return ret;
  3851. ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
  3852. if (unlikely(ret))
  3853. return ret;
  3854. msg.msg_name = NULL;
  3855. msg.msg_control = NULL;
  3856. msg.msg_controllen = 0;
  3857. msg.msg_namelen = 0;
  3858. flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
  3859. if (flags & MSG_DONTWAIT)
  3860. req->flags |= REQ_F_NOWAIT;
  3861. else if (force_nonblock)
  3862. flags |= MSG_DONTWAIT;
  3863. if (flags & MSG_WAITALL)
  3864. min_ret = iov_iter_count(&msg.msg_iter);
  3865. msg.msg_flags = flags;
  3866. ret = sock_sendmsg(sock, &msg);
  3867. if (force_nonblock && ret == -EAGAIN)
  3868. return -EAGAIN;
  3869. if (ret == -ERESTARTSYS)
  3870. ret = -EINTR;
  3871. if (ret < min_ret)
  3872. req_set_fail_links(req);
  3873. __io_req_complete(req, ret, 0, cs);
  3874. return 0;
  3875. }
  3876. static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
  3877. struct io_async_msghdr *iomsg)
  3878. {
  3879. struct io_sr_msg *sr = &req->sr_msg;
  3880. struct iovec __user *uiov;
  3881. size_t iov_len;
  3882. int ret;
  3883. ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
  3884. &iomsg->uaddr, &uiov, &iov_len);
  3885. if (ret)
  3886. return ret;
  3887. if (req->flags & REQ_F_BUFFER_SELECT) {
  3888. if (iov_len > 1)
  3889. return -EINVAL;
  3890. if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
  3891. return -EFAULT;
  3892. sr->len = iomsg->iov[0].iov_len;
  3893. iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
  3894. sr->len);
  3895. iomsg->iov = NULL;
  3896. } else {
  3897. ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
  3898. &iomsg->iov, &iomsg->msg.msg_iter,
  3899. false);
  3900. if (ret > 0)
  3901. ret = 0;
  3902. }
  3903. return ret;
  3904. }
  3905. #ifdef CONFIG_COMPAT
  3906. static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
  3907. struct io_async_msghdr *iomsg)
  3908. {
  3909. struct compat_msghdr __user *msg_compat;
  3910. struct io_sr_msg *sr = &req->sr_msg;
  3911. struct compat_iovec __user *uiov;
  3912. compat_uptr_t ptr;
  3913. compat_size_t len;
  3914. int ret;
  3915. msg_compat = (struct compat_msghdr __user *) sr->umsg;
  3916. ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
  3917. &ptr, &len);
  3918. if (ret)
  3919. return ret;
  3920. uiov = compat_ptr(ptr);
  3921. if (req->flags & REQ_F_BUFFER_SELECT) {
  3922. compat_ssize_t clen;
  3923. if (len > 1)
  3924. return -EINVAL;
  3925. if (!access_ok(uiov, sizeof(*uiov)))
  3926. return -EFAULT;
  3927. if (__get_user(clen, &uiov->iov_len))
  3928. return -EFAULT;
  3929. if (clen < 0)
  3930. return -EINVAL;
  3931. sr->len = clen;
  3932. iomsg->iov[0].iov_len = clen;
  3933. iomsg->iov = NULL;
  3934. } else {
  3935. ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
  3936. UIO_FASTIOV, &iomsg->iov,
  3937. &iomsg->msg.msg_iter, true);
  3938. if (ret < 0)
  3939. return ret;
  3940. }
  3941. return 0;
  3942. }
  3943. #endif
  3944. static int io_recvmsg_copy_hdr(struct io_kiocb *req,
  3945. struct io_async_msghdr *iomsg)
  3946. {
  3947. iomsg->msg.msg_name = &iomsg->addr;
  3948. iomsg->iov = iomsg->fast_iov;
  3949. #ifdef CONFIG_COMPAT
  3950. if (req->ctx->compat)
  3951. return __io_compat_recvmsg_copy_hdr(req, iomsg);
  3952. #endif
  3953. return __io_recvmsg_copy_hdr(req, iomsg);
  3954. }
  3955. static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
  3956. bool needs_lock)
  3957. {
  3958. struct io_sr_msg *sr = &req->sr_msg;
  3959. struct io_buffer *kbuf;
  3960. kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
  3961. if (IS_ERR(kbuf))
  3962. return kbuf;
  3963. sr->kbuf = kbuf;
  3964. req->flags |= REQ_F_BUFFER_SELECTED;
  3965. return kbuf;
  3966. }
  3967. static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
  3968. {
  3969. return io_put_kbuf(req, req->sr_msg.kbuf);
  3970. }
  3971. static int io_recvmsg_prep(struct io_kiocb *req,
  3972. const struct io_uring_sqe *sqe)
  3973. {
  3974. struct io_async_msghdr *async_msg = req->async_data;
  3975. struct io_sr_msg *sr = &req->sr_msg;
  3976. int ret;
  3977. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  3978. return -EINVAL;
  3979. sr->msg_flags = READ_ONCE(sqe->msg_flags);
  3980. sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
  3981. sr->len = READ_ONCE(sqe->len);
  3982. sr->bgid = READ_ONCE(sqe->buf_group);
  3983. #ifdef CONFIG_COMPAT
  3984. if (req->ctx->compat)
  3985. sr->msg_flags |= MSG_CMSG_COMPAT;
  3986. #endif
  3987. if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
  3988. return 0;
  3989. ret = io_recvmsg_copy_hdr(req, async_msg);
  3990. if (!ret)
  3991. req->flags |= REQ_F_NEED_CLEANUP;
  3992. return ret;
  3993. }
  3994. static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
  3995. struct io_comp_state *cs)
  3996. {
  3997. struct io_async_msghdr iomsg, *kmsg;
  3998. struct socket *sock;
  3999. struct io_buffer *kbuf;
  4000. unsigned flags;
  4001. int min_ret = 0;
  4002. int ret, cflags = 0;
  4003. sock = sock_from_file(req->file, &ret);
  4004. if (unlikely(!sock))
  4005. return ret;
  4006. if (req->async_data) {
  4007. kmsg = req->async_data;
  4008. kmsg->msg.msg_name = &kmsg->addr;
  4009. /* if iov is set, it's allocated already */
  4010. if (!kmsg->iov)
  4011. kmsg->iov = kmsg->fast_iov;
  4012. kmsg->msg.msg_iter.iov = kmsg->iov;
  4013. } else {
  4014. ret = io_recvmsg_copy_hdr(req, &iomsg);
  4015. if (ret)
  4016. return ret;
  4017. kmsg = &iomsg;
  4018. }
  4019. if (req->flags & REQ_F_BUFFER_SELECT) {
  4020. kbuf = io_recv_buffer_select(req, !force_nonblock);
  4021. if (IS_ERR(kbuf))
  4022. return PTR_ERR(kbuf);
  4023. kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
  4024. iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
  4025. 1, req->sr_msg.len);
  4026. }
  4027. flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
  4028. if (flags & MSG_DONTWAIT)
  4029. req->flags |= REQ_F_NOWAIT;
  4030. else if (force_nonblock)
  4031. flags |= MSG_DONTWAIT;
  4032. if (flags & MSG_WAITALL)
  4033. min_ret = iov_iter_count(&kmsg->msg.msg_iter);
  4034. ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
  4035. kmsg->uaddr, flags);
  4036. if (force_nonblock && ret == -EAGAIN)
  4037. return io_setup_async_msg(req, kmsg);
  4038. if (ret == -ERESTARTSYS)
  4039. ret = -EINTR;
  4040. if (req->flags & REQ_F_BUFFER_SELECTED)
  4041. cflags = io_put_recv_kbuf(req);
  4042. if (kmsg->iov != kmsg->fast_iov)
  4043. kfree(kmsg->iov);
  4044. req->flags &= ~REQ_F_NEED_CLEANUP;
  4045. if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
  4046. req_set_fail_links(req);
  4047. __io_req_complete(req, ret, cflags, cs);
  4048. return 0;
  4049. }
  4050. static int io_recv(struct io_kiocb *req, bool force_nonblock,
  4051. struct io_comp_state *cs)
  4052. {
  4053. struct io_buffer *kbuf;
  4054. struct io_sr_msg *sr = &req->sr_msg;
  4055. struct msghdr msg;
  4056. void __user *buf = sr->buf;
  4057. struct socket *sock;
  4058. struct iovec iov;
  4059. unsigned flags;
  4060. int min_ret = 0;
  4061. int ret, cflags = 0;
  4062. sock = sock_from_file(req->file, &ret);
  4063. if (unlikely(!sock))
  4064. return ret;
  4065. if (req->flags & REQ_F_BUFFER_SELECT) {
  4066. kbuf = io_recv_buffer_select(req, !force_nonblock);
  4067. if (IS_ERR(kbuf))
  4068. return PTR_ERR(kbuf);
  4069. buf = u64_to_user_ptr(kbuf->addr);
  4070. }
  4071. ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
  4072. if (unlikely(ret))
  4073. goto out_free;
  4074. msg.msg_name = NULL;
  4075. msg.msg_control = NULL;
  4076. msg.msg_controllen = 0;
  4077. msg.msg_namelen = 0;
  4078. msg.msg_iocb = NULL;
  4079. msg.msg_flags = 0;
  4080. flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
  4081. if (flags & MSG_DONTWAIT)
  4082. req->flags |= REQ_F_NOWAIT;
  4083. else if (force_nonblock)
  4084. flags |= MSG_DONTWAIT;
  4085. if (flags & MSG_WAITALL)
  4086. min_ret = iov_iter_count(&msg.msg_iter);
  4087. ret = sock_recvmsg(sock, &msg, flags);
  4088. if (force_nonblock && ret == -EAGAIN)
  4089. return -EAGAIN;
  4090. if (ret == -ERESTARTSYS)
  4091. ret = -EINTR;
  4092. out_free:
  4093. if (req->flags & REQ_F_BUFFER_SELECTED)
  4094. cflags = io_put_recv_kbuf(req);
  4095. if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
  4096. req_set_fail_links(req);
  4097. __io_req_complete(req, ret, cflags, cs);
  4098. return 0;
  4099. }
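/*
 * Editorial sketch (not part of the kernel source): IORING_OP_RECV, handled
 * by io_recv() above, is the single-buffer counterpart of IORING_OP_RECVMSG.
 * A minimal liburing sketch, assuming an initialized ring and a connected
 * socket sockfd; buf is a plain userspace buffer (no REQ_F_BUFFER_SELECT):
 *
 *	char buf[4096];
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_prep_recv(sqe, sockfd, buf, sizeof(buf), 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	(cqe->res is the number of bytes received, or -errno)
 *	io_uring_cqe_seen(&ring, cqe);
 */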
  4100. static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  4101. {
  4102. struct io_accept *accept = &req->accept;
  4103. if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
  4104. return -EINVAL;
  4105. if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->splice_fd_in)
  4106. return -EINVAL;
  4107. accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
  4108. accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
  4109. accept->flags = READ_ONCE(sqe->accept_flags);
  4110. accept->nofile = rlimit(RLIMIT_NOFILE);
  4111. return 0;
  4112. }
  4113. static int io_accept(struct io_kiocb *req, bool force_nonblock,
  4114. struct io_comp_state *cs)
  4115. {
  4116. struct io_accept *accept = &req->accept;
  4117. unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
  4118. int ret;
  4119. if (req->file->f_flags & O_NONBLOCK)
  4120. req->flags |= REQ_F_NOWAIT;
  4121. ret = __sys_accept4_file(req->file, file_flags, accept->addr,
  4122. accept->addr_len, accept->flags,
  4123. accept->nofile);
  4124. if (ret == -EAGAIN && force_nonblock)
  4125. return -EAGAIN;
  4126. if (ret < 0) {
  4127. if (ret == -ERESTARTSYS)
  4128. ret = -EINTR;
  4129. req_set_fail_links(req);
  4130. }
  4131. __io_req_complete(req, ret, 0, cs);
  4132. return 0;
  4133. }
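/*
 * Editorial sketch (not part of the kernel source): io_accept() above is
 * retried via the poll-arming machinery when the listening socket has no
 * pending connection. A minimal liburing sketch, assuming an initialized
 * ring and a listening socket listen_fd:
 *
 *	struct sockaddr_storage peer;
 *	socklen_t peer_len = sizeof(peer);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&peer,
 *			     &peer_len, SOCK_CLOEXEC);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	(cqe->res is the accepted file descriptor, or -errno)
 *	io_uring_cqe_seen(&ring, cqe);
 */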
  4134. static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  4135. {
  4136. struct io_connect *conn = &req->connect;
  4137. struct io_async_connect *io = req->async_data;
  4138. if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
  4139. return -EINVAL;
  4140. if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
  4141. sqe->splice_fd_in)
  4142. return -EINVAL;
  4143. conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
  4144. conn->addr_len = READ_ONCE(sqe->addr2);
  4145. if (!io)
  4146. return 0;
  4147. return move_addr_to_kernel(conn->addr, conn->addr_len,
  4148. &io->address);
  4149. }
  4150. static int io_connect(struct io_kiocb *req, bool force_nonblock,
  4151. struct io_comp_state *cs)
  4152. {
  4153. struct io_async_connect __io, *io;
  4154. unsigned file_flags;
  4155. int ret;
  4156. if (req->async_data) {
  4157. io = req->async_data;
  4158. } else {
  4159. ret = move_addr_to_kernel(req->connect.addr,
  4160. req->connect.addr_len,
  4161. &__io.address);
  4162. if (ret)
  4163. goto out;
  4164. io = &__io;
  4165. }
  4166. file_flags = force_nonblock ? O_NONBLOCK : 0;
  4167. ret = __sys_connect_file(req->file, &io->address,
  4168. req->connect.addr_len, file_flags);
  4169. if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
  4170. if (req->async_data)
  4171. return -EAGAIN;
  4172. if (io_alloc_async_data(req)) {
  4173. ret = -ENOMEM;
  4174. goto out;
  4175. }
  4176. io = req->async_data;
  4177. memcpy(req->async_data, &__io, sizeof(__io));
  4178. return -EAGAIN;
  4179. }
  4180. if (ret == -ERESTARTSYS)
  4181. ret = -EINTR;
  4182. out:
  4183. if (ret < 0)
  4184. req_set_fail_links(req);
  4185. __io_req_complete(req, ret, 0, cs);
  4186. return 0;
  4187. }
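/*
 * Editorial sketch (not part of the kernel source): io_connect() above
 * stashes the sockaddr in async_data so an -EAGAIN/-EINPROGRESS attempt
 * can be retried later. A minimal liburing sketch, assuming an initialized
 * ring and a TCP socket sockfd; the address is an example:
 *
 *	struct sockaddr_in addr = { 0 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	addr.sin_family = AF_INET;
 *	addr.sin_port = htons(80);
 *	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
 *	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&addr,
 *			      sizeof(addr));
 *	io_uring_submit(&ring);
 */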
  4188. #else /* !CONFIG_NET */
  4189. static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  4190. {
  4191. return -EOPNOTSUPP;
  4192. }
  4193. static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
  4194. struct io_comp_state *cs)
  4195. {
  4196. return -EOPNOTSUPP;
  4197. }
  4198. static int io_send(struct io_kiocb *req, bool force_nonblock,
  4199. struct io_comp_state *cs)
  4200. {
  4201. return -EOPNOTSUPP;
  4202. }
  4203. static int io_recvmsg_prep(struct io_kiocb *req,
  4204. const struct io_uring_sqe *sqe)
  4205. {
  4206. return -EOPNOTSUPP;
  4207. }
  4208. static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
  4209. struct io_comp_state *cs)
  4210. {
  4211. return -EOPNOTSUPP;
  4212. }
  4213. static int io_recv(struct io_kiocb *req, bool force_nonblock,
  4214. struct io_comp_state *cs)
  4215. {
  4216. return -EOPNOTSUPP;
  4217. }
  4218. static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  4219. {
  4220. return -EOPNOTSUPP;
  4221. }
  4222. static int io_accept(struct io_kiocb *req, bool force_nonblock,
  4223. struct io_comp_state *cs)
  4224. {
  4225. return -EOPNOTSUPP;
  4226. }
  4227. static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  4228. {
  4229. return -EOPNOTSUPP;
  4230. }
  4231. static int io_connect(struct io_kiocb *req, bool force_nonblock,
  4232. struct io_comp_state *cs)
  4233. {
  4234. return -EOPNOTSUPP;
  4235. }
  4236. #endif /* CONFIG_NET */
  4237. struct io_poll_table {
  4238. struct poll_table_struct pt;
  4239. struct io_kiocb *req;
  4240. int nr_entries;
  4241. int error;
  4242. };
  4243. static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
  4244. __poll_t mask, task_work_func_t func)
  4245. {
  4246. bool twa_signal_ok;
  4247. int ret;
  4248. /* for instances that support it check for an event match first: */
  4249. if (mask && !(mask & poll->events))
  4250. return 0;
  4251. trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
  4252. list_del_init(&poll->wait.entry);
  4253. req->result = mask;
  4254. init_task_work(&req->task_work, func);
  4255. percpu_ref_get(&req->ctx->refs);
4256. /*
4257. * If we are using the signalfd wait_queue_head for this wakeup, then
4258. * it's not safe to use TWA_SIGNAL as we could be recursing on the
4259. * tsk->sighand->siglock when doing the wakeup. It should not be needed
4260. * either, as the normal wakeup will suffice.
4261. */
  4262. twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
4263. /*
4264. * If this fails, then the task is exiting. When a task exits, the
4265. * work gets canceled, so just cancel this request as well instead
4266. * of executing it. We can't safely execute it anyway, as we may not
4267. * have the state needed for it.
4268. */
  4269. ret = io_req_task_work_add(req, twa_signal_ok);
  4270. if (unlikely(ret)) {
  4271. struct task_struct *tsk;
  4272. WRITE_ONCE(poll->canceled, true);
  4273. tsk = io_wq_get_task(req->ctx->io_wq);
  4274. task_work_add(tsk, &req->task_work, TWA_NONE);
  4275. wake_up_process(tsk);
  4276. }
  4277. return 1;
  4278. }
  4279. static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
  4280. __acquires(&req->ctx->completion_lock)
  4281. {
  4282. struct io_ring_ctx *ctx = req->ctx;
  4283. if (!req->result && !READ_ONCE(poll->canceled)) {
  4284. struct poll_table_struct pt = { ._key = poll->events };
  4285. req->result = vfs_poll(req->file, &pt) & poll->events;
  4286. }
  4287. spin_lock_irq(&ctx->completion_lock);
  4288. if (!req->result && !READ_ONCE(poll->canceled)) {
  4289. add_wait_queue(poll->head, &poll->wait);
  4290. return true;
  4291. }
  4292. return false;
  4293. }
  4294. static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
  4295. {
  4296. /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
  4297. if (req->opcode == IORING_OP_POLL_ADD)
  4298. return req->async_data;
  4299. return req->apoll->double_poll;
  4300. }
  4301. static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
  4302. {
  4303. if (req->opcode == IORING_OP_POLL_ADD)
  4304. return &req->poll;
  4305. return &req->apoll->poll;
  4306. }
  4307. static void io_poll_remove_double(struct io_kiocb *req)
  4308. {
  4309. struct io_poll_iocb *poll = io_poll_get_double(req);
  4310. lockdep_assert_held(&req->ctx->completion_lock);
  4311. if (poll && poll->head) {
  4312. struct wait_queue_head *head = poll->head;
  4313. spin_lock(&head->lock);
  4314. list_del_init(&poll->wait.entry);
  4315. if (poll->wait.private)
  4316. refcount_dec(&req->refs);
  4317. poll->head = NULL;
  4318. spin_unlock(&head->lock);
  4319. }
  4320. }
  4321. static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
  4322. {
  4323. struct io_ring_ctx *ctx = req->ctx;
  4324. io_poll_remove_double(req);
  4325. req->poll.done = true;
  4326. io_cqring_fill_event(req, error ? error : mangle_poll(mask));
  4327. io_commit_cqring(ctx);
  4328. }
  4329. static void io_poll_task_func(struct callback_head *cb)
  4330. {
  4331. struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
  4332. struct io_ring_ctx *ctx = req->ctx;
  4333. struct io_kiocb *nxt;
  4334. if (io_poll_rewait(req, &req->poll)) {
  4335. spin_unlock_irq(&ctx->completion_lock);
  4336. } else {
  4337. hash_del(&req->hash_node);
  4338. io_poll_complete(req, req->result, 0);
  4339. spin_unlock_irq(&ctx->completion_lock);
  4340. nxt = io_put_req_find_next(req);
  4341. io_cqring_ev_posted(ctx);
  4342. if (nxt)
  4343. __io_req_task_submit(nxt);
  4344. }
  4345. percpu_ref_put(&ctx->refs);
  4346. }
  4347. static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
  4348. int sync, void *key)
  4349. {
  4350. struct io_kiocb *req = wait->private;
  4351. struct io_poll_iocb *poll = io_poll_get_single(req);
  4352. __poll_t mask = key_to_poll(key);
  4353. /* for instances that support it check for an event match first: */
  4354. if (mask && !(mask & poll->events))
  4355. return 0;
  4356. list_del_init(&wait->entry);
  4357. if (poll && poll->head) {
  4358. bool done;
  4359. spin_lock(&poll->head->lock);
  4360. done = list_empty(&poll->wait.entry);
  4361. if (!done)
  4362. list_del_init(&poll->wait.entry);
  4363. /* make sure double remove sees this as being gone */
  4364. wait->private = NULL;
  4365. spin_unlock(&poll->head->lock);
  4366. if (!done) {
  4367. /* use wait func handler, so it matches the rq type */
  4368. poll->wait.func(&poll->wait, mode, sync, key);
  4369. }
  4370. }
  4371. refcount_dec(&req->refs);
  4372. return 1;
  4373. }
  4374. static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
  4375. wait_queue_func_t wake_func)
  4376. {
  4377. poll->head = NULL;
  4378. poll->done = false;
  4379. poll->canceled = false;
  4380. poll->events = events;
  4381. INIT_LIST_HEAD(&poll->wait.entry);
  4382. init_waitqueue_func_entry(&poll->wait, wake_func);
  4383. }
  4384. static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
  4385. struct wait_queue_head *head,
  4386. struct io_poll_iocb **poll_ptr)
  4387. {
  4388. struct io_kiocb *req = pt->req;
4389. /*
4390. * The file being polled uses multiple waitqueues for poll handling
4391. * (e.g. one for read, one for write). Set up a separate io_poll_iocb
4392. * if this happens.
4393. */
  4394. if (unlikely(pt->nr_entries)) {
  4395. struct io_poll_iocb *poll_one = poll;
  4396. /* already have a 2nd entry, fail a third attempt */
  4397. if (*poll_ptr) {
  4398. pt->error = -EINVAL;
  4399. return;
  4400. }
  4401. /* double add on the same waitqueue head, ignore */
  4402. if (poll->head == head)
  4403. return;
  4404. poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
  4405. if (!poll) {
  4406. pt->error = -ENOMEM;
  4407. return;
  4408. }
  4409. io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
  4410. refcount_inc(&req->refs);
  4411. poll->wait.private = req;
  4412. *poll_ptr = poll;
  4413. }
  4414. pt->nr_entries++;
  4415. poll->head = head;
  4416. if (poll->events & EPOLLEXCLUSIVE)
  4417. add_wait_queue_exclusive(head, &poll->wait);
  4418. else
  4419. add_wait_queue(head, &poll->wait);
  4420. }
  4421. static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
  4422. struct poll_table_struct *p)
  4423. {
  4424. struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
  4425. struct async_poll *apoll = pt->req->apoll;
  4426. __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
  4427. }
  4428. static void io_async_task_func(struct callback_head *cb)
  4429. {
  4430. struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
  4431. struct async_poll *apoll = req->apoll;
  4432. struct io_ring_ctx *ctx = req->ctx;
  4433. trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
  4434. if (io_poll_rewait(req, &apoll->poll)) {
  4435. spin_unlock_irq(&ctx->completion_lock);
  4436. percpu_ref_put(&ctx->refs);
  4437. return;
  4438. }
  4439. /* If req is still hashed, it cannot have been canceled. Don't check. */
  4440. if (hash_hashed(&req->hash_node))
  4441. hash_del(&req->hash_node);
  4442. io_poll_remove_double(req);
  4443. spin_unlock_irq(&ctx->completion_lock);
  4444. if (!READ_ONCE(apoll->poll.canceled))
  4445. __io_req_task_submit(req);
  4446. else
  4447. __io_req_task_cancel(req, -ECANCELED);
  4448. percpu_ref_put(&ctx->refs);
  4449. kfree(apoll->double_poll);
  4450. kfree(apoll);
  4451. }
  4452. static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
  4453. void *key)
  4454. {
  4455. struct io_kiocb *req = wait->private;
  4456. struct io_poll_iocb *poll = &req->apoll->poll;
  4457. trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
  4458. key_to_poll(key));
  4459. return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
  4460. }
  4461. static void io_poll_req_insert(struct io_kiocb *req)
  4462. {
  4463. struct io_ring_ctx *ctx = req->ctx;
  4464. struct hlist_head *list;
  4465. list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
  4466. hlist_add_head(&req->hash_node, list);
  4467. }
  4468. static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
  4469. struct io_poll_iocb *poll,
  4470. struct io_poll_table *ipt, __poll_t mask,
  4471. wait_queue_func_t wake_func)
  4472. __acquires(&ctx->completion_lock)
  4473. {
  4474. struct io_ring_ctx *ctx = req->ctx;
  4475. bool cancel = false;
  4476. INIT_HLIST_NODE(&req->hash_node);
  4477. io_init_poll_iocb(poll, mask, wake_func);
  4478. poll->file = req->file;
  4479. poll->wait.private = req;
  4480. ipt->pt._key = mask;
  4481. ipt->req = req;
  4482. ipt->error = 0;
  4483. ipt->nr_entries = 0;
  4484. mask = vfs_poll(req->file, &ipt->pt) & poll->events;
  4485. if (unlikely(!ipt->nr_entries) && !ipt->error)
  4486. ipt->error = -EINVAL;
  4487. spin_lock_irq(&ctx->completion_lock);
  4488. if (ipt->error)
  4489. io_poll_remove_double(req);
  4490. if (likely(poll->head)) {
  4491. spin_lock(&poll->head->lock);
  4492. if (unlikely(list_empty(&poll->wait.entry))) {
  4493. if (ipt->error)
  4494. cancel = true;
  4495. ipt->error = 0;
  4496. mask = 0;
  4497. }
  4498. if (mask || ipt->error)
  4499. list_del_init(&poll->wait.entry);
  4500. else if (cancel)
  4501. WRITE_ONCE(poll->canceled, true);
  4502. else if (!poll->done) /* actually waiting for an event */
  4503. io_poll_req_insert(req);
  4504. spin_unlock(&poll->head->lock);
  4505. }
  4506. return mask;
  4507. }
  4508. static bool io_arm_poll_handler(struct io_kiocb *req)
  4509. {
  4510. const struct io_op_def *def = &io_op_defs[req->opcode];
  4511. struct io_ring_ctx *ctx = req->ctx;
  4512. struct async_poll *apoll;
  4513. struct io_poll_table ipt;
  4514. __poll_t mask, ret;
  4515. int rw;
  4516. if (!req->file || !file_can_poll(req->file))
  4517. return false;
  4518. if (req->flags & REQ_F_POLLED)
  4519. return false;
  4520. if (def->pollin)
  4521. rw = READ;
  4522. else if (def->pollout)
  4523. rw = WRITE;
  4524. else
  4525. return false;
4526. /* if we can't do a nonblocking try, there's no point in arming a poll handler */
  4527. if (!io_file_supports_async(req->file, rw))
  4528. return false;
  4529. apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
  4530. if (unlikely(!apoll))
  4531. return false;
  4532. apoll->double_poll = NULL;
  4533. req->flags |= REQ_F_POLLED;
  4534. req->apoll = apoll;
  4535. mask = 0;
  4536. if (def->pollin)
  4537. mask |= POLLIN | POLLRDNORM;
  4538. if (def->pollout)
  4539. mask |= POLLOUT | POLLWRNORM;
  4540. /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
  4541. if ((req->opcode == IORING_OP_RECVMSG) &&
  4542. (req->sr_msg.msg_flags & MSG_ERRQUEUE))
  4543. mask &= ~POLLIN;
  4544. mask |= POLLERR | POLLPRI;
  4545. ipt.pt._qproc = io_async_queue_proc;
  4546. ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
  4547. io_async_wake);
  4548. if (ret || ipt.error) {
  4549. io_poll_remove_double(req);
  4550. spin_unlock_irq(&ctx->completion_lock);
  4551. kfree(apoll->double_poll);
  4552. kfree(apoll);
  4553. return false;
  4554. }
  4555. spin_unlock_irq(&ctx->completion_lock);
  4556. trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
  4557. apoll->poll.events);
  4558. return true;
  4559. }
  4560. static bool __io_poll_remove_one(struct io_kiocb *req,
  4561. struct io_poll_iocb *poll)
  4562. {
  4563. bool do_complete = false;
  4564. spin_lock(&poll->head->lock);
  4565. WRITE_ONCE(poll->canceled, true);
  4566. if (!list_empty(&poll->wait.entry)) {
  4567. list_del_init(&poll->wait.entry);
  4568. do_complete = true;
  4569. }
  4570. spin_unlock(&poll->head->lock);
  4571. hash_del(&req->hash_node);
  4572. return do_complete;
  4573. }
  4574. static bool io_poll_remove_one(struct io_kiocb *req)
  4575. {
  4576. bool do_complete;
  4577. io_poll_remove_double(req);
  4578. if (req->opcode == IORING_OP_POLL_ADD) {
  4579. do_complete = __io_poll_remove_one(req, &req->poll);
  4580. } else {
  4581. struct async_poll *apoll = req->apoll;
  4582. /* non-poll requests have submit ref still */
  4583. do_complete = __io_poll_remove_one(req, &apoll->poll);
  4584. if (do_complete) {
  4585. io_put_req(req);
  4586. kfree(apoll->double_poll);
  4587. kfree(apoll);
  4588. }
  4589. }
  4590. if (do_complete) {
  4591. io_cqring_fill_event(req, -ECANCELED);
  4592. io_commit_cqring(req->ctx);
  4593. req_set_fail_links(req);
  4594. io_put_req_deferred(req, 1);
  4595. }
  4596. return do_complete;
  4597. }
  4598. /*
  4599. * Returns true if we found and killed one or more poll requests
  4600. */
  4601. static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
  4602. struct files_struct *files)
  4603. {
  4604. struct hlist_node *tmp;
  4605. struct io_kiocb *req;
  4606. int posted = 0, i;
  4607. spin_lock_irq(&ctx->completion_lock);
  4608. for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
  4609. struct hlist_head *list;
  4610. list = &ctx->cancel_hash[i];
  4611. hlist_for_each_entry_safe(req, tmp, list, hash_node) {
  4612. if (io_match_task(req, tsk, files))
  4613. posted += io_poll_remove_one(req);
  4614. }
  4615. }
  4616. spin_unlock_irq(&ctx->completion_lock);
  4617. if (posted)
  4618. io_cqring_ev_posted(ctx);
  4619. return posted != 0;
  4620. }
  4621. static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
  4622. {
  4623. struct hlist_head *list;
  4624. struct io_kiocb *req;
  4625. list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
  4626. hlist_for_each_entry(req, list, hash_node) {
  4627. if (sqe_addr != req->user_data)
  4628. continue;
  4629. if (io_poll_remove_one(req))
  4630. return 0;
  4631. return -EALREADY;
  4632. }
  4633. return -ENOENT;
  4634. }
  4635. static int io_poll_remove_prep(struct io_kiocb *req,
  4636. const struct io_uring_sqe *sqe)
  4637. {
  4638. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  4639. return -EINVAL;
  4640. if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
  4641. sqe->poll_events)
  4642. return -EINVAL;
  4643. req->poll.addr = READ_ONCE(sqe->addr);
  4644. return 0;
  4645. }
  4646. /*
  4647. * Find a running poll command that matches one specified in sqe->addr,
  4648. * and remove it if found.
  4649. */
  4650. static int io_poll_remove(struct io_kiocb *req)
  4651. {
  4652. struct io_ring_ctx *ctx = req->ctx;
  4653. u64 addr;
  4654. int ret;
  4655. addr = req->poll.addr;
  4656. spin_lock_irq(&ctx->completion_lock);
  4657. ret = io_poll_cancel(ctx, addr);
  4658. spin_unlock_irq(&ctx->completion_lock);
  4659. if (ret < 0)
  4660. req_set_fail_links(req);
  4661. io_req_complete(req, ret);
  4662. return 0;
  4663. }
  4664. static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
  4665. void *key)
  4666. {
  4667. struct io_kiocb *req = wait->private;
  4668. struct io_poll_iocb *poll = &req->poll;
  4669. return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
  4670. }
  4671. static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
  4672. struct poll_table_struct *p)
  4673. {
  4674. struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
  4675. __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
  4676. }
  4677. static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  4678. {
  4679. struct io_poll_iocb *poll = &req->poll;
  4680. u32 events;
  4681. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  4682. return -EINVAL;
  4683. if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
  4684. return -EINVAL;
  4685. events = READ_ONCE(sqe->poll32_events);
  4686. #ifdef __BIG_ENDIAN
  4687. events = swahw32(events);
  4688. #endif
  4689. poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
  4690. (events & EPOLLEXCLUSIVE);
  4691. return 0;
  4692. }
  4693. static int io_poll_add(struct io_kiocb *req)
  4694. {
  4695. struct io_poll_iocb *poll = &req->poll;
  4696. struct io_ring_ctx *ctx = req->ctx;
  4697. struct io_poll_table ipt;
  4698. __poll_t mask;
  4699. ipt.pt._qproc = io_poll_queue_proc;
  4700. mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
  4701. io_poll_wake);
4702. if (mask) { /* no async, we stole it */
  4703. ipt.error = 0;
  4704. io_poll_complete(req, mask, 0);
  4705. }
  4706. spin_unlock_irq(&ctx->completion_lock);
  4707. if (mask) {
  4708. io_cqring_ev_posted(ctx);
  4709. io_put_req(req);
  4710. }
  4711. return ipt.error;
  4712. }
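/*
 * Editorial sketch (not part of the kernel source): IORING_OP_POLL_ADD,
 * implemented by io_poll_add() above, behaves like a one-shot poll(2) on a
 * single fd. A minimal liburing sketch, assuming an initialized ring and an
 * fd of interest; the 0x1234 tag is an arbitrary example user_data:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data(sqe, (void *)0x1234);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	(cqe->res is the mangled poll mask, e.g. POLLIN, or -errno;
 *	 IORING_OP_POLL_REMOVE matches on the same user_data tag)
 *	io_uring_cqe_seen(&ring, cqe);
 */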
  4713. static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
  4714. {
  4715. struct io_timeout_data *data = container_of(timer,
  4716. struct io_timeout_data, timer);
  4717. struct io_kiocb *req = data->req;
  4718. struct io_ring_ctx *ctx = req->ctx;
  4719. unsigned long flags;
  4720. spin_lock_irqsave(&ctx->completion_lock, flags);
  4721. list_del_init(&req->timeout.list);
  4722. atomic_set(&req->ctx->cq_timeouts,
  4723. atomic_read(&req->ctx->cq_timeouts) + 1);
  4724. io_cqring_fill_event(req, -ETIME);
  4725. io_commit_cqring(ctx);
  4726. spin_unlock_irqrestore(&ctx->completion_lock, flags);
  4727. io_cqring_ev_posted(ctx);
  4728. req_set_fail_links(req);
  4729. io_put_req(req);
  4730. return HRTIMER_NORESTART;
  4731. }
  4732. static int __io_timeout_cancel(struct io_kiocb *req)
  4733. {
  4734. struct io_timeout_data *io = req->async_data;
  4735. int ret;
  4736. ret = hrtimer_try_to_cancel(&io->timer);
  4737. if (ret == -1)
  4738. return -EALREADY;
  4739. list_del_init(&req->timeout.list);
  4740. req_set_fail_links(req);
  4741. io_cqring_fill_event(req, -ECANCELED);
  4742. io_put_req_deferred(req, 1);
  4743. return 0;
  4744. }
  4745. static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
  4746. {
  4747. struct io_kiocb *req;
  4748. int ret = -ENOENT;
  4749. list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
  4750. if (user_data == req->user_data) {
  4751. ret = 0;
  4752. break;
  4753. }
  4754. }
  4755. if (ret == -ENOENT)
  4756. return ret;
  4757. return __io_timeout_cancel(req);
  4758. }
  4759. static int io_timeout_remove_prep(struct io_kiocb *req,
  4760. const struct io_uring_sqe *sqe)
  4761. {
  4762. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  4763. return -EINVAL;
  4764. if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
  4765. return -EINVAL;
  4766. if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags ||
  4767. sqe->splice_fd_in)
  4768. return -EINVAL;
  4769. req->timeout_rem.addr = READ_ONCE(sqe->addr);
  4770. return 0;
  4771. }
  4772. /*
  4773. * Remove or update an existing timeout command
  4774. */
  4775. static int io_timeout_remove(struct io_kiocb *req)
  4776. {
  4777. struct io_ring_ctx *ctx = req->ctx;
  4778. int ret;
  4779. spin_lock_irq(&ctx->completion_lock);
  4780. ret = io_timeout_cancel(ctx, req->timeout_rem.addr);
  4781. io_cqring_fill_event(req, ret);
  4782. io_commit_cqring(ctx);
  4783. spin_unlock_irq(&ctx->completion_lock);
  4784. io_cqring_ev_posted(ctx);
  4785. if (ret < 0)
  4786. req_set_fail_links(req);
  4787. io_put_req(req);
  4788. return 0;
  4789. }
  4790. static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
  4791. bool is_timeout_link)
  4792. {
  4793. struct io_timeout_data *data;
  4794. unsigned flags;
  4795. u32 off = READ_ONCE(sqe->off);
  4796. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  4797. return -EINVAL;
  4798. if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
  4799. sqe->splice_fd_in)
  4800. return -EINVAL;
  4801. if (off && is_timeout_link)
  4802. return -EINVAL;
  4803. flags = READ_ONCE(sqe->timeout_flags);
  4804. if (flags & ~IORING_TIMEOUT_ABS)
  4805. return -EINVAL;
  4806. req->timeout.off = off;
  4807. if (!req->async_data && io_alloc_async_data(req))
  4808. return -ENOMEM;
  4809. data = req->async_data;
  4810. data->req = req;
  4811. if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
  4812. return -EFAULT;
  4813. if (flags & IORING_TIMEOUT_ABS)
  4814. data->mode = HRTIMER_MODE_ABS;
  4815. else
  4816. data->mode = HRTIMER_MODE_REL;
  4817. INIT_LIST_HEAD(&req->timeout.list);
  4818. hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
  4819. return 0;
  4820. }
  4821. static int io_timeout(struct io_kiocb *req)
  4822. {
  4823. struct io_ring_ctx *ctx = req->ctx;
  4824. struct io_timeout_data *data = req->async_data;
  4825. struct list_head *entry;
  4826. u32 tail, off = req->timeout.off;
  4827. spin_lock_irq(&ctx->completion_lock);
  4828. /*
4829. * sqe->off holds how many events need to occur for this timeout
4830. * to be satisfied. If it isn't set, then this is a pure timeout
4831. * request and the sequence isn't used.
  4832. */
  4833. if (io_is_timeout_noseq(req)) {
  4834. entry = ctx->timeout_list.prev;
  4835. goto add;
  4836. }
  4837. tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
  4838. req->timeout.target_seq = tail + off;
  4839. /* Update the last seq here in case io_flush_timeouts() hasn't.
  4840. * This is safe because ->completion_lock is held, and submissions
  4841. * and completions are never mixed in the same ->completion_lock section.
  4842. */
  4843. ctx->cq_last_tm_flush = tail;
  4844. /*
  4845. * Insertion sort, ensuring the first entry in the list is always
  4846. * the one we need first.
  4847. */
  4848. list_for_each_prev(entry, &ctx->timeout_list) {
  4849. struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
  4850. timeout.list);
  4851. if (io_is_timeout_noseq(nxt))
  4852. continue;
  4853. /* nxt.seq is behind @tail, otherwise would've been completed */
  4854. if (off >= nxt->timeout.target_seq - tail)
  4855. break;
  4856. }
  4857. add:
  4858. list_add(&req->timeout.list, entry);
  4859. data->timer.function = io_timeout_fn;
  4860. hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
  4861. spin_unlock_irq(&ctx->completion_lock);
  4862. return 0;
  4863. }
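/*
 * Editorial sketch (not part of the kernel source): io_timeout() above
 * orders timeouts by target CQ sequence; with sqe->off == 0 it acts as a
 * pure timer. A minimal liburing sketch, assuming an initialized ring;
 * this arms a one-second relative timeout:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	(cqe->res is -ETIME when the timer fires, as io_timeout_fn() fills it)
 *	io_uring_cqe_seen(&ring, cqe);
 */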
  4864. static bool io_cancel_cb(struct io_wq_work *work, void *data)
  4865. {
  4866. struct io_kiocb *req = container_of(work, struct io_kiocb, work);
  4867. return req->user_data == (unsigned long) data;
  4868. }
  4869. static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
  4870. {
  4871. enum io_wq_cancel cancel_ret;
  4872. int ret = 0;
  4873. cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
  4874. switch (cancel_ret) {
  4875. case IO_WQ_CANCEL_OK:
  4876. ret = 0;
  4877. break;
  4878. case IO_WQ_CANCEL_RUNNING:
  4879. ret = -EALREADY;
  4880. break;
  4881. case IO_WQ_CANCEL_NOTFOUND:
  4882. ret = -ENOENT;
  4883. break;
  4884. }
  4885. return ret;
  4886. }
  4887. static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
  4888. struct io_kiocb *req, __u64 sqe_addr,
  4889. int success_ret)
  4890. {
  4891. unsigned long flags;
  4892. int ret;
  4893. ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
  4894. if (ret != -ENOENT) {
  4895. spin_lock_irqsave(&ctx->completion_lock, flags);
  4896. goto done;
  4897. }
  4898. spin_lock_irqsave(&ctx->completion_lock, flags);
  4899. ret = io_timeout_cancel(ctx, sqe_addr);
  4900. if (ret != -ENOENT)
  4901. goto done;
  4902. ret = io_poll_cancel(ctx, sqe_addr);
  4903. done:
  4904. if (!ret)
  4905. ret = success_ret;
  4906. io_cqring_fill_event(req, ret);
  4907. io_commit_cqring(ctx);
  4908. spin_unlock_irqrestore(&ctx->completion_lock, flags);
  4909. io_cqring_ev_posted(ctx);
  4910. if (ret < 0)
  4911. req_set_fail_links(req);
  4912. io_put_req(req);
  4913. }
  4914. static int io_async_cancel_prep(struct io_kiocb *req,
  4915. const struct io_uring_sqe *sqe)
  4916. {
  4917. if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
  4918. return -EINVAL;
  4919. if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
  4920. return -EINVAL;
  4921. if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
  4922. sqe->splice_fd_in)
  4923. return -EINVAL;
  4924. req->cancel.addr = READ_ONCE(sqe->addr);
  4925. return 0;
  4926. }
  4927. static int io_async_cancel(struct io_kiocb *req)
  4928. {
  4929. struct io_ring_ctx *ctx = req->ctx;
  4930. io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
  4931. return 0;
  4932. }
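/*
 * Editorial sketch (not part of the kernel source): io_async_cancel() tries
 * the io-wq queue first, then timeouts, then poll requests, matching on the
 * target request's user_data. A sketch using liburing's cancel helper; the
 * exact prep signature has varied across liburing releases, so treat this
 * as an approximation:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cancel(sqe, (void *)0x1234, 0);
 *	io_uring_submit(&ring);
 *	(0x1234 is the tag previously set via io_uring_sqe_set_data() on the
 *	 target request; the cancel op completes with 0, -ENOENT or
 *	 -EALREADY, as io_async_find_and_cancel() above sets it)
 */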
  4933. static int io_files_update_prep(struct io_kiocb *req,
  4934. const struct io_uring_sqe *sqe)
  4935. {
  4936. if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
  4937. return -EINVAL;
  4938. if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
  4939. return -EINVAL;
  4940. if (sqe->ioprio || sqe->rw_flags)
  4941. return -EINVAL;
  4942. req->files_update.offset = READ_ONCE(sqe->off);
  4943. req->files_update.nr_args = READ_ONCE(sqe->len);
  4944. if (!req->files_update.nr_args)
  4945. return -EINVAL;
  4946. req->files_update.arg = READ_ONCE(sqe->addr);
  4947. return 0;
  4948. }
  4949. static int io_files_update(struct io_kiocb *req, bool force_nonblock,
  4950. struct io_comp_state *cs)
  4951. {
  4952. struct io_ring_ctx *ctx = req->ctx;
  4953. struct io_uring_files_update up;
  4954. int ret;
  4955. if (force_nonblock)
  4956. return -EAGAIN;
  4957. up.offset = req->files_update.offset;
  4958. up.fds = req->files_update.arg;
  4959. mutex_lock(&ctx->uring_lock);
  4960. ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
  4961. mutex_unlock(&ctx->uring_lock);
  4962. if (ret < 0)
  4963. req_set_fail_links(req);
  4964. __io_req_complete(req, ret, 0, cs);
  4965. return 0;
  4966. }
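/*
 * Editorial sketch (not part of the kernel source): IORING_OP_FILES_UPDATE
 * edits a registered (fixed) file table in place. A sketch assuming
 * liburing's io_uring_register_files_update() wrapper (which goes through
 * the registration syscall rather than an SQE, but ends in the same
 * __io_sqe_files_update() path) and a table registered earlier with
 * io_uring_register_files(); the path and slot are example values:
 *
 *	int new_fd = open("/tmp/example", O_RDONLY);
 *	int fds[1] = { new_fd };
 *
 *	(replace slot 3 of the registered table with new_fd)
 *	io_uring_register_files_update(&ring, 3, fds, 1);
 */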
  4967. static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  4968. {
  4969. switch (req->opcode) {
  4970. case IORING_OP_NOP:
  4971. return 0;
  4972. case IORING_OP_READV:
  4973. case IORING_OP_READ_FIXED:
  4974. case IORING_OP_READ:
  4975. return io_read_prep(req, sqe);
  4976. case IORING_OP_WRITEV:
  4977. case IORING_OP_WRITE_FIXED:
  4978. case IORING_OP_WRITE:
  4979. return io_write_prep(req, sqe);
  4980. case IORING_OP_POLL_ADD:
  4981. return io_poll_add_prep(req, sqe);
  4982. case IORING_OP_POLL_REMOVE:
  4983. return io_poll_remove_prep(req, sqe);
  4984. case IORING_OP_FSYNC:
  4985. return io_prep_fsync(req, sqe);
  4986. case IORING_OP_SYNC_FILE_RANGE:
  4987. return io_prep_sfr(req, sqe);
  4988. case IORING_OP_SENDMSG:
  4989. case IORING_OP_SEND:
  4990. return io_sendmsg_prep(req, sqe);
  4991. case IORING_OP_RECVMSG:
  4992. case IORING_OP_RECV:
  4993. return io_recvmsg_prep(req, sqe);
  4994. case IORING_OP_CONNECT:
  4995. return io_connect_prep(req, sqe);
  4996. case IORING_OP_TIMEOUT:
  4997. return io_timeout_prep(req, sqe, false);
  4998. case IORING_OP_TIMEOUT_REMOVE:
  4999. return io_timeout_remove_prep(req, sqe);
  5000. case IORING_OP_ASYNC_CANCEL:
  5001. return io_async_cancel_prep(req, sqe);
  5002. case IORING_OP_LINK_TIMEOUT:
  5003. return io_timeout_prep(req, sqe, true);
  5004. case IORING_OP_ACCEPT:
  5005. return io_accept_prep(req, sqe);
  5006. case IORING_OP_FALLOCATE:
  5007. return io_fallocate_prep(req, sqe);
  5008. case IORING_OP_OPENAT:
  5009. return io_openat_prep(req, sqe);
  5010. case IORING_OP_CLOSE:
  5011. return io_close_prep(req, sqe);
  5012. case IORING_OP_FILES_UPDATE:
  5013. return io_files_update_prep(req, sqe);
  5014. case IORING_OP_STATX:
  5015. return io_statx_prep(req, sqe);
  5016. case IORING_OP_FADVISE:
  5017. return io_fadvise_prep(req, sqe);
  5018. case IORING_OP_MADVISE:
  5019. return io_madvise_prep(req, sqe);
  5020. case IORING_OP_OPENAT2:
  5021. return io_openat2_prep(req, sqe);
  5022. case IORING_OP_EPOLL_CTL:
  5023. return io_epoll_ctl_prep(req, sqe);
  5024. case IORING_OP_SPLICE:
  5025. return io_splice_prep(req, sqe);
  5026. case IORING_OP_PROVIDE_BUFFERS:
  5027. return io_provide_buffers_prep(req, sqe);
  5028. case IORING_OP_REMOVE_BUFFERS:
  5029. return io_remove_buffers_prep(req, sqe);
  5030. case IORING_OP_TEE:
  5031. return io_tee_prep(req, sqe);
  5032. }
  5033. printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
  5034. req->opcode);
5035. return -EINVAL;
  5036. }
  5037. static int io_req_defer_prep(struct io_kiocb *req,
  5038. const struct io_uring_sqe *sqe)
  5039. {
  5040. if (!sqe)
  5041. return 0;
  5042. if (io_alloc_async_data(req))
  5043. return -EAGAIN;
  5044. return io_req_prep(req, sqe);
  5045. }
  5046. static u32 io_get_sequence(struct io_kiocb *req)
  5047. {
  5048. struct io_kiocb *pos;
  5049. struct io_ring_ctx *ctx = req->ctx;
  5050. u32 total_submitted, nr_reqs = 1;
  5051. if (req->flags & REQ_F_LINK_HEAD)
  5052. list_for_each_entry(pos, &req->link_list, link_list)
  5053. nr_reqs++;
  5054. total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
  5055. return total_submitted - nr_reqs;
  5056. }
  5057. static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  5058. {
  5059. struct io_ring_ctx *ctx = req->ctx;
  5060. struct io_defer_entry *de;
  5061. int ret;
  5062. u32 seq;
5063. /* Still need to defer if there are pending reqs in the defer list. */
  5064. if (likely(list_empty_careful(&ctx->defer_list) &&
  5065. !(req->flags & REQ_F_IO_DRAIN)))
  5066. return 0;
  5067. seq = io_get_sequence(req);
  5068. /* Still a chance to pass the sequence check */
  5069. if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
  5070. return 0;
  5071. if (!req->async_data) {
  5072. ret = io_req_defer_prep(req, sqe);
  5073. if (ret)
  5074. return ret;
  5075. }
  5076. io_prep_async_link(req);
  5077. de = kmalloc(sizeof(*de), GFP_KERNEL);
  5078. if (!de)
  5079. return -ENOMEM;
  5080. spin_lock_irq(&ctx->completion_lock);
  5081. if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
  5082. spin_unlock_irq(&ctx->completion_lock);
  5083. kfree(de);
  5084. io_queue_async_work(req);
  5085. return -EIOCBQUEUED;
  5086. }
  5087. trace_io_uring_defer(ctx, req, req->user_data);
  5088. de->req = req;
  5089. de->seq = seq;
  5090. list_add_tail(&de->list, &ctx->defer_list);
  5091. spin_unlock_irq(&ctx->completion_lock);
  5092. return -EIOCBQUEUED;
  5093. }
  5094. static void io_req_drop_files(struct io_kiocb *req)
  5095. {
  5096. struct io_ring_ctx *ctx = req->ctx;
  5097. struct io_uring_task *tctx = req->task->io_uring;
  5098. unsigned long flags;
  5099. if (req->work.flags & IO_WQ_WORK_FILES) {
  5100. put_files_struct(req->work.identity->files);
  5101. put_nsproxy(req->work.identity->nsproxy);
  5102. }
  5103. spin_lock_irqsave(&ctx->inflight_lock, flags);
  5104. list_del(&req->inflight_entry);
  5105. spin_unlock_irqrestore(&ctx->inflight_lock, flags);
  5106. req->flags &= ~REQ_F_INFLIGHT;
  5107. req->work.flags &= ~IO_WQ_WORK_FILES;
  5108. if (atomic_read(&tctx->in_idle))
  5109. wake_up(&tctx->wait);
  5110. }
  5111. static void __io_clean_op(struct io_kiocb *req)
  5112. {
  5113. if (req->flags & REQ_F_BUFFER_SELECTED) {
  5114. switch (req->opcode) {
  5115. case IORING_OP_READV:
  5116. case IORING_OP_READ_FIXED:
  5117. case IORING_OP_READ:
  5118. kfree((void *)(unsigned long)req->rw.addr);
  5119. break;
  5120. case IORING_OP_RECVMSG:
  5121. case IORING_OP_RECV:
  5122. kfree(req->sr_msg.kbuf);
  5123. break;
  5124. }
  5125. req->flags &= ~REQ_F_BUFFER_SELECTED;
  5126. }
  5127. if (req->flags & REQ_F_NEED_CLEANUP) {
  5128. switch (req->opcode) {
  5129. case IORING_OP_READV:
  5130. case IORING_OP_READ_FIXED:
  5131. case IORING_OP_READ:
  5132. case IORING_OP_WRITEV:
  5133. case IORING_OP_WRITE_FIXED:
  5134. case IORING_OP_WRITE: {
  5135. struct io_async_rw *io = req->async_data;
  5136. if (io->free_iovec)
  5137. kfree(io->free_iovec);
  5138. break;
  5139. }
  5140. case IORING_OP_RECVMSG:
  5141. case IORING_OP_SENDMSG: {
  5142. struct io_async_msghdr *io = req->async_data;
  5143. if (io->iov != io->fast_iov)
  5144. kfree(io->iov);
  5145. break;
  5146. }
  5147. case IORING_OP_SPLICE:
  5148. case IORING_OP_TEE:
  5149. io_put_file(req, req->splice.file_in,
  5150. (req->splice.flags & SPLICE_F_FD_IN_FIXED));
  5151. break;
  5152. case IORING_OP_OPENAT:
  5153. case IORING_OP_OPENAT2:
  5154. if (req->open.filename)
  5155. putname(req->open.filename);
  5156. break;
  5157. }
  5158. req->flags &= ~REQ_F_NEED_CLEANUP;
  5159. }
  5160. }
  5161. static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
  5162. struct io_comp_state *cs)
  5163. {
  5164. struct io_ring_ctx *ctx = req->ctx;
  5165. int ret;
  5166. switch (req->opcode) {
  5167. case IORING_OP_NOP:
  5168. ret = io_nop(req, cs);
  5169. break;
  5170. case IORING_OP_READV:
  5171. case IORING_OP_READ_FIXED:
  5172. case IORING_OP_READ:
  5173. ret = io_read(req, force_nonblock, cs);
  5174. break;
  5175. case IORING_OP_WRITEV:
  5176. case IORING_OP_WRITE_FIXED:
  5177. case IORING_OP_WRITE:
  5178. ret = io_write(req, force_nonblock, cs);
  5179. break;
  5180. case IORING_OP_FSYNC:
  5181. ret = io_fsync(req, force_nonblock);
  5182. break;
  5183. case IORING_OP_POLL_ADD:
  5184. ret = io_poll_add(req);
  5185. break;
  5186. case IORING_OP_POLL_REMOVE:
  5187. ret = io_poll_remove(req);
  5188. break;
  5189. case IORING_OP_SYNC_FILE_RANGE:
  5190. ret = io_sync_file_range(req, force_nonblock);
  5191. break;
  5192. case IORING_OP_SENDMSG:
  5193. ret = io_sendmsg(req, force_nonblock, cs);
  5194. break;
  5195. case IORING_OP_SEND:
  5196. ret = io_send(req, force_nonblock, cs);
  5197. break;
  5198. case IORING_OP_RECVMSG:
  5199. ret = io_recvmsg(req, force_nonblock, cs);
  5200. break;
  5201. case IORING_OP_RECV:
  5202. ret = io_recv(req, force_nonblock, cs);
  5203. break;
  5204. case IORING_OP_TIMEOUT:
  5205. ret = io_timeout(req);
  5206. break;
  5207. case IORING_OP_TIMEOUT_REMOVE:
  5208. ret = io_timeout_remove(req);
  5209. break;
  5210. case IORING_OP_ACCEPT:
  5211. ret = io_accept(req, force_nonblock, cs);
  5212. break;
  5213. case IORING_OP_CONNECT:
  5214. ret = io_connect(req, force_nonblock, cs);
  5215. break;
  5216. case IORING_OP_ASYNC_CANCEL:
  5217. ret = io_async_cancel(req);
  5218. break;
  5219. case IORING_OP_FALLOCATE:
  5220. ret = io_fallocate(req, force_nonblock);
  5221. break;
  5222. case IORING_OP_OPENAT:
  5223. ret = io_openat(req, force_nonblock);
  5224. break;
  5225. case IORING_OP_CLOSE:
  5226. ret = io_close(req, force_nonblock, cs);
  5227. break;
  5228. case IORING_OP_FILES_UPDATE:
  5229. ret = io_files_update(req, force_nonblock, cs);
  5230. break;
  5231. case IORING_OP_STATX:
  5232. ret = io_statx(req, force_nonblock);
  5233. break;
  5234. case IORING_OP_FADVISE:
  5235. ret = io_fadvise(req, force_nonblock);
  5236. break;
  5237. case IORING_OP_MADVISE:
  5238. ret = io_madvise(req, force_nonblock);
  5239. break;
  5240. case IORING_OP_OPENAT2:
  5241. ret = io_openat2(req, force_nonblock);
  5242. break;
  5243. case IORING_OP_EPOLL_CTL:
  5244. ret = io_epoll_ctl(req, force_nonblock, cs);
  5245. break;
  5246. case IORING_OP_SPLICE:
  5247. ret = io_splice(req, force_nonblock);
  5248. break;
  5249. case IORING_OP_PROVIDE_BUFFERS:
  5250. ret = io_provide_buffers(req, force_nonblock, cs);
  5251. break;
  5252. case IORING_OP_REMOVE_BUFFERS:
  5253. ret = io_remove_buffers(req, force_nonblock, cs);
  5254. break;
  5255. case IORING_OP_TEE:
  5256. ret = io_tee(req, force_nonblock);
  5257. break;
  5258. default:
  5259. ret = -EINVAL;
  5260. break;
  5261. }
  5262. if (ret)
  5263. return ret;
  5264. /* If the op doesn't have a file, we're not polling for it */
  5265. if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
  5266. const bool in_async = io_wq_current_is_worker();
  5267. /* workqueue context doesn't hold uring_lock, grab it now */
  5268. if (in_async)
  5269. mutex_lock(&ctx->uring_lock);
  5270. io_iopoll_req_issued(req);
  5271. if (in_async)
  5272. mutex_unlock(&ctx->uring_lock);
  5273. }
  5274. return 0;
  5275. }
  5276. static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
  5277. {
  5278. struct io_kiocb *req = container_of(work, struct io_kiocb, work);
  5279. struct io_kiocb *timeout;
  5280. int ret = 0;
  5281. timeout = io_prep_linked_timeout(req);
  5282. if (timeout)
  5283. io_queue_linked_timeout(timeout);
  5284. /* if NO_CANCEL is set, we must still run the work */
  5285. if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
  5286. IO_WQ_WORK_CANCEL) {
  5287. ret = -ECANCELED;
  5288. }
  5289. if (!ret) {
  5290. do {
  5291. ret = io_issue_sqe(req, false, NULL);
  5292. /*
  5293. * We can get EAGAIN for polled IO even though we're
  5294. * forcing a sync submission from here, since we can't
  5295. * wait for request slots on the block side.
  5296. */
  5297. if (ret != -EAGAIN)
  5298. break;
  5299. cond_resched();
  5300. } while (1);
  5301. }
  5302. if (ret) {
  5303. struct io_ring_ctx *lock_ctx = NULL;
  5304. if (req->ctx->flags & IORING_SETUP_IOPOLL)
  5305. lock_ctx = req->ctx;
5306. /*
5307. * io_iopoll_complete() does not hold completion_lock to
5308. * complete polled io, so for polled io we cannot call
5309. * io_req_complete() directly here; otherwise there may be
5310. * concurrent access to the cqring, defer_list, etc., which is
5311. * not safe. Given that io_iopoll_complete() is always called
5312. * under uring_lock, take uring_lock here as well to complete
5313. * polled io.
5314. */
  5315. if (lock_ctx)
  5316. mutex_lock(&lock_ctx->uring_lock);
  5317. req_set_fail_links(req);
  5318. io_req_complete(req, ret);
  5319. if (lock_ctx)
  5320. mutex_unlock(&lock_ctx->uring_lock);
  5321. }
  5322. return io_steal_work(req);
  5323. }
  5324. static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
  5325. int index)
  5326. {
  5327. struct fixed_file_table *table;
  5328. table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
  5329. return table->files[index & IORING_FILE_TABLE_MASK];
  5330. }
  5331. static struct file *io_file_get(struct io_submit_state *state,
  5332. struct io_kiocb *req, int fd, bool fixed)
  5333. {
  5334. struct io_ring_ctx *ctx = req->ctx;
  5335. struct file *file;
  5336. if (fixed) {
  5337. if (unlikely((unsigned int)fd >= ctx->nr_user_files))
  5338. return NULL;
  5339. fd = array_index_nospec(fd, ctx->nr_user_files);
  5340. file = io_file_from_index(ctx, fd);
  5341. if (file) {
  5342. req->fixed_file_refs = &ctx->file_data->node->refs;
  5343. percpu_ref_get(req->fixed_file_refs);
  5344. }
  5345. } else {
  5346. trace_io_uring_file_get(ctx, fd);
  5347. file = __io_file_get(state, fd);
  5348. }
  5349. if (file && file->f_op == &io_uring_fops &&
  5350. !(req->flags & REQ_F_INFLIGHT)) {
  5351. io_req_init_async(req);
  5352. req->flags |= REQ_F_INFLIGHT;
  5353. spin_lock_irq(&ctx->inflight_lock);
  5354. list_add(&req->inflight_entry, &ctx->inflight_list);
  5355. spin_unlock_irq(&ctx->inflight_lock);
  5356. }
  5357. return file;
  5358. }
  5359. static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
  5360. int fd)
  5361. {
  5362. bool fixed;
  5363. fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
  5364. if (unlikely(!fixed && io_async_submit(req->ctx)))
  5365. return -EBADF;
  5366. req->file = io_file_get(state, req, fd, fixed);
  5367. if (req->file || io_op_defs[req->opcode].needs_file_no_error)
  5368. return 0;
  5369. return -EBADF;
  5370. }
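/*
 * hrtimer callback for a linked timeout. If the request this timeout
 * was linked to is still on the link list, take a reference and try
 * to cancel it; otherwise the timeout request itself completes with
 * -ETIME.
 */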
  5371. static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
  5372. {
  5373. struct io_timeout_data *data = container_of(timer,
  5374. struct io_timeout_data, timer);
  5375. struct io_kiocb *req = data->req;
  5376. struct io_ring_ctx *ctx = req->ctx;
  5377. struct io_kiocb *prev = NULL;
  5378. unsigned long flags;
  5379. spin_lock_irqsave(&ctx->completion_lock, flags);
  5380. /*
5381. * We don't expect the list to be empty; that will only happen if we
  5382. * race with the completion of the linked work.
  5383. */
  5384. if (!list_empty(&req->link_list)) {
  5385. prev = list_entry(req->link_list.prev, struct io_kiocb,
  5386. link_list);
  5387. list_del_init(&req->link_list);
  5388. if (!refcount_inc_not_zero(&prev->refs))
  5389. prev = NULL;
  5390. }
  5391. list_del(&req->timeout.list);
  5392. spin_unlock_irqrestore(&ctx->completion_lock, flags);
  5393. if (prev) {
  5394. io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
  5395. io_put_req_deferred(prev, 1);
  5396. } else {
  5397. io_cqring_add_event(req, -ETIME, 0);
  5398. io_put_req_deferred(req, 1);
  5399. }
  5400. return HRTIMER_NORESTART;
  5401. }
  5402. static void __io_queue_linked_timeout(struct io_kiocb *req)
  5403. {
  5404. /*
  5405. * If the list is now empty, then our linked request finished before
5406. * we got a chance to set up the timer
  5407. */
  5408. if (!list_empty(&req->link_list)) {
  5409. struct io_timeout_data *data = req->async_data;
  5410. data->timer.function = io_link_timeout_fn;
  5411. hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
  5412. data->mode);
  5413. }
  5414. }
  5415. static void io_queue_linked_timeout(struct io_kiocb *req)
  5416. {
  5417. struct io_ring_ctx *ctx = req->ctx;
  5418. spin_lock_irq(&ctx->completion_lock);
  5419. __io_queue_linked_timeout(req);
  5420. spin_unlock_irq(&ctx->completion_lock);
  5421. /* drop submission reference */
  5422. io_put_req(req);
  5423. }
  5424. static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
  5425. {
  5426. struct io_kiocb *nxt;
  5427. if (!(req->flags & REQ_F_LINK_HEAD))
  5428. return NULL;
  5429. if (req->flags & REQ_F_LINK_TIMEOUT)
  5430. return NULL;
  5431. nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
  5432. link_list);
  5433. if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
  5434. return NULL;
  5435. nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
  5436. req->flags |= REQ_F_LINK_TIMEOUT;
  5437. return nxt;
  5438. }
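/*
 * Issue a request inline with non-blocking semantics. Personality
 * credentials, if the request carries them, are applied for the
 * duration of the issue. On -EAGAIN (and the request not being marked
 * NOWAIT), try to arm a poll handler and otherwise punt the request to
 * io-wq; any attached linked timeout is queued once the request is in
 * flight. On success the next request in a link chain may be issued
 * directly from here.
 */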
  5439. static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
  5440. {
  5441. struct io_kiocb *linked_timeout;
  5442. const struct cred *old_creds = NULL;
  5443. int ret;
  5444. again:
  5445. linked_timeout = io_prep_linked_timeout(req);
  5446. if ((req->flags & REQ_F_WORK_INITIALIZED) &&
  5447. (req->work.flags & IO_WQ_WORK_CREDS) &&
  5448. req->work.identity->creds != current_cred()) {
  5449. if (old_creds)
  5450. revert_creds(old_creds);
  5451. if (old_creds == req->work.identity->creds)
  5452. old_creds = NULL; /* restored original creds */
  5453. else
  5454. old_creds = override_creds(req->work.identity->creds);
  5455. }
  5456. ret = io_issue_sqe(req, true, cs);
  5457. /*
  5458. * We async punt it if the file wasn't marked NOWAIT, or if the file
  5459. * doesn't support non-blocking read/write attempts
  5460. */
  5461. if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
  5462. if (!io_arm_poll_handler(req)) {
  5463. /*
  5464. * Queued up for async execution, worker will release
  5465. * submit reference when the iocb is actually submitted.
  5466. */
  5467. io_queue_async_work(req);
  5468. }
  5469. if (linked_timeout)
  5470. io_queue_linked_timeout(linked_timeout);
  5471. } else if (likely(!ret)) {
  5472. /* drop submission reference */
  5473. req = io_put_req_find_next(req);
  5474. if (linked_timeout)
  5475. io_queue_linked_timeout(linked_timeout);
  5476. if (req) {
  5477. if (!(req->flags & REQ_F_FORCE_ASYNC))
  5478. goto again;
  5479. io_queue_async_work(req);
  5480. }
  5481. } else {
5482. /* un-prep timeout, so it'll be killed like any other linked request */
  5483. req->flags &= ~REQ_F_LINK_TIMEOUT;
  5484. req_set_fail_links(req);
  5485. io_put_req(req);
  5486. io_req_complete(req, ret);
  5487. }
  5488. if (old_creds)
  5489. revert_creds(old_creds);
  5490. }
  5491. static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
  5492. struct io_comp_state *cs)
  5493. {
  5494. int ret;
  5495. ret = io_req_defer(req, sqe);
  5496. if (ret) {
  5497. if (ret != -EIOCBQUEUED) {
  5498. fail_req:
  5499. req_set_fail_links(req);
  5500. io_put_req(req);
  5501. io_req_complete(req, ret);
  5502. }
  5503. } else if (req->flags & REQ_F_FORCE_ASYNC) {
  5504. if (!req->async_data) {
  5505. ret = io_req_defer_prep(req, sqe);
  5506. if (unlikely(ret))
  5507. goto fail_req;
  5508. }
  5509. io_queue_async_work(req);
  5510. } else {
  5511. if (sqe) {
  5512. ret = io_req_prep(req, sqe);
  5513. if (unlikely(ret))
  5514. goto fail_req;
  5515. }
  5516. __io_queue_sqe(req, cs);
  5517. }
  5518. }
  5519. static inline void io_queue_link_head(struct io_kiocb *req,
  5520. struct io_comp_state *cs)
  5521. {
  5522. if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
  5523. io_put_req(req);
  5524. io_req_complete(req, -ECANCELED);
  5525. } else
  5526. io_queue_sqe(req, NULL, cs);
  5527. }
  5528. static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
  5529. struct io_kiocb **link, struct io_comp_state *cs)
  5530. {
  5531. struct io_ring_ctx *ctx = req->ctx;
  5532. int ret;
  5533. /*
  5534. * If we already have a head request, queue this one for async
  5535. * submittal once the head completes. If we don't have a head but
  5536. * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
  5537. * submitted sync once the chain is complete. If none of those
  5538. * conditions are true (normal request), then just queue it.
  5539. */
  5540. if (*link) {
  5541. struct io_kiocb *head = *link;
  5542. /*
5543. * Since a link is executed sequentially, draining both sides
5544. * of the link also fulfils IOSQE_IO_DRAIN semantics for all
5545. * requests in the link. So it drains the head as well as the
5546. * request following the link. The latter is done via the
5547. * drain_next flag to persist the effect across calls.
  5548. */
  5549. if (req->flags & REQ_F_IO_DRAIN) {
  5550. head->flags |= REQ_F_IO_DRAIN;
  5551. ctx->drain_next = 1;
  5552. }
  5553. ret = io_req_defer_prep(req, sqe);
  5554. if (unlikely(ret)) {
  5555. /* fail even hard links since we don't submit */
  5556. head->flags |= REQ_F_FAIL_LINK;
  5557. return ret;
  5558. }
  5559. trace_io_uring_link(ctx, req, head);
  5560. list_add_tail(&req->link_list, &head->link_list);
  5561. /* last request of a link, enqueue the link */
  5562. if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
  5563. io_queue_link_head(head, cs);
  5564. *link = NULL;
  5565. }
  5566. } else {
  5567. if (unlikely(ctx->drain_next)) {
  5568. req->flags |= REQ_F_IO_DRAIN;
  5569. ctx->drain_next = 0;
  5570. }
  5571. if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
  5572. req->flags |= REQ_F_LINK_HEAD;
  5573. INIT_LIST_HEAD(&req->link_list);
  5574. ret = io_req_defer_prep(req, sqe);
  5575. if (unlikely(ret))
  5576. req->flags |= REQ_F_FAIL_LINK;
  5577. *link = req;
  5578. } else {
  5579. io_queue_sqe(req, sqe, cs);
  5580. }
  5581. }
  5582. return 0;
  5583. }
  5584. /*
  5585. * Batched submission is done, ensure local IO is flushed out.
  5586. */
  5587. static void io_submit_state_end(struct io_submit_state *state)
  5588. {
  5589. if (!list_empty(&state->comp.list))
  5590. io_submit_flush_completions(&state->comp);
  5591. blk_finish_plug(&state->plug);
  5592. io_state_file_put(state);
  5593. if (state->free_reqs)
  5594. kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
  5595. }
  5596. /*
  5597. * Start submission side cache.
  5598. */
  5599. static void io_submit_state_start(struct io_submit_state *state,
  5600. struct io_ring_ctx *ctx, unsigned int max_ios)
  5601. {
  5602. blk_start_plug(&state->plug);
  5603. state->comp.nr = 0;
  5604. INIT_LIST_HEAD(&state->comp.list);
  5605. state->comp.ctx = ctx;
  5606. state->free_reqs = 0;
  5607. state->file = NULL;
  5608. state->ios_left = max_ios;
  5609. }
  5610. static void io_commit_sqring(struct io_ring_ctx *ctx)
  5611. {
  5612. struct io_rings *rings = ctx->rings;
  5613. /*
  5614. * Ensure any loads from the SQEs are done at this point,
  5615. * since once we write the new head, the application could
  5616. * write new data to them.
  5617. */
  5618. smp_store_release(&rings->sq.head, ctx->cached_sq_head);
  5619. }
  5620. /*
  5621. * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
  5622. * that is mapped by userspace. This means that care needs to be taken to
  5623. * ensure that reads are stable, as we cannot rely on userspace always
  5624. * being a good citizen. If members of the sqe are validated and then later
  5625. * used, it's important that those reads are done through READ_ONCE() to
  5626. * prevent a re-load down the line.
  5627. */
  5628. static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
  5629. {
  5630. u32 *sq_array = ctx->sq_array;
  5631. unsigned head;
  5632. /*
  5633. * The cached sq head (or cq tail) serves two purposes:
  5634. *
  5635. * 1) allows us to batch the cost of updating the user visible
5636. * head.
  5637. * 2) allows the kernel side to track the head on its own, even
  5638. * though the application is the one updating it.
  5639. */
  5640. head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
  5641. if (likely(head < ctx->sq_entries))
  5642. return &ctx->sq_sqes[head];
  5643. /* drop invalid entries */
  5644. ctx->cached_sq_dropped++;
  5645. WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
  5646. return NULL;
  5647. }
  5648. static inline void io_consume_sqe(struct io_ring_ctx *ctx)
  5649. {
  5650. ctx->cached_sq_head++;
  5651. }
  5652. /*
  5653. * Check SQE restrictions (opcode and flags).
  5654. *
  5655. * Returns 'true' if SQE is allowed, 'false' otherwise.
  5656. */
  5657. static inline bool io_check_restriction(struct io_ring_ctx *ctx,
  5658. struct io_kiocb *req,
  5659. unsigned int sqe_flags)
  5660. {
  5661. if (!ctx->restricted)
  5662. return true;
  5663. if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
  5664. return false;
  5665. if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
  5666. ctx->restrictions.sqe_flags_required)
  5667. return false;
  5668. if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
  5669. ctx->restrictions.sqe_flags_required))
  5670. return false;
  5671. return true;
  5672. }
  5673. #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
  5674. IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
  5675. IOSQE_BUFFER_SELECT)
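/*
 * Initialize a freshly allocated request from an SQE: copy opcode and
 * user_data, validate the opcode and SQE flags (including any
 * registered restrictions), resolve a personality into credentials if
 * one was given, and grab the file if the opcode needs one.
 */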
  5676. static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
  5677. const struct io_uring_sqe *sqe,
  5678. struct io_submit_state *state)
  5679. {
  5680. unsigned int sqe_flags;
  5681. int id, ret;
  5682. req->opcode = READ_ONCE(sqe->opcode);
  5683. req->user_data = READ_ONCE(sqe->user_data);
  5684. req->async_data = NULL;
  5685. req->file = NULL;
  5686. req->ctx = ctx;
  5687. req->flags = 0;
  5688. /* one is dropped after submission, the other at completion */
  5689. refcount_set(&req->refs, 2);
  5690. req->task = current;
  5691. req->result = 0;
  5692. if (unlikely(req->opcode >= IORING_OP_LAST))
  5693. return -EINVAL;
  5694. if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
  5695. return -EFAULT;
  5696. sqe_flags = READ_ONCE(sqe->flags);
  5697. /* enforce forwards compatibility on users */
  5698. if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
  5699. return -EINVAL;
  5700. if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
  5701. return -EACCES;
  5702. if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
  5703. !io_op_defs[req->opcode].buffer_select)
  5704. return -EOPNOTSUPP;
  5705. id = READ_ONCE(sqe->personality);
  5706. if (id) {
  5707. struct io_identity *iod;
  5708. iod = xa_load(&ctx->personalities, id);
  5709. if (unlikely(!iod))
  5710. return -EINVAL;
  5711. refcount_inc(&iod->count);
  5712. __io_req_init_async(req);
  5713. get_cred(iod->creds);
  5714. req->work.identity = iod;
  5715. req->work.flags |= IO_WQ_WORK_CREDS;
  5716. }
5717. /* same numerical values as the corresponding REQ_F_*, safe to copy */
  5718. req->flags |= sqe_flags;
  5719. if (!io_op_defs[req->opcode].needs_file)
  5720. return 0;
  5721. ret = io_req_set_file(state, req, READ_ONCE(sqe->fd));
  5722. state->ios_left--;
  5723. return ret;
  5724. }
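/*
 * Submit up to @nr SQEs from the SQ ring. ctx and task references are
 * taken up front for all @nr entries; if fewer end up being submitted,
 * the unused references are dropped again at the end. Once io_init_req()
 * has run, a request counts as submitted even if it later fails.
 */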
  5725. static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
  5726. {
  5727. struct io_submit_state state;
  5728. struct io_kiocb *link = NULL;
  5729. int i, submitted = 0;
  5730. /* if we have a backlog and couldn't flush it all, return BUSY */
  5731. if (test_bit(0, &ctx->sq_check_overflow)) {
  5732. if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
  5733. return -EBUSY;
  5734. }
  5735. /* make sure SQ entry isn't read before tail */
  5736. nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
  5737. if (!percpu_ref_tryget_many(&ctx->refs, nr))
  5738. return -EAGAIN;
  5739. percpu_counter_add(&current->io_uring->inflight, nr);
  5740. refcount_add(nr, &current->usage);
  5741. io_submit_state_start(&state, ctx, nr);
  5742. for (i = 0; i < nr; i++) {
  5743. const struct io_uring_sqe *sqe;
  5744. struct io_kiocb *req;
  5745. int err;
  5746. sqe = io_get_sqe(ctx);
  5747. if (unlikely(!sqe)) {
  5748. io_consume_sqe(ctx);
  5749. break;
  5750. }
  5751. req = io_alloc_req(ctx, &state);
  5752. if (unlikely(!req)) {
  5753. if (!submitted)
  5754. submitted = -EAGAIN;
  5755. break;
  5756. }
  5757. io_consume_sqe(ctx);
  5758. /* will complete beyond this point, count as submitted */
  5759. submitted++;
  5760. err = io_init_req(ctx, req, sqe, &state);
  5761. if (unlikely(err)) {
  5762. fail_req:
  5763. io_put_req(req);
  5764. io_req_complete(req, err);
  5765. break;
  5766. }
  5767. trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
  5768. true, io_async_submit(ctx));
  5769. err = io_submit_sqe(req, sqe, &link, &state.comp);
  5770. if (err)
  5771. goto fail_req;
  5772. }
  5773. if (unlikely(submitted != nr)) {
  5774. int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
  5775. struct io_uring_task *tctx = current->io_uring;
  5776. int unused = nr - ref_used;
  5777. percpu_ref_put_many(&ctx->refs, unused);
  5778. percpu_counter_sub(&tctx->inflight, unused);
  5779. put_task_struct_many(current, unused);
  5780. }
  5781. if (link)
  5782. io_queue_link_head(link, &state.comp);
  5783. io_submit_state_end(&state);
  5784. /* Commit SQ ring head once we've consumed and submitted all SQEs */
  5785. io_commit_sqring(ctx);
  5786. return submitted;
  5787. }
  5788. static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
  5789. {
  5790. /* Tell userspace we may need a wakeup call */
  5791. spin_lock_irq(&ctx->completion_lock);
  5792. ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
  5793. spin_unlock_irq(&ctx->completion_lock);
  5794. }
  5795. static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
  5796. {
  5797. spin_lock_irq(&ctx->completion_lock);
  5798. ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
  5799. spin_unlock_irq(&ctx->completion_lock);
  5800. }
  5801. static int io_sq_wake_function(struct wait_queue_entry *wqe, unsigned mode,
  5802. int sync, void *key)
  5803. {
  5804. struct io_ring_ctx *ctx = container_of(wqe, struct io_ring_ctx, sqo_wait_entry);
  5805. int ret;
  5806. ret = autoremove_wake_function(wqe, mode, sync, key);
  5807. if (ret) {
  5808. unsigned long flags;
  5809. spin_lock_irqsave(&ctx->completion_lock, flags);
  5810. ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
  5811. spin_unlock_irqrestore(&ctx->completion_lock, flags);
  5812. }
  5813. return ret;
  5814. }
  5815. enum sq_ret {
  5816. SQT_IDLE = 1,
  5817. SQT_SPIN = 2,
  5818. SQT_DID_WORK = 4,
  5819. };
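/*
 * One iteration of the SQPOLL loop for a single ctx: reap any pending
 * iopoll completions, then either submit new SQEs, keep spinning
 * within the idle period, or report that the ring is idle so the
 * caller can set the wakeup flag and sleep.
 */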
  5820. static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx,
  5821. unsigned long start_jiffies, bool cap_entries)
  5822. {
  5823. unsigned long timeout = start_jiffies + ctx->sq_thread_idle;
  5824. struct io_sq_data *sqd = ctx->sq_data;
  5825. unsigned int to_submit;
  5826. int ret = 0;
  5827. again:
  5828. if (!list_empty(&ctx->iopoll_list)) {
  5829. unsigned nr_events = 0;
  5830. mutex_lock(&ctx->uring_lock);
  5831. if (!list_empty(&ctx->iopoll_list) && !need_resched())
  5832. io_do_iopoll(ctx, &nr_events, 0);
  5833. mutex_unlock(&ctx->uring_lock);
  5834. }
  5835. to_submit = io_sqring_entries(ctx);
  5836. /*
  5837. * If submit got -EBUSY, flag us as needing the application
  5838. * to enter the kernel to reap and flush events.
  5839. */
  5840. if (!to_submit || ret == -EBUSY || need_resched()) {
  5841. /*
  5842. * Drop cur_mm before scheduling, we can't hold it for
  5843. * long periods (or over schedule()). Do this before
  5844. * adding ourselves to the waitqueue, as the unuse/drop
  5845. * may sleep.
  5846. */
  5847. io_sq_thread_drop_mm();
  5848. /*
  5849. * We're polling. If we're within the defined idle
  5850. * period, then let us spin without work before going
5851. * to sleep. The exception is if we got -EBUSY submitting
5852. * more IO; then we should wait for the application to
5853. * reap events and wake us up.
  5854. */
  5855. if (!list_empty(&ctx->iopoll_list) || need_resched() ||
  5856. (!time_after(jiffies, timeout) && ret != -EBUSY &&
  5857. !percpu_ref_is_dying(&ctx->refs)))
  5858. return SQT_SPIN;
  5859. prepare_to_wait(&sqd->wait, &ctx->sqo_wait_entry,
  5860. TASK_INTERRUPTIBLE);
  5861. /*
  5862. * While doing polled IO, before going to sleep, we need
  5863. * to check if there are new reqs added to iopoll_list,
5864. * since reqs may have been punted to an io worker
5865. * and will only be added to iopoll_list later, hence check
  5866. * the iopoll_list again.
  5867. */
  5868. if ((ctx->flags & IORING_SETUP_IOPOLL) &&
  5869. !list_empty_careful(&ctx->iopoll_list)) {
  5870. finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
  5871. goto again;
  5872. }
  5873. to_submit = io_sqring_entries(ctx);
  5874. if (!to_submit || ret == -EBUSY)
  5875. return SQT_IDLE;
  5876. }
  5877. finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
  5878. io_ring_clear_wakeup_flag(ctx);
  5879. /* if we're handling multiple rings, cap submit size for fairness */
  5880. if (cap_entries && to_submit > 8)
  5881. to_submit = 8;
  5882. mutex_lock(&ctx->uring_lock);
  5883. if (likely(!percpu_ref_is_dying(&ctx->refs) && !ctx->sqo_dead))
  5884. ret = io_submit_sqes(ctx, to_submit);
  5885. mutex_unlock(&ctx->uring_lock);
  5886. if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
  5887. wake_up(&ctx->sqo_sq_wait);
  5888. return SQT_DID_WORK;
  5889. }
  5890. static void io_sqd_init_new(struct io_sq_data *sqd)
  5891. {
  5892. struct io_ring_ctx *ctx;
  5893. while (!list_empty(&sqd->ctx_new_list)) {
  5894. ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
  5895. init_wait(&ctx->sqo_wait_entry);
  5896. ctx->sqo_wait_entry.func = io_sq_wake_function;
  5897. list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
  5898. complete(&ctx->sq_thread_comp);
  5899. }
  5900. }
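/*
 * Main loop of the SQPOLL kernel thread. Handles kthread parking,
 * picks up newly attached ctxs, switches credentials per ctx, and
 * drives __io_sq_thread() for every ring sharing this thread. When all
 * rings are idle, the wakeup flags are set and the thread schedules
 * out until it is woken again.
 */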
  5901. static int io_sq_thread(void *data)
  5902. {
  5903. struct cgroup_subsys_state *cur_css = NULL;
  5904. const struct cred *old_cred = NULL;
  5905. struct io_sq_data *sqd = data;
  5906. struct io_ring_ctx *ctx;
  5907. unsigned long start_jiffies;
  5908. start_jiffies = jiffies;
  5909. while (!kthread_should_stop()) {
  5910. enum sq_ret ret = 0;
  5911. bool cap_entries;
  5912. /*
  5913. * Any changes to the sqd lists are synchronized through the
5914. * kthread parking. This synchronizes the thread vs users;
5915. * the users themselves are synchronized on sqd->ctx_lock.
  5916. */
  5917. if (kthread_should_park()) {
  5918. kthread_parkme();
  5919. /*
5920. * When the sq thread is unparked, the previous park operation
5921. * may have come from io_put_sq_data(), which means the sq thread
5922. * is about to be stopped, so check for that here.
  5923. */
  5924. if (kthread_should_stop())
  5925. break;
  5926. }
  5927. if (unlikely(!list_empty(&sqd->ctx_new_list)))
  5928. io_sqd_init_new(sqd);
  5929. cap_entries = !list_is_singular(&sqd->ctx_list);
  5930. list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
  5931. if (current->cred != ctx->creds) {
  5932. if (old_cred)
  5933. revert_creds(old_cred);
  5934. old_cred = override_creds(ctx->creds);
  5935. }
  5936. io_sq_thread_associate_blkcg(ctx, &cur_css);
  5937. #ifdef CONFIG_AUDIT
  5938. current->loginuid = ctx->loginuid;
  5939. current->sessionid = ctx->sessionid;
  5940. #endif
  5941. ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);
  5942. io_sq_thread_drop_mm();
  5943. }
  5944. if (ret & SQT_SPIN) {
  5945. io_run_task_work();
  5946. io_sq_thread_drop_mm();
  5947. cond_resched();
  5948. } else if (ret == SQT_IDLE) {
  5949. if (kthread_should_park())
  5950. continue;
  5951. list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
  5952. io_ring_set_wakeup_flag(ctx);
  5953. schedule();
  5954. start_jiffies = jiffies;
  5955. list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
  5956. io_ring_clear_wakeup_flag(ctx);
  5957. }
  5958. }
  5959. io_run_task_work();
  5960. io_sq_thread_drop_mm();
  5961. if (cur_css)
  5962. io_sq_thread_unassociate_blkcg();
  5963. if (old_cred)
  5964. revert_creds(old_cred);
  5965. kthread_parkme();
  5966. return 0;
  5967. }
  5968. struct io_wait_queue {
  5969. struct wait_queue_entry wq;
  5970. struct io_ring_ctx *ctx;
  5971. unsigned to_wait;
  5972. unsigned nr_timeouts;
  5973. };
  5974. static inline bool io_should_wake(struct io_wait_queue *iowq)
  5975. {
  5976. struct io_ring_ctx *ctx = iowq->ctx;
  5977. /*
  5978. * Wake up if we have enough events, or if a timeout occurred since we
  5979. * started waiting. For timeouts, we always want to return to userspace,
  5980. * regardless of event count.
  5981. */
  5982. return io_cqring_events(ctx) >= iowq->to_wait ||
  5983. atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
  5984. }
  5985. static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
  5986. int wake_flags, void *key)
  5987. {
  5988. struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
  5989. wq);
  5990. /*
  5991. * Cannot safely flush overflowed CQEs from here, ensure we wake up
  5992. * the task, and the next invocation will do it.
  5993. */
  5994. if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
  5995. return autoremove_wake_function(curr, mode, wake_flags, key);
  5996. return -1;
  5997. }
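/*
 * Run any pending task_work before sleeping in cqring_wait. Returns
 * 1 if work was run (or if only the task_work signal was pending and
 * has been cleared), 0 if there is nothing to do, and -EINTR if a real
 * signal is pending.
 */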
  5998. static int io_run_task_work_sig(void)
  5999. {
  6000. if (io_run_task_work())
  6001. return 1;
  6002. if (!signal_pending(current))
  6003. return 0;
  6004. if (current->jobctl & JOBCTL_TASK_WORK) {
  6005. spin_lock_irq(&current->sighand->siglock);
  6006. current->jobctl &= ~JOBCTL_TASK_WORK;
  6007. recalc_sigpending();
  6008. spin_unlock_irq(&current->sighand->siglock);
  6009. return 1;
  6010. }
  6011. return -EINTR;
  6012. }
  6013. /*
  6014. * Wait until events become available, if we don't already have some. The
  6015. * application must reap them itself, as they reside on the shared cq ring.
  6016. */
  6017. static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
  6018. const sigset_t __user *sig, size_t sigsz)
  6019. {
  6020. struct io_wait_queue iowq = {
  6021. .wq = {
  6022. .private = current,
  6023. .func = io_wake_function,
  6024. .entry = LIST_HEAD_INIT(iowq.wq.entry),
  6025. },
  6026. .ctx = ctx,
  6027. .to_wait = min_events,
  6028. };
  6029. struct io_rings *rings = ctx->rings;
  6030. int ret = 0;
  6031. do {
  6032. io_cqring_overflow_flush(ctx, false, NULL, NULL);
  6033. if (io_cqring_events(ctx) >= min_events)
  6034. return 0;
  6035. if (!io_run_task_work())
  6036. break;
  6037. } while (1);
  6038. if (sig) {
  6039. #ifdef CONFIG_COMPAT
  6040. if (in_compat_syscall())
  6041. ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
  6042. sigsz);
  6043. else
  6044. #endif
  6045. ret = set_user_sigmask(sig, sigsz);
  6046. if (ret)
  6047. return ret;
  6048. }
  6049. iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
  6050. trace_io_uring_cqring_wait(ctx, min_events);
  6051. do {
  6052. io_cqring_overflow_flush(ctx, false, NULL, NULL);
  6053. prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
  6054. TASK_INTERRUPTIBLE);
  6055. /* make sure we run task_work before checking for signals */
  6056. ret = io_run_task_work_sig();
  6057. if (ret > 0) {
  6058. finish_wait(&ctx->wait, &iowq.wq);
  6059. continue;
  6060. }
  6061. else if (ret < 0)
  6062. break;
  6063. if (io_should_wake(&iowq))
  6064. break;
  6065. if (test_bit(0, &ctx->cq_check_overflow)) {
  6066. finish_wait(&ctx->wait, &iowq.wq);
  6067. continue;
  6068. }
  6069. schedule();
  6070. } while (1);
  6071. finish_wait(&ctx->wait, &iowq.wq);
  6072. restore_saved_sigmask_unless(ret == -EINTR);
  6073. return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
  6074. }
  6075. static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
  6076. {
  6077. #if defined(CONFIG_UNIX)
  6078. if (ctx->ring_sock) {
  6079. struct sock *sock = ctx->ring_sock->sk;
  6080. struct sk_buff *skb;
  6081. while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
  6082. kfree_skb(skb);
  6083. }
  6084. #else
  6085. int i;
  6086. for (i = 0; i < ctx->nr_user_files; i++) {
  6087. struct file *file;
  6088. file = io_file_from_index(ctx, i);
  6089. if (file)
  6090. fput(file);
  6091. }
  6092. #endif
  6093. }
  6094. static void io_file_ref_kill(struct percpu_ref *ref)
  6095. {
  6096. struct fixed_file_data *data;
  6097. data = container_of(ref, struct fixed_file_data, refs);
  6098. complete(&data->done);
  6099. }
  6100. static void io_sqe_files_set_node(struct fixed_file_data *file_data,
  6101. struct fixed_file_ref_node *ref_node)
  6102. {
  6103. spin_lock_bh(&file_data->lock);
  6104. file_data->node = ref_node;
  6105. list_add_tail(&ref_node->node, &file_data->ref_list);
  6106. spin_unlock_bh(&file_data->lock);
  6107. percpu_ref_get(&file_data->refs);
  6108. }
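/*
 * Tear down the registered file set: kill the current ref node and the
 * file_data refs, then wait for all outstanding fixed-file references
 * to drain. If the wait is interrupted by a signal, the refs are
 * resurrected and a pre-allocated backup node is installed so the set
 * stays usable. On success the tables and file_data are freed.
 */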
  6109. static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
  6110. {
  6111. struct fixed_file_data *data = ctx->file_data;
  6112. struct fixed_file_ref_node *backup_node, *ref_node = NULL;
  6113. unsigned nr_tables, i;
  6114. int ret;
  6115. if (!data)
  6116. return -ENXIO;
  6117. backup_node = alloc_fixed_file_ref_node(ctx);
  6118. if (!backup_node)
  6119. return -ENOMEM;
  6120. spin_lock_bh(&data->lock);
  6121. ref_node = data->node;
  6122. spin_unlock_bh(&data->lock);
  6123. if (ref_node)
  6124. percpu_ref_kill(&ref_node->refs);
  6125. percpu_ref_kill(&data->refs);
6126. /* wait for all ref nodes to complete */
  6127. flush_delayed_work(&ctx->file_put_work);
  6128. do {
  6129. ret = wait_for_completion_interruptible(&data->done);
  6130. if (!ret)
  6131. break;
  6132. ret = io_run_task_work_sig();
  6133. if (ret < 0) {
  6134. percpu_ref_resurrect(&data->refs);
  6135. reinit_completion(&data->done);
  6136. io_sqe_files_set_node(data, backup_node);
  6137. return ret;
  6138. }
  6139. } while (1);
  6140. __io_sqe_files_unregister(ctx);
  6141. nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
  6142. for (i = 0; i < nr_tables; i++)
  6143. kfree(data->table[i].files);
  6144. kfree(data->table);
  6145. percpu_ref_exit(&data->refs);
  6146. kfree(data);
  6147. ctx->file_data = NULL;
  6148. ctx->nr_user_files = 0;
  6149. destroy_fixed_file_ref_node(backup_node);
  6150. return 0;
  6151. }
  6152. static void io_put_sq_data(struct io_sq_data *sqd)
  6153. {
  6154. if (refcount_dec_and_test(&sqd->refs)) {
  6155. /*
  6156. * The park is a bit of a work-around, without it we get
  6157. * warning spews on shutdown with SQPOLL set and affinity
  6158. * set to a single CPU.
  6159. */
  6160. if (sqd->thread) {
  6161. kthread_park(sqd->thread);
  6162. kthread_stop(sqd->thread);
  6163. }
  6164. kfree(sqd);
  6165. }
  6166. }
  6167. static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
  6168. {
  6169. struct io_ring_ctx *ctx_attach;
  6170. struct io_sq_data *sqd;
  6171. struct fd f;
  6172. f = fdget(p->wq_fd);
  6173. if (!f.file)
  6174. return ERR_PTR(-ENXIO);
  6175. if (f.file->f_op != &io_uring_fops) {
  6176. fdput(f);
  6177. return ERR_PTR(-EINVAL);
  6178. }
  6179. ctx_attach = f.file->private_data;
  6180. sqd = ctx_attach->sq_data;
  6181. if (!sqd) {
  6182. fdput(f);
  6183. return ERR_PTR(-EINVAL);
  6184. }
  6185. refcount_inc(&sqd->refs);
  6186. fdput(f);
  6187. return sqd;
  6188. }
  6189. static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
  6190. {
  6191. struct io_sq_data *sqd;
  6192. if (p->flags & IORING_SETUP_ATTACH_WQ)
  6193. return io_attach_sq_data(p);
  6194. sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
  6195. if (!sqd)
  6196. return ERR_PTR(-ENOMEM);
  6197. refcount_set(&sqd->refs, 1);
  6198. INIT_LIST_HEAD(&sqd->ctx_list);
  6199. INIT_LIST_HEAD(&sqd->ctx_new_list);
  6200. mutex_init(&sqd->ctx_lock);
  6201. mutex_init(&sqd->lock);
  6202. init_waitqueue_head(&sqd->wait);
  6203. return sqd;
  6204. }
  6205. static void io_sq_thread_unpark(struct io_sq_data *sqd)
  6206. __releases(&sqd->lock)
  6207. {
  6208. if (!sqd->thread)
  6209. return;
  6210. kthread_unpark(sqd->thread);
  6211. mutex_unlock(&sqd->lock);
  6212. }
  6213. static void io_sq_thread_park(struct io_sq_data *sqd)
  6214. __acquires(&sqd->lock)
  6215. {
  6216. if (!sqd->thread)
  6217. return;
  6218. mutex_lock(&sqd->lock);
  6219. kthread_park(sqd->thread);
  6220. }
  6221. static void io_sq_thread_stop(struct io_ring_ctx *ctx)
  6222. {
  6223. struct io_sq_data *sqd = ctx->sq_data;
  6224. if (sqd) {
  6225. if (sqd->thread) {
  6226. /*
  6227. * We may arrive here from the error branch in
  6228. * io_sq_offload_create() where the kthread is created
6229. * without being woken up, thus wake it up now to make
  6230. * sure the wait will complete.
  6231. */
  6232. wake_up_process(sqd->thread);
  6233. wait_for_completion(&ctx->sq_thread_comp);
  6234. io_sq_thread_park(sqd);
  6235. }
  6236. mutex_lock(&sqd->ctx_lock);
  6237. list_del(&ctx->sqd_list);
  6238. mutex_unlock(&sqd->ctx_lock);
  6239. if (sqd->thread) {
  6240. finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
  6241. io_sq_thread_unpark(sqd);
  6242. }
  6243. io_put_sq_data(sqd);
  6244. ctx->sq_data = NULL;
  6245. }
  6246. }
  6247. static void io_finish_async(struct io_ring_ctx *ctx)
  6248. {
  6249. io_sq_thread_stop(ctx);
  6250. if (ctx->io_wq) {
  6251. io_wq_destroy(ctx->io_wq);
  6252. ctx->io_wq = NULL;
  6253. }
  6254. }
  6255. #if defined(CONFIG_UNIX)
  6256. /*
  6257. * Ensure the UNIX gc is aware of our file set, so we are certain that
  6258. * the io_uring can be safely unregistered on process exit, even if we have
  6259. * loops in the file referencing.
  6260. */
  6261. static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
  6262. {
  6263. struct sock *sk = ctx->ring_sock->sk;
  6264. struct scm_fp_list *fpl;
  6265. struct sk_buff *skb;
  6266. int i, nr_files;
  6267. fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
  6268. if (!fpl)
  6269. return -ENOMEM;
  6270. skb = alloc_skb(0, GFP_KERNEL);
  6271. if (!skb) {
  6272. kfree(fpl);
  6273. return -ENOMEM;
  6274. }
  6275. skb->sk = sk;
  6276. nr_files = 0;
  6277. fpl->user = get_uid(ctx->user);
  6278. for (i = 0; i < nr; i++) {
  6279. struct file *file = io_file_from_index(ctx, i + offset);
  6280. if (!file)
  6281. continue;
  6282. fpl->fp[nr_files] = get_file(file);
  6283. unix_inflight(fpl->user, fpl->fp[nr_files]);
  6284. nr_files++;
  6285. }
  6286. if (nr_files) {
  6287. fpl->max = SCM_MAX_FD;
  6288. fpl->count = nr_files;
  6289. UNIXCB(skb).fp = fpl;
  6290. skb->destructor = unix_destruct_scm;
  6291. refcount_add(skb->truesize, &sk->sk_wmem_alloc);
  6292. skb_queue_head(&sk->sk_receive_queue, skb);
  6293. for (i = 0; i < nr; i++) {
  6294. struct file *file = io_file_from_index(ctx, i + offset);
  6295. if (file)
  6296. fput(file);
  6297. }
  6298. } else {
  6299. kfree_skb(skb);
  6300. free_uid(fpl->user);
  6301. kfree(fpl);
  6302. }
  6303. return 0;
  6304. }
  6305. /*
  6306. * If UNIX sockets are enabled, fd passing can cause a reference cycle which
  6307. * causes regular reference counting to break down. We rely on the UNIX
  6308. * garbage collection to take care of this problem for us.
  6309. */
  6310. static int io_sqe_files_scm(struct io_ring_ctx *ctx)
  6311. {
  6312. unsigned left, total;
  6313. int ret = 0;
  6314. total = 0;
  6315. left = ctx->nr_user_files;
  6316. while (left) {
  6317. unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
  6318. ret = __io_sqe_files_scm(ctx, this_files, total);
  6319. if (ret)
  6320. break;
  6321. left -= this_files;
  6322. total += this_files;
  6323. }
  6324. if (!ret)
  6325. return 0;
  6326. while (total < ctx->nr_user_files) {
  6327. struct file *file = io_file_from_index(ctx, total);
  6328. if (file)
  6329. fput(file);
  6330. total++;
  6331. }
  6332. return ret;
  6333. }
  6334. #else
  6335. static int io_sqe_files_scm(struct io_ring_ctx *ctx)
  6336. {
  6337. return 0;
  6338. }
  6339. #endif
  6340. static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
  6341. unsigned nr_tables, unsigned nr_files)
  6342. {
  6343. int i;
  6344. for (i = 0; i < nr_tables; i++) {
  6345. struct fixed_file_table *table = &file_data->table[i];
  6346. unsigned this_files;
  6347. this_files = min(nr_files, IORING_MAX_FILES_TABLE);
  6348. table->files = kcalloc(this_files, sizeof(struct file *),
  6349. GFP_KERNEL_ACCOUNT);
  6350. if (!table->files)
  6351. break;
  6352. nr_files -= this_files;
  6353. }
  6354. if (i == nr_tables)
  6355. return 0;
  6356. for (i = 0; i < nr_tables; i++) {
  6357. struct fixed_file_table *table = &file_data->table[i];
  6358. kfree(table->files);
  6359. }
  6360. return 1;
  6361. }
  6362. static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
  6363. {
  6364. #if defined(CONFIG_UNIX)
  6365. struct sock *sock = ctx->ring_sock->sk;
  6366. struct sk_buff_head list, *head = &sock->sk_receive_queue;
  6367. struct sk_buff *skb;
  6368. int i;
  6369. __skb_queue_head_init(&list);
  6370. /*
  6371. * Find the skb that holds this file in its SCM_RIGHTS. When found,
  6372. * remove this entry and rearrange the file array.
  6373. */
  6374. skb = skb_dequeue(head);
  6375. while (skb) {
  6376. struct scm_fp_list *fp;
  6377. fp = UNIXCB(skb).fp;
  6378. for (i = 0; i < fp->count; i++) {
  6379. int left;
  6380. if (fp->fp[i] != file)
  6381. continue;
  6382. unix_notinflight(fp->user, fp->fp[i]);
  6383. left = fp->count - 1 - i;
  6384. if (left) {
  6385. memmove(&fp->fp[i], &fp->fp[i + 1],
  6386. left * sizeof(struct file *));
  6387. }
  6388. fp->count--;
  6389. if (!fp->count) {
  6390. kfree_skb(skb);
  6391. skb = NULL;
  6392. } else {
  6393. __skb_queue_tail(&list, skb);
  6394. }
  6395. fput(file);
  6396. file = NULL;
  6397. break;
  6398. }
  6399. if (!file)
  6400. break;
  6401. __skb_queue_tail(&list, skb);
  6402. skb = skb_dequeue(head);
  6403. }
  6404. if (skb_peek(&list)) {
  6405. spin_lock_irq(&head->lock);
  6406. while ((skb = __skb_dequeue(&list)) != NULL)
  6407. __skb_queue_tail(head, skb);
  6408. spin_unlock_irq(&head->lock);
  6409. }
  6410. #else
  6411. fput(file);
  6412. #endif
  6413. }
  6414. struct io_file_put {
  6415. struct list_head list;
  6416. struct file *file;
  6417. };
  6418. static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
  6419. {
  6420. struct fixed_file_data *file_data = ref_node->file_data;
  6421. struct io_ring_ctx *ctx = file_data->ctx;
  6422. struct io_file_put *pfile, *tmp;
  6423. list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
  6424. list_del(&pfile->list);
  6425. io_ring_file_put(ctx, pfile->file);
  6426. kfree(pfile);
  6427. }
  6428. percpu_ref_exit(&ref_node->refs);
  6429. kfree(ref_node);
  6430. percpu_ref_put(&file_data->refs);
  6431. }
  6432. static void io_file_put_work(struct work_struct *work)
  6433. {
  6434. struct io_ring_ctx *ctx;
  6435. struct llist_node *node;
  6436. ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
  6437. node = llist_del_all(&ctx->file_put_llist);
  6438. while (node) {
  6439. struct fixed_file_ref_node *ref_node;
  6440. struct llist_node *next = node->next;
  6441. ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
  6442. __io_file_put_work(ref_node);
  6443. node = next;
  6444. }
  6445. }
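/*
 * percpu_ref release callback for a fixed-file ref node. Mark the node
 * done and move any nodes that have completed, in order, onto the
 * llist that io_file_put_work() processes; the work is queued with a
 * delay in the common case, or immediately when the whole file set is
 * being torn down.
 */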
  6446. static void io_file_data_ref_zero(struct percpu_ref *ref)
  6447. {
  6448. struct fixed_file_ref_node *ref_node;
  6449. struct fixed_file_data *data;
  6450. struct io_ring_ctx *ctx;
  6451. bool first_add = false;
  6452. int delay = HZ;
  6453. ref_node = container_of(ref, struct fixed_file_ref_node, refs);
  6454. data = ref_node->file_data;
  6455. ctx = data->ctx;
  6456. spin_lock_bh(&data->lock);
  6457. ref_node->done = true;
  6458. while (!list_empty(&data->ref_list)) {
  6459. ref_node = list_first_entry(&data->ref_list,
  6460. struct fixed_file_ref_node, node);
  6461. /* recycle ref nodes in order */
  6462. if (!ref_node->done)
  6463. break;
  6464. list_del(&ref_node->node);
  6465. first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
  6466. }
  6467. spin_unlock_bh(&data->lock);
  6468. if (percpu_ref_is_dying(&data->refs))
  6469. delay = 0;
  6470. if (!delay)
  6471. mod_delayed_work(system_wq, &ctx->file_put_work, 0);
  6472. else if (first_add)
  6473. queue_delayed_work(system_wq, &ctx->file_put_work, delay);
  6474. }
  6475. static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
  6476. struct io_ring_ctx *ctx)
  6477. {
  6478. struct fixed_file_ref_node *ref_node;
  6479. ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
  6480. if (!ref_node)
  6481. return NULL;
  6482. if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
  6483. 0, GFP_KERNEL)) {
  6484. kfree(ref_node);
  6485. return NULL;
  6486. }
  6487. INIT_LIST_HEAD(&ref_node->node);
  6488. INIT_LIST_HEAD(&ref_node->file_list);
  6489. ref_node->file_data = ctx->file_data;
  6490. ref_node->done = false;
  6491. return ref_node;
  6492. }
  6493. static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
  6494. {
  6495. percpu_ref_exit(&ref_node->refs);
  6496. kfree(ref_node);
  6497. }
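/*
 * IORING_REGISTER_FILES: build the fixed file tables from a user array
 * of file descriptors. An fd of -1 leaves a sparse slot, io_uring fds
 * are rejected, and the resulting set is handed to the SCM/UNIX gc
 * machinery so reference cycles through the ring socket can be broken.
 */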
  6498. static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
  6499. unsigned nr_args)
  6500. {
  6501. __s32 __user *fds = (__s32 __user *) arg;
  6502. unsigned nr_tables, i;
  6503. struct file *file;
  6504. int fd, ret = -ENOMEM;
  6505. struct fixed_file_ref_node *ref_node;
  6506. struct fixed_file_data *file_data;
  6507. if (ctx->file_data)
  6508. return -EBUSY;
  6509. if (!nr_args)
  6510. return -EINVAL;
  6511. if (nr_args > IORING_MAX_FIXED_FILES)
  6512. return -EMFILE;
  6513. if (nr_args > rlimit(RLIMIT_NOFILE))
  6514. return -EMFILE;
  6515. file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL_ACCOUNT);
  6516. if (!file_data)
  6517. return -ENOMEM;
  6518. file_data->ctx = ctx;
  6519. init_completion(&file_data->done);
  6520. INIT_LIST_HEAD(&file_data->ref_list);
  6521. spin_lock_init(&file_data->lock);
  6522. nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
  6523. file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
  6524. GFP_KERNEL_ACCOUNT);
  6525. if (!file_data->table)
  6526. goto out_free;
  6527. if (percpu_ref_init(&file_data->refs, io_file_ref_kill,
  6528. PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
  6529. goto out_free;
  6530. if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
  6531. goto out_ref;
  6532. ctx->file_data = file_data;
  6533. for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
  6534. struct fixed_file_table *table;
  6535. unsigned index;
  6536. if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
  6537. ret = -EFAULT;
  6538. goto out_fput;
  6539. }
  6540. /* allow sparse sets */
  6541. if (fd == -1)
  6542. continue;
  6543. file = fget(fd);
  6544. ret = -EBADF;
  6545. if (!file)
  6546. goto out_fput;
  6547. /*
  6548. * Don't allow io_uring instances to be registered. If UNIX
  6549. * isn't enabled, then this causes a reference cycle and this
  6550. * instance can never get freed. If UNIX is enabled we'll
  6551. * handle it just fine, but there's still no point in allowing
  6552. * a ring fd as it doesn't support regular read/write anyway.
  6553. */
  6554. if (file->f_op == &io_uring_fops) {
  6555. fput(file);
  6556. goto out_fput;
  6557. }
  6558. table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
  6559. index = i & IORING_FILE_TABLE_MASK;
  6560. table->files[index] = file;
  6561. }
  6562. ret = io_sqe_files_scm(ctx);
  6563. if (ret) {
  6564. io_sqe_files_unregister(ctx);
  6565. return ret;
  6566. }
  6567. ref_node = alloc_fixed_file_ref_node(ctx);
  6568. if (!ref_node) {
  6569. io_sqe_files_unregister(ctx);
  6570. return -ENOMEM;
  6571. }
  6572. io_sqe_files_set_node(file_data, ref_node);
  6573. return ret;
  6574. out_fput:
  6575. for (i = 0; i < ctx->nr_user_files; i++) {
  6576. file = io_file_from_index(ctx, i);
  6577. if (file)
  6578. fput(file);
  6579. }
  6580. for (i = 0; i < nr_tables; i++)
  6581. kfree(file_data->table[i].files);
  6582. ctx->nr_user_files = 0;
  6583. out_ref:
  6584. percpu_ref_exit(&file_data->refs);
  6585. out_free:
  6586. kfree(file_data->table);
  6587. kfree(file_data);
  6588. ctx->file_data = NULL;
  6589. return ret;
  6590. }
  6591. static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
  6592. int index)
  6593. {
  6594. #if defined(CONFIG_UNIX)
  6595. struct sock *sock = ctx->ring_sock->sk;
  6596. struct sk_buff_head *head = &sock->sk_receive_queue;
  6597. struct sk_buff *skb;
  6598. /*
  6599. * See if we can merge this file into an existing skb SCM_RIGHTS
  6600. * file set. If there's no room, fall back to allocating a new skb
  6601. * and filling it in.
  6602. */
  6603. spin_lock_irq(&head->lock);
  6604. skb = skb_peek(head);
  6605. if (skb) {
  6606. struct scm_fp_list *fpl = UNIXCB(skb).fp;
  6607. if (fpl->count < SCM_MAX_FD) {
  6608. __skb_unlink(skb, head);
  6609. spin_unlock_irq(&head->lock);
  6610. fpl->fp[fpl->count] = get_file(file);
  6611. unix_inflight(fpl->user, fpl->fp[fpl->count]);
  6612. fpl->count++;
  6613. spin_lock_irq(&head->lock);
  6614. __skb_queue_head(head, skb);
  6615. } else {
  6616. skb = NULL;
  6617. }
  6618. }
  6619. spin_unlock_irq(&head->lock);
  6620. if (skb) {
  6621. fput(file);
  6622. return 0;
  6623. }
  6624. return __io_sqe_files_scm(ctx, 1, index);
  6625. #else
  6626. return 0;
  6627. #endif
  6628. }
  6629. static int io_queue_file_removal(struct fixed_file_data *data,
  6630. struct file *file)
  6631. {
  6632. struct io_file_put *pfile;
  6633. struct fixed_file_ref_node *ref_node = data->node;
  6634. pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
  6635. if (!pfile)
  6636. return -ENOMEM;
  6637. pfile->file = file;
  6638. list_add(&pfile->list, &ref_node->file_list);
  6639. return 0;
  6640. }
  6641. static int __io_sqe_files_update(struct io_ring_ctx *ctx,
  6642. struct io_uring_files_update *up,
  6643. unsigned nr_args)
  6644. {
  6645. struct fixed_file_data *data = ctx->file_data;
  6646. struct fixed_file_ref_node *ref_node;
  6647. struct file *file;
  6648. __s32 __user *fds;
  6649. int fd, i, err;
  6650. __u32 done;
  6651. bool needs_switch = false;
  6652. if (check_add_overflow(up->offset, nr_args, &done))
  6653. return -EOVERFLOW;
  6654. if (done > ctx->nr_user_files)
  6655. return -EINVAL;
  6656. ref_node = alloc_fixed_file_ref_node(ctx);
  6657. if (!ref_node)
  6658. return -ENOMEM;
  6659. done = 0;
  6660. fds = u64_to_user_ptr(up->fds);
  6661. while (nr_args) {
  6662. struct fixed_file_table *table;
  6663. unsigned index;
  6664. err = 0;
  6665. if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
  6666. err = -EFAULT;
  6667. break;
  6668. }
  6669. i = array_index_nospec(up->offset, ctx->nr_user_files);
  6670. table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
  6671. index = i & IORING_FILE_TABLE_MASK;
  6672. if (table->files[index]) {
  6673. file = table->files[index];
  6674. err = io_queue_file_removal(data, file);
  6675. if (err)
  6676. break;
  6677. table->files[index] = NULL;
  6678. needs_switch = true;
  6679. }
  6680. if (fd != -1) {
  6681. file = fget(fd);
  6682. if (!file) {
  6683. err = -EBADF;
  6684. break;
  6685. }
  6686. /*
  6687. * Don't allow io_uring instances to be registered. If
  6688. * UNIX isn't enabled, then this causes a reference
  6689. * cycle and this instance can never get freed. If UNIX
  6690. * is enabled we'll handle it just fine, but there's
  6691. * still no point in allowing a ring fd as it doesn't
  6692. * support regular read/write anyway.
  6693. */
  6694. if (file->f_op == &io_uring_fops) {
  6695. fput(file);
  6696. err = -EBADF;
  6697. break;
  6698. }
  6699. table->files[index] = file;
  6700. err = io_sqe_file_register(ctx, file, i);
  6701. if (err) {
  6702. table->files[index] = NULL;
  6703. fput(file);
  6704. break;
  6705. }
  6706. }
  6707. nr_args--;
  6708. done++;
  6709. up->offset++;
  6710. }
  6711. if (needs_switch) {
  6712. percpu_ref_kill(&data->node->refs);
  6713. io_sqe_files_set_node(data, ref_node);
  6714. } else
  6715. destroy_fixed_file_ref_node(ref_node);
  6716. return done ? done : err;
  6717. }
  6718. static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
  6719. unsigned nr_args)
  6720. {
  6721. struct io_uring_files_update up;
  6722. if (!ctx->file_data)
  6723. return -ENXIO;
  6724. if (!nr_args)
  6725. return -EINVAL;
  6726. if (copy_from_user(&up, arg, sizeof(up)))
  6727. return -EFAULT;
  6728. if (up.resv)
  6729. return -EINVAL;
  6730. return __io_sqe_files_update(ctx, &up, nr_args);
  6731. }
  6732. static void io_free_work(struct io_wq_work *work)
  6733. {
  6734. struct io_kiocb *req = container_of(work, struct io_kiocb, work);
  6735. /* Consider that io_steal_work() relies on this ref */
  6736. io_put_req(req);
  6737. }
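/*
 * Set up the io-wq used for async punts. By default a new workqueue is
 * created with a concurrency of min(sq_entries, 4 * online CPUs); with
 * IORING_SETUP_ATTACH_WQ the io-wq of an existing ring (identified by
 * p->wq_fd) is shared instead.
 */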
  6738. static int io_init_wq_offload(struct io_ring_ctx *ctx,
  6739. struct io_uring_params *p)
  6740. {
  6741. struct io_wq_data data;
  6742. struct fd f;
  6743. struct io_ring_ctx *ctx_attach;
  6744. unsigned int concurrency;
  6745. int ret = 0;
  6746. data.user = ctx->user;
  6747. data.free_work = io_free_work;
  6748. data.do_work = io_wq_submit_work;
  6749. if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
  6750. /* Do QD, or 4 * CPUS, whatever is smallest */
  6751. concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
  6752. ctx->io_wq = io_wq_create(concurrency, &data);
  6753. if (IS_ERR(ctx->io_wq)) {
  6754. ret = PTR_ERR(ctx->io_wq);
  6755. ctx->io_wq = NULL;
  6756. }
  6757. return ret;
  6758. }
  6759. f = fdget(p->wq_fd);
  6760. if (!f.file)
  6761. return -EBADF;
  6762. if (f.file->f_op != &io_uring_fops) {
  6763. ret = -EINVAL;
  6764. goto out_fput;
  6765. }
  6766. ctx_attach = f.file->private_data;
  6767. /* @io_wq is protected by holding the fd */
  6768. if (!io_wq_get(ctx_attach->io_wq, &data)) {
  6769. ret = -EINVAL;
  6770. goto out_fput;
  6771. }
  6772. ctx->io_wq = ctx_attach->io_wq;
  6773. out_fput:
  6774. fdput(f);
  6775. return ret;
  6776. }
  6777. static int io_uring_alloc_task_context(struct task_struct *task)
  6778. {
  6779. struct io_uring_task *tctx;
  6780. int ret;
  6781. tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
  6782. if (unlikely(!tctx))
  6783. return -ENOMEM;
  6784. ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
  6785. if (unlikely(ret)) {
  6786. kfree(tctx);
  6787. return ret;
  6788. }
  6789. xa_init(&tctx->xa);
  6790. init_waitqueue_head(&tctx->wait);
  6791. tctx->last = NULL;
  6792. atomic_set(&tctx->in_idle, 0);
  6793. tctx->sqpoll = false;
  6794. io_init_identity(&tctx->__identity);
  6795. tctx->identity = &tctx->__identity;
  6796. task->io_uring = tctx;
  6797. return 0;
  6798. }
  6799. void __io_uring_free(struct task_struct *tsk)
  6800. {
  6801. struct io_uring_task *tctx = tsk->io_uring;
  6802. WARN_ON_ONCE(!xa_empty(&tctx->xa));
  6803. WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
  6804. if (tctx->identity != &tctx->__identity)
  6805. kfree(tctx->identity);
  6806. percpu_counter_destroy(&tctx->inflight);
  6807. kfree(tctx);
  6808. tsk->io_uring = NULL;
  6809. }
  6810. static int io_sq_offload_create(struct io_ring_ctx *ctx,
  6811. struct io_uring_params *p)
  6812. {
  6813. int ret;
  6814. if (ctx->flags & IORING_SETUP_SQPOLL) {
  6815. struct io_sq_data *sqd;
  6816. ret = -EPERM;
  6817. if (!capable(CAP_SYS_ADMIN))
  6818. goto err;
  6819. sqd = io_get_sq_data(p);
  6820. if (IS_ERR(sqd)) {
  6821. ret = PTR_ERR(sqd);
  6822. goto err;
  6823. }
  6824. ctx->sq_data = sqd;
  6825. io_sq_thread_park(sqd);
  6826. mutex_lock(&sqd->ctx_lock);
  6827. list_add(&ctx->sqd_list, &sqd->ctx_new_list);
  6828. mutex_unlock(&sqd->ctx_lock);
  6829. io_sq_thread_unpark(sqd);
  6830. ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
  6831. if (!ctx->sq_thread_idle)
  6832. ctx->sq_thread_idle = HZ;
  6833. if (sqd->thread)
  6834. goto done;
  6835. if (p->flags & IORING_SETUP_SQ_AFF) {
  6836. int cpu = p->sq_thread_cpu;
  6837. ret = -EINVAL;
  6838. if (cpu >= nr_cpu_ids)
  6839. goto err;
  6840. if (!cpu_online(cpu))
  6841. goto err;
  6842. sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
  6843. cpu, "io_uring-sq");
  6844. } else {
  6845. sqd->thread = kthread_create(io_sq_thread, sqd,
  6846. "io_uring-sq");
  6847. }
  6848. if (IS_ERR(sqd->thread)) {
  6849. ret = PTR_ERR(sqd->thread);
  6850. sqd->thread = NULL;
  6851. goto err;
  6852. }
  6853. ret = io_uring_alloc_task_context(sqd->thread);
  6854. if (ret)
  6855. goto err;
  6856. } else if (p->flags & IORING_SETUP_SQ_AFF) {
  6857. /* Can't have SQ_AFF without SQPOLL */
  6858. ret = -EINVAL;
  6859. goto err;
  6860. }
  6861. done:
  6862. ret = io_init_wq_offload(ctx, p);
  6863. if (ret)
  6864. goto err;
  6865. return 0;
  6866. err:
  6867. io_finish_async(ctx);
  6868. return ret;
  6869. }
  6870. static void io_sq_offload_start(struct io_ring_ctx *ctx)
  6871. {
  6872. struct io_sq_data *sqd = ctx->sq_data;
  6873. ctx->flags &= ~IORING_SETUP_R_DISABLED;
  6874. if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd && sqd->thread)
  6875. wake_up_process(sqd->thread);
  6876. }
  6877. static inline void __io_unaccount_mem(struct user_struct *user,
  6878. unsigned long nr_pages)
  6879. {
  6880. atomic_long_sub(nr_pages, &user->locked_vm);
  6881. }
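/*
 * Charge nr_pages against the user's RLIMIT_MEMLOCK. The cmpxchg loop
 * makes the check-and-add atomic, so concurrent registrations cannot
 * push locked_vm past the limit.
 */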
  6882. static inline int __io_account_mem(struct user_struct *user,
  6883. unsigned long nr_pages)
  6884. {
  6885. unsigned long page_limit, cur_pages, new_pages;
  6886. /* Don't allow more pages than we can safely lock */
  6887. page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  6888. do {
  6889. cur_pages = atomic_long_read(&user->locked_vm);
  6890. new_pages = cur_pages + nr_pages;
  6891. if (new_pages > page_limit)
  6892. return -ENOMEM;
  6893. } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
  6894. new_pages) != cur_pages);
  6895. return 0;
  6896. }
  6897. static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
  6898. enum io_mem_account acct)
  6899. {
  6900. if (ctx->limit_mem)
  6901. __io_unaccount_mem(ctx->user, nr_pages);
  6902. if (ctx->mm_account) {
  6903. if (acct == ACCT_LOCKED)
  6904. ctx->mm_account->locked_vm -= nr_pages;
  6905. else if (acct == ACCT_PINNED)
  6906. atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
  6907. }
  6908. }
  6909. static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
  6910. enum io_mem_account acct)
  6911. {
  6912. int ret;
  6913. if (ctx->limit_mem) {
  6914. ret = __io_account_mem(ctx->user, nr_pages);
  6915. if (ret)
  6916. return ret;
  6917. }
  6918. if (ctx->mm_account) {
  6919. if (acct == ACCT_LOCKED)
  6920. ctx->mm_account->locked_vm += nr_pages;
  6921. else if (acct == ACCT_PINNED)
  6922. atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
  6923. }
  6924. return 0;
  6925. }
  6926. static void io_mem_free(void *ptr)
  6927. {
  6928. struct page *page;
  6929. if (!ptr)
  6930. return;
  6931. page = virt_to_head_page(ptr);
  6932. if (put_page_testzero(page))
  6933. free_compound_page(page);
  6934. }
  6935. static void *io_mem_alloc(size_t size)
  6936. {
  6937. gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
  6938. __GFP_NORETRY;
  6939. return (void *) __get_free_pages(gfp_flags, get_order(size));
  6940. }
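/*
 * Size of the rings allocation: struct io_rings with its trailing CQE
 * array, aligned to a cacheline on SMP, followed by the u32 SQ index
 * array. The offset of the SQ array is returned via @sq_offset, and
 * SIZE_MAX signals overflow.
 */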
  6941. static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
  6942. size_t *sq_offset)
  6943. {
  6944. struct io_rings *rings;
  6945. size_t off, sq_array_size;
  6946. off = struct_size(rings, cqes, cq_entries);
  6947. if (off == SIZE_MAX)
  6948. return SIZE_MAX;
  6949. #ifdef CONFIG_SMP
  6950. off = ALIGN(off, SMP_CACHE_BYTES);
  6951. if (off == 0)
  6952. return SIZE_MAX;
  6953. #endif
  6954. if (sq_offset)
  6955. *sq_offset = off;
  6956. sq_array_size = array_size(sizeof(u32), sq_entries);
  6957. if (sq_array_size == SIZE_MAX)
  6958. return SIZE_MAX;
  6959. if (check_add_overflow(off, sq_array_size, &off))
  6960. return SIZE_MAX;
  6961. return off;
  6962. }
  6963. static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
  6964. {
  6965. size_t pages;
  6966. pages = (size_t)1 << get_order(
  6967. rings_size(sq_entries, cq_entries, NULL));
  6968. pages += (size_t)1 << get_order(
  6969. array_size(sizeof(struct io_uring_sqe), sq_entries));
  6970. return pages;
  6971. }
  6972. static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
  6973. {
  6974. int i, j;
  6975. if (!ctx->user_bufs)
  6976. return -ENXIO;
  6977. for (i = 0; i < ctx->nr_user_bufs; i++) {
  6978. struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
  6979. for (j = 0; j < imu->nr_bvecs; j++)
  6980. unpin_user_page(imu->bvec[j].bv_page);
  6981. if (imu->acct_pages)
  6982. io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
  6983. kvfree(imu->bvec);
  6984. imu->nr_bvecs = 0;
  6985. }
  6986. kfree(ctx->user_bufs);
  6987. ctx->user_bufs = NULL;
  6988. ctx->nr_user_bufs = 0;
  6989. return 0;
  6990. }
  6991. static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
  6992. void __user *arg, unsigned index)
  6993. {
  6994. struct iovec __user *src;
  6995. #ifdef CONFIG_COMPAT
  6996. if (ctx->compat) {
  6997. struct compat_iovec __user *ciovs;
  6998. struct compat_iovec ciov;
  6999. ciovs = (struct compat_iovec __user *) arg;
  7000. if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
  7001. return -EFAULT;
  7002. dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
  7003. dst->iov_len = ciov.iov_len;
  7004. return 0;
  7005. }
  7006. #endif
  7007. src = (struct iovec __user *) arg;
  7008. if (copy_from_user(dst, &src[index], sizeof(*dst)))
  7009. return -EFAULT;
  7010. return 0;
  7011. }
/*
 * Not super efficient, but this only runs at registration time. And we do
 * cache the last compound head, so generally we'll only do a full search if
 * we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
  7021. static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
  7022. int nr_pages, struct page *hpage)
  7023. {
  7024. int i, j;
  7025. /* check current page array */
  7026. for (i = 0; i < nr_pages; i++) {
  7027. if (!PageCompound(pages[i]))
  7028. continue;
  7029. if (compound_head(pages[i]) == hpage)
  7030. return true;
  7031. }
  7032. /* check previously registered pages */
  7033. for (i = 0; i < ctx->nr_user_bufs; i++) {
  7034. struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
  7035. for (j = 0; j < imu->nr_bvecs; j++) {
  7036. if (!PageCompound(imu->bvec[j].bv_page))
  7037. continue;
  7038. if (compound_head(imu->bvec[j].bv_page) == hpage)
  7039. return true;
  7040. }
  7041. }
  7042. return false;
  7043. }
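/*
 * Account the pages pinned for a registered buffer. Compound (huge) pages
 * are accounted once at their full size, with headpage_already_acct()
 * guarding against counting the same head page twice.
 */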
  7044. static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
  7045. int nr_pages, struct io_mapped_ubuf *imu,
  7046. struct page **last_hpage)
  7047. {
  7048. int i, ret;
  7049. for (i = 0; i < nr_pages; i++) {
  7050. if (!PageCompound(pages[i])) {
  7051. imu->acct_pages++;
  7052. } else {
  7053. struct page *hpage;
  7054. hpage = compound_head(pages[i]);
  7055. if (hpage == *last_hpage)
  7056. continue;
  7057. *last_hpage = hpage;
  7058. if (headpage_already_acct(ctx, pages, i, hpage))
  7059. continue;
  7060. imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
  7061. }
  7062. }
  7063. if (!imu->acct_pages)
  7064. return 0;
  7065. ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
  7066. if (ret)
  7067. imu->acct_pages = 0;
  7068. return ret;
  7069. }
  7070. static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
  7071. unsigned nr_args)
  7072. {
  7073. struct vm_area_struct **vmas = NULL;
  7074. struct page **pages = NULL;
  7075. struct page *last_hpage = NULL;
  7076. int i, j, got_pages = 0;
  7077. int ret = -EINVAL;
  7078. if (ctx->user_bufs)
  7079. return -EBUSY;
  7080. if (!nr_args || nr_args > UIO_MAXIOV)
  7081. return -EINVAL;
  7082. ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
  7083. GFP_KERNEL);
  7084. if (!ctx->user_bufs)
  7085. return -ENOMEM;
  7086. for (i = 0; i < nr_args; i++) {
  7087. struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
  7088. unsigned long off, start, end, ubuf;
  7089. int pret, nr_pages;
  7090. struct iovec iov;
  7091. size_t size;
  7092. ret = io_copy_iov(ctx, &iov, arg, i);
  7093. if (ret)
  7094. goto err;
  7095. /*
  7096. * Don't impose further limits on the size and buffer
  7097. * constraints here, we'll -EINVAL later when IO is
  7098. * submitted if they are wrong.
  7099. */
  7100. ret = -EFAULT;
  7101. if (!iov.iov_base || !iov.iov_len)
  7102. goto err;
  7103. /* arbitrary limit, but we need something */
  7104. if (iov.iov_len > SZ_1G)
  7105. goto err;
  7106. ubuf = (unsigned long) iov.iov_base;
  7107. end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
  7108. start = ubuf >> PAGE_SHIFT;
  7109. nr_pages = end - start;
  7110. ret = 0;
  7111. if (!pages || nr_pages > got_pages) {
  7112. kvfree(vmas);
  7113. kvfree(pages);
  7114. pages = kvmalloc_array(nr_pages, sizeof(struct page *),
  7115. GFP_KERNEL);
  7116. vmas = kvmalloc_array(nr_pages,
  7117. sizeof(struct vm_area_struct *),
  7118. GFP_KERNEL);
  7119. if (!pages || !vmas) {
  7120. ret = -ENOMEM;
  7121. goto err;
  7122. }
  7123. got_pages = nr_pages;
  7124. }
  7125. imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
  7126. GFP_KERNEL);
  7127. ret = -ENOMEM;
  7128. if (!imu->bvec)
  7129. goto err;
  7130. ret = 0;
  7131. mmap_read_lock(current->mm);
  7132. pret = pin_user_pages(ubuf, nr_pages,
  7133. FOLL_WRITE | FOLL_LONGTERM,
  7134. pages, vmas);
  7135. if (pret == nr_pages) {
  7136. /* don't support file backed memory */
  7137. for (j = 0; j < nr_pages; j++) {
  7138. struct vm_area_struct *vma = vmas[j];
  7139. if (vma->vm_file &&
  7140. !is_file_hugepages(vma->vm_file)) {
  7141. ret = -EOPNOTSUPP;
  7142. break;
  7143. }
  7144. }
  7145. } else {
  7146. ret = pret < 0 ? pret : -EFAULT;
  7147. }
  7148. mmap_read_unlock(current->mm);
  7149. if (ret) {
  7150. /*
  7151. * if we did partial map, or found file backed vmas,
  7152. * release any pages we did get
  7153. */
  7154. if (pret > 0)
  7155. unpin_user_pages(pages, pret);
  7156. kvfree(imu->bvec);
  7157. goto err;
  7158. }
  7159. ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage);
  7160. if (ret) {
  7161. unpin_user_pages(pages, pret);
  7162. kvfree(imu->bvec);
  7163. goto err;
  7164. }
  7165. off = ubuf & ~PAGE_MASK;
  7166. size = iov.iov_len;
  7167. for (j = 0; j < nr_pages; j++) {
  7168. size_t vec_len;
  7169. vec_len = min_t(size_t, size, PAGE_SIZE - off);
  7170. imu->bvec[j].bv_page = pages[j];
  7171. imu->bvec[j].bv_len = vec_len;
  7172. imu->bvec[j].bv_offset = off;
  7173. off = 0;
  7174. size -= vec_len;
  7175. }
  7176. /* store original address for later verification */
  7177. imu->ubuf = ubuf;
  7178. imu->len = iov.iov_len;
  7179. imu->nr_bvecs = nr_pages;
  7180. ctx->nr_user_bufs++;
  7181. }
  7182. kvfree(pages);
  7183. kvfree(vmas);
  7184. return 0;
  7185. err:
  7186. kvfree(pages);
  7187. kvfree(vmas);
  7188. io_sqe_buffer_unregister(ctx);
  7189. return ret;
  7190. }
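/*
 * Register an eventfd to be signalled on CQ ring completions; only one
 * eventfd may be registered per ring at a time.
 */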
  7191. static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
  7192. {
  7193. __s32 __user *fds = arg;
  7194. int fd;
  7195. if (ctx->cq_ev_fd)
  7196. return -EBUSY;
  7197. if (copy_from_user(&fd, fds, sizeof(*fds)))
  7198. return -EFAULT;
  7199. ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
  7200. if (IS_ERR(ctx->cq_ev_fd)) {
  7201. int ret = PTR_ERR(ctx->cq_ev_fd);
  7202. ctx->cq_ev_fd = NULL;
  7203. return ret;
  7204. }
  7205. return 0;
  7206. }
  7207. static int io_eventfd_unregister(struct io_ring_ctx *ctx)
  7208. {
  7209. if (ctx->cq_ev_fd) {
  7210. eventfd_ctx_put(ctx->cq_ev_fd);
  7211. ctx->cq_ev_fd = NULL;
  7212. return 0;
  7213. }
  7214. return -ENXIO;
  7215. }
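/* Release all provided buffer groups when the ring is torn down */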
  7216. static void io_destroy_buffers(struct io_ring_ctx *ctx)
  7217. {
  7218. struct io_buffer *buf;
  7219. unsigned long index;
  7220. xa_for_each(&ctx->io_buffers, index, buf)
  7221. __io_remove_buffers(ctx, buf, index, -1U);
  7222. }
  7223. static void io_ring_ctx_free(struct io_ring_ctx *ctx)
  7224. {
  7225. io_finish_async(ctx);
  7226. io_sqe_buffer_unregister(ctx);
  7227. if (ctx->sqo_task) {
  7228. put_task_struct(ctx->sqo_task);
  7229. ctx->sqo_task = NULL;
  7230. mmdrop(ctx->mm_account);
  7231. ctx->mm_account = NULL;
  7232. }
  7233. #ifdef CONFIG_BLK_CGROUP
  7234. if (ctx->sqo_blkcg_css)
  7235. css_put(ctx->sqo_blkcg_css);
  7236. #endif
  7237. io_sqe_files_unregister(ctx);
  7238. io_eventfd_unregister(ctx);
  7239. io_destroy_buffers(ctx);
  7240. #if defined(CONFIG_UNIX)
  7241. if (ctx->ring_sock) {
  7242. ctx->ring_sock->file = NULL; /* so that iput() is called */
  7243. sock_release(ctx->ring_sock);
  7244. }
  7245. #endif
  7246. io_mem_free(ctx->rings);
  7247. io_mem_free(ctx->sq_sqes);
  7248. percpu_ref_exit(&ctx->refs);
  7249. free_uid(ctx->user);
  7250. put_cred(ctx->creds);
  7251. kfree(ctx->cancel_hash);
  7252. kmem_cache_free(req_cachep, ctx->fallback_req);
  7253. kfree(ctx);
  7254. }
  7255. static __poll_t io_uring_poll(struct file *file, poll_table *wait)
  7256. {
  7257. struct io_ring_ctx *ctx = file->private_data;
  7258. __poll_t mask = 0;
  7259. poll_wait(file, &ctx->cq_wait, wait);
  7260. /*
  7261. * synchronizes with barrier from wq_has_sleeper call in
  7262. * io_commit_cqring
  7263. */
  7264. smp_rmb();
  7265. if (!io_sqring_full(ctx))
  7266. mask |= EPOLLOUT | EPOLLWRNORM;
/*
 * Don't flush the cqring overflow list here, just do a simple check.
 * Otherwise there could possibly be an ABBA deadlock:
 *      CPU0                    CPU1
 *      ----                    ----
 * lock(&ctx->uring_lock);
 *                              lock(&ep->mtx);
 *                              lock(&ctx->uring_lock);
 * lock(&ep->mtx);
 *
 * Users may get EPOLLIN while seeing nothing in the cqring, which
 * pushes them to do the flush.
 */
  7280. if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
  7281. mask |= EPOLLIN | EPOLLRDNORM;
  7282. return mask;
  7283. }
  7284. static int io_uring_fasync(int fd, struct file *file, int on)
  7285. {
  7286. struct io_ring_ctx *ctx = file->private_data;
  7287. return fasync_helper(fd, file, on, &ctx->cq_fasync);
  7288. }
  7289. static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
  7290. {
  7291. struct io_identity *iod;
  7292. iod = xa_erase(&ctx->personalities, id);
  7293. if (iod) {
  7294. put_cred(iod->creds);
  7295. if (refcount_dec_and_test(&iod->count))
  7296. kfree(iod);
  7297. return 0;
  7298. }
  7299. return -EINVAL;
  7300. }
  7301. static void io_ring_exit_work(struct work_struct *work)
  7302. {
  7303. struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
  7304. exit_work);
  7305. /*
  7306. * If we're doing polled IO and end up having requests being
  7307. * submitted async (out-of-line), then completions can come in while
  7308. * we're waiting for refs to drop. We need to reap these manually,
  7309. * as nobody else will be looking for them.
  7310. */
  7311. do {
  7312. io_iopoll_try_reap_events(ctx);
  7313. } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
  7314. io_ring_ctx_free(ctx);
  7315. }
  7316. static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
  7317. {
  7318. struct io_kiocb *req = container_of(work, struct io_kiocb, work);
  7319. return req->ctx == data;
  7320. }
  7321. static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
  7322. {
  7323. unsigned long index;
struct io_identity *iod;
  7325. mutex_lock(&ctx->uring_lock);
  7326. percpu_ref_kill(&ctx->refs);
  7327. /* if force is set, the ring is going away. always drop after that */
  7328. if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
  7329. ctx->sqo_dead = 1;
  7330. ctx->cq_overflow_flushed = 1;
  7331. if (ctx->rings)
  7332. __io_cqring_overflow_flush(ctx, true, NULL, NULL);
  7333. mutex_unlock(&ctx->uring_lock);
  7334. io_kill_timeouts(ctx, NULL, NULL);
  7335. io_poll_remove_all(ctx, NULL, NULL);
  7336. if (ctx->io_wq)
  7337. io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true);
  7338. /* if we failed setting up the ctx, we might not have any rings */
  7339. io_iopoll_try_reap_events(ctx);
  7340. xa_for_each(&ctx->personalities, index, iod)
  7341. io_unregister_personality(ctx, index);
  7342. /*
  7343. * Do this upfront, so we won't have a grace period where the ring
  7344. * is closed but resources aren't reaped yet. This can cause
  7345. * spurious failure in setting up a new ring.
  7346. */
  7347. io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
  7348. ACCT_LOCKED);
  7349. INIT_WORK(&ctx->exit_work, io_ring_exit_work);
/*
 * Use system_unbound_wq to avoid spawning tons of event kworkers
 * if we're exiting a ton of rings at the same time. It just adds
 * noise and overhead; there's no discernible change in runtime
 * over using system_wq.
 */
  7356. queue_work(system_unbound_wq, &ctx->exit_work);
  7357. }
  7358. static int io_uring_release(struct inode *inode, struct file *file)
  7359. {
  7360. struct io_ring_ctx *ctx = file->private_data;
  7361. file->private_data = NULL;
  7362. io_ring_ctx_wait_and_kill(ctx);
  7363. return 0;
  7364. }
  7365. struct io_task_cancel {
  7366. struct task_struct *task;
  7367. struct files_struct *files;
  7368. };
  7369. static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
  7370. {
  7371. struct io_kiocb *req = container_of(work, struct io_kiocb, work);
  7372. struct io_task_cancel *cancel = data;
  7373. bool ret;
  7374. if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
  7375. unsigned long flags;
  7376. struct io_ring_ctx *ctx = req->ctx;
  7377. /* protect against races with linked timeouts */
  7378. spin_lock_irqsave(&ctx->completion_lock, flags);
  7379. ret = io_match_task(req, cancel->task, cancel->files);
  7380. spin_unlock_irqrestore(&ctx->completion_lock, flags);
  7381. } else {
  7382. ret = io_match_task(req, cancel->task, cancel->files);
  7383. }
  7384. return ret;
  7385. }
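/*
 * Pull any deferred requests matching @task/@files off the defer list and
 * complete them with -ECANCELED.
 */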
  7386. static void io_cancel_defer_files(struct io_ring_ctx *ctx,
  7387. struct task_struct *task,
  7388. struct files_struct *files)
  7389. {
  7390. struct io_defer_entry *de = NULL;
  7391. LIST_HEAD(list);
  7392. spin_lock_irq(&ctx->completion_lock);
  7393. list_for_each_entry_reverse(de, &ctx->defer_list, list) {
  7394. if (io_match_task(de->req, task, files)) {
  7395. list_cut_position(&list, &ctx->defer_list, &de->list);
  7396. break;
  7397. }
  7398. }
  7399. spin_unlock_irq(&ctx->completion_lock);
  7400. while (!list_empty(&list)) {
  7401. de = list_first_entry(&list, struct io_defer_entry, list);
  7402. list_del_init(&de->list);
  7403. req_set_fail_links(de->req);
  7404. io_put_req(de->req);
  7405. io_req_complete(de->req, -ECANCELED);
  7406. kfree(de);
  7407. }
  7408. }
  7409. static int io_uring_count_inflight(struct io_ring_ctx *ctx,
  7410. struct task_struct *task,
  7411. struct files_struct *files)
  7412. {
  7413. struct io_kiocb *req;
  7414. int cnt = 0;
  7415. spin_lock_irq(&ctx->inflight_lock);
  7416. list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
  7417. cnt += io_match_task(req, task, files);
  7418. spin_unlock_irq(&ctx->inflight_lock);
  7419. return cnt;
  7420. }
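/*
 * Cancel inflight requests that match @task/@files, waiting and retrying
 * until the inflight list is drained. Only sleeps if no progress was made
 * since the last pass.
 */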
  7421. static void io_uring_cancel_files(struct io_ring_ctx *ctx,
  7422. struct task_struct *task,
  7423. struct files_struct *files)
  7424. {
  7425. while (!list_empty_careful(&ctx->inflight_list)) {
  7426. struct io_task_cancel cancel = { .task = task, .files = files };
  7427. DEFINE_WAIT(wait);
  7428. int inflight;
  7429. inflight = io_uring_count_inflight(ctx, task, files);
  7430. if (!inflight)
  7431. break;
  7432. io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
  7433. io_poll_remove_all(ctx, task, files);
  7434. io_kill_timeouts(ctx, task, files);
  7435. /* cancellations _may_ trigger task work */
  7436. io_run_task_work();
  7437. prepare_to_wait(&task->io_uring->wait, &wait,
  7438. TASK_UNINTERRUPTIBLE);
  7439. if (inflight == io_uring_count_inflight(ctx, task, files))
  7440. schedule();
  7441. finish_wait(&task->io_uring->wait, &wait);
  7442. }
  7443. }
  7444. static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
  7445. struct task_struct *task)
  7446. {
  7447. while (1) {
  7448. struct io_task_cancel cancel = { .task = task, .files = NULL, };
  7449. enum io_wq_cancel cret;
  7450. bool ret = false;
  7451. cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
  7452. if (cret != IO_WQ_CANCEL_NOTFOUND)
  7453. ret = true;
  7454. /* SQPOLL thread does its own polling */
  7455. if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
  7456. while (!list_empty_careful(&ctx->iopoll_list)) {
  7457. io_iopoll_try_reap_events(ctx);
  7458. ret = true;
  7459. }
  7460. }
  7461. ret |= io_poll_remove_all(ctx, task, NULL);
  7462. ret |= io_kill_timeouts(ctx, task, NULL);
  7463. if (!ret)
  7464. break;
  7465. io_run_task_work();
  7466. cond_resched();
  7467. }
  7468. }
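/*
 * Mark an SQPOLL ring as dead so that further submissions fail, and set
 * the wakeup flag so waiters re-enter the ring and observe the error.
 */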
  7469. static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
  7470. {
  7471. mutex_lock(&ctx->uring_lock);
  7472. ctx->sqo_dead = 1;
  7473. if (ctx->flags & IORING_SETUP_R_DISABLED)
  7474. io_sq_offload_start(ctx);
  7475. mutex_unlock(&ctx->uring_lock);
  7476. /* make sure callers enter the ring to get error */
  7477. if (ctx->rings)
  7478. io_ring_set_wakeup_flag(ctx);
  7479. }
/*
 * We need to iteratively cancel requests, in case a request has dependent
 * hard links. These persist even when cancellation fails, hence keep
 * looping until none are found.
 */
  7485. static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
  7486. struct files_struct *files)
  7487. {
  7488. struct task_struct *task = current;
  7489. if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
  7490. io_disable_sqo_submit(ctx);
  7491. task = ctx->sq_data->thread;
  7492. atomic_inc(&task->io_uring->in_idle);
  7493. io_sq_thread_park(ctx->sq_data);
  7494. }
  7495. io_cancel_defer_files(ctx, task, files);
  7496. io_cqring_overflow_flush(ctx, true, task, files);
  7497. if (!files)
  7498. __io_uring_cancel_task_requests(ctx, task);
  7499. else
  7500. io_uring_cancel_files(ctx, task, files);
  7501. if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
  7502. atomic_dec(&task->io_uring->in_idle);
  7503. io_sq_thread_unpark(ctx->sq_data);
  7504. }
  7505. }
  7506. /*
  7507. * Note that this task has used io_uring. We use it for cancelation purposes.
  7508. */
  7509. static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
  7510. {
  7511. struct io_uring_task *tctx = current->io_uring;
  7512. int ret;
  7513. if (unlikely(!tctx)) {
  7514. ret = io_uring_alloc_task_context(current);
  7515. if (unlikely(ret))
  7516. return ret;
  7517. tctx = current->io_uring;
  7518. }
  7519. if (tctx->last != file) {
  7520. void *old = xa_load(&tctx->xa, (unsigned long)file);
  7521. if (!old) {
  7522. get_file(file);
  7523. ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
  7524. file, GFP_KERNEL));
  7525. if (ret) {
  7526. fput(file);
  7527. return ret;
  7528. }
  7529. }
  7530. tctx->last = file;
  7531. }
  7532. /*
  7533. * This is race safe in that the task itself is doing this, hence it
  7534. * cannot be going through the exit/cancel paths at the same time.
  7535. * This cannot be modified while exit/cancel is running.
  7536. */
  7537. if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
  7538. tctx->sqpoll = true;
  7539. return 0;
  7540. }
  7541. /*
  7542. * Remove this io_uring_file -> task mapping.
  7543. */
  7544. static void io_uring_del_task_file(struct file *file)
  7545. {
  7546. struct io_uring_task *tctx = current->io_uring;
  7547. if (tctx->last == file)
  7548. tctx->last = NULL;
  7549. file = xa_erase(&tctx->xa, (unsigned long)file);
  7550. if (file)
  7551. fput(file);
  7552. }
  7553. static void io_uring_remove_task_files(struct io_uring_task *tctx)
  7554. {
  7555. struct file *file;
  7556. unsigned long index;
  7557. xa_for_each(&tctx->xa, index, file)
  7558. io_uring_del_task_file(file);
  7559. }
  7560. void __io_uring_files_cancel(struct files_struct *files)
  7561. {
  7562. struct io_uring_task *tctx = current->io_uring;
  7563. struct file *file;
  7564. unsigned long index;
  7565. /* make sure overflow events are dropped */
  7566. atomic_inc(&tctx->in_idle);
  7567. xa_for_each(&tctx->xa, index, file)
  7568. io_uring_cancel_task_requests(file->private_data, files);
  7569. atomic_dec(&tctx->in_idle);
  7570. if (files)
  7571. io_uring_remove_task_files(tctx);
  7572. }
  7573. static s64 tctx_inflight(struct io_uring_task *tctx)
  7574. {
  7575. unsigned long index;
  7576. struct file *file;
  7577. s64 inflight;
  7578. inflight = percpu_counter_sum(&tctx->inflight);
  7579. if (!tctx->sqpoll)
  7580. return inflight;
  7581. /*
  7582. * If we have SQPOLL rings, then we need to iterate and find them, and
  7583. * add the pending count for those.
  7584. */
  7585. xa_for_each(&tctx->xa, index, file) {
  7586. struct io_ring_ctx *ctx = file->private_data;
  7587. if (ctx->flags & IORING_SETUP_SQPOLL) {
  7588. struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
  7589. inflight += percpu_counter_sum(&__tctx->inflight);
  7590. }
  7591. }
  7592. return inflight;
  7593. }
  7594. /*
  7595. * Find any io_uring fd that this task has registered or done IO on, and cancel
  7596. * requests.
  7597. */
  7598. void __io_uring_task_cancel(void)
  7599. {
  7600. struct io_uring_task *tctx = current->io_uring;
  7601. DEFINE_WAIT(wait);
  7602. s64 inflight;
  7603. /* make sure overflow events are dropped */
  7604. atomic_inc(&tctx->in_idle);
  7605. /* trigger io_disable_sqo_submit() */
  7606. if (tctx->sqpoll)
  7607. __io_uring_files_cancel(NULL);
  7608. do {
  7609. /* read completions before cancelations */
  7610. inflight = tctx_inflight(tctx);
  7611. if (!inflight)
  7612. break;
  7613. __io_uring_files_cancel(NULL);
  7614. prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
  7615. /*
  7616. * If we've seen completions, retry without waiting. This
  7617. * avoids a race where a completion comes in before we did
  7618. * prepare_to_wait().
  7619. */
  7620. if (inflight == tctx_inflight(tctx))
  7621. schedule();
  7622. finish_wait(&tctx->wait, &wait);
  7623. } while (1);
  7624. atomic_dec(&tctx->in_idle);
  7625. io_uring_remove_task_files(tctx);
  7626. }
  7627. static int io_uring_flush(struct file *file, void *data)
  7628. {
  7629. struct io_uring_task *tctx = current->io_uring;
  7630. struct io_ring_ctx *ctx = file->private_data;
  7631. if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
  7632. io_uring_cancel_task_requests(ctx, NULL);
  7633. if (!tctx)
  7634. return 0;
  7635. /* we should have cancelled and erased it before PF_EXITING */
  7636. WARN_ON_ONCE((current->flags & PF_EXITING) &&
  7637. xa_load(&tctx->xa, (unsigned long)file));
/*
 * fput() is pending; f_count will be 2 if the only other ref is our
 * potential task file note. If the task is exiting, drop regardless of
 * the count.
 */
  7642. if (atomic_long_read(&file->f_count) != 2)
  7643. return 0;
  7644. if (ctx->flags & IORING_SETUP_SQPOLL) {
  7645. /* there is only one file note, which is owned by sqo_task */
  7646. WARN_ON_ONCE(ctx->sqo_task != current &&
  7647. xa_load(&tctx->xa, (unsigned long)file));
  7648. /* sqo_dead check is for when this happens after cancellation */
  7649. WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
  7650. !xa_load(&tctx->xa, (unsigned long)file));
  7651. io_disable_sqo_submit(ctx);
  7652. }
  7653. if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
  7654. io_uring_del_task_file(file);
  7655. return 0;
  7656. }
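/*
 * Translate an mmap offset (SQ/CQ rings or the SQE array) into its backing
 * kernel address and verify the requested size fits the allocation.
 */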
  7657. static void *io_uring_validate_mmap_request(struct file *file,
  7658. loff_t pgoff, size_t sz)
  7659. {
  7660. struct io_ring_ctx *ctx = file->private_data;
  7661. loff_t offset = pgoff << PAGE_SHIFT;
  7662. struct page *page;
  7663. void *ptr;
  7664. switch (offset) {
  7665. case IORING_OFF_SQ_RING:
  7666. case IORING_OFF_CQ_RING:
  7667. ptr = ctx->rings;
  7668. break;
  7669. case IORING_OFF_SQES:
  7670. ptr = ctx->sq_sqes;
  7671. break;
  7672. default:
  7673. return ERR_PTR(-EINVAL);
  7674. }
  7675. page = virt_to_head_page(ptr);
  7676. if (sz > page_size(page))
  7677. return ERR_PTR(-EINVAL);
  7678. return ptr;
  7679. }
  7680. #ifdef CONFIG_MMU
  7681. static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
  7682. {
  7683. size_t sz = vma->vm_end - vma->vm_start;
  7684. unsigned long pfn;
  7685. void *ptr;
  7686. ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
  7687. if (IS_ERR(ptr))
  7688. return PTR_ERR(ptr);
  7689. pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
  7690. return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
  7691. }
  7692. #else /* !CONFIG_MMU */
  7693. static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
  7694. {
  7695. return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
  7696. }
  7697. static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
  7698. {
  7699. return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
  7700. }
  7701. static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
  7702. unsigned long addr, unsigned long len,
  7703. unsigned long pgoff, unsigned long flags)
  7704. {
  7705. void *ptr;
  7706. ptr = io_uring_validate_mmap_request(file, pgoff, len);
  7707. if (IS_ERR(ptr))
  7708. return PTR_ERR(ptr);
  7709. return (unsigned long) ptr;
  7710. }
  7711. #endif /* !CONFIG_MMU */
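/*
 * Wait for free space in the SQ ring for IORING_ENTER_SQ_WAIT. Returns
 * -EOWNERDEAD if the SQPOLL ring has been marked dead, and stops waiting
 * early if a signal is pending.
 */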
  7712. static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
  7713. {
  7714. int ret = 0;
  7715. DEFINE_WAIT(wait);
  7716. do {
  7717. if (!io_sqring_full(ctx))
  7718. break;
  7719. prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
  7720. if (unlikely(ctx->sqo_dead)) {
  7721. ret = -EOWNERDEAD;
  7722. goto out;
  7723. }
  7724. if (!io_sqring_full(ctx))
  7725. break;
  7726. schedule();
  7727. } while (!signal_pending(current));
  7728. finish_wait(&ctx->sqo_sq_wait, &wait);
  7729. out:
  7730. return ret;
  7731. }
  7732. SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
  7733. u32, min_complete, u32, flags, const sigset_t __user *, sig,
  7734. size_t, sigsz)
  7735. {
  7736. struct io_ring_ctx *ctx;
  7737. long ret = -EBADF;
  7738. int submitted = 0;
  7739. struct fd f;
  7740. io_run_task_work();
  7741. if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
  7742. IORING_ENTER_SQ_WAIT))
  7743. return -EINVAL;
  7744. f = fdget(fd);
  7745. if (!f.file)
  7746. return -EBADF;
  7747. ret = -EOPNOTSUPP;
  7748. if (f.file->f_op != &io_uring_fops)
  7749. goto out_fput;
  7750. ret = -ENXIO;
  7751. ctx = f.file->private_data;
  7752. if (!percpu_ref_tryget(&ctx->refs))
  7753. goto out_fput;
  7754. ret = -EBADFD;
  7755. if (ctx->flags & IORING_SETUP_R_DISABLED)
  7756. goto out;
  7757. /*
  7758. * For SQ polling, the thread will do all submissions and completions.
  7759. * Just return the requested submit count, and wake the thread if
  7760. * we were asked to.
  7761. */
  7762. ret = 0;
  7763. if (ctx->flags & IORING_SETUP_SQPOLL) {
  7764. io_cqring_overflow_flush(ctx, false, NULL, NULL);
  7765. if (unlikely(ctx->sqo_dead)) {
  7766. ret = -EOWNERDEAD;
  7767. goto out;
  7768. }
  7769. if (flags & IORING_ENTER_SQ_WAKEUP)
  7770. wake_up(&ctx->sq_data->wait);
  7771. if (flags & IORING_ENTER_SQ_WAIT) {
  7772. ret = io_sqpoll_wait_sq(ctx);
  7773. if (ret)
  7774. goto out;
  7775. }
  7776. submitted = to_submit;
  7777. } else if (to_submit) {
  7778. ret = io_uring_add_task_file(ctx, f.file);
  7779. if (unlikely(ret))
  7780. goto out;
  7781. mutex_lock(&ctx->uring_lock);
  7782. submitted = io_submit_sqes(ctx, to_submit);
  7783. mutex_unlock(&ctx->uring_lock);
  7784. if (submitted != to_submit)
  7785. goto out;
  7786. }
  7787. if (flags & IORING_ENTER_GETEVENTS) {
  7788. min_complete = min(min_complete, ctx->cq_entries);
/*
 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, userspace
 * doesn't need to poll for completion events itself; it can rely on
 * io_sq_thread to do that polling, which reduces CPU usage and
 * uring_lock contention.
 */
  7795. if (ctx->flags & IORING_SETUP_IOPOLL &&
  7796. !(ctx->flags & IORING_SETUP_SQPOLL)) {
  7797. ret = io_iopoll_check(ctx, min_complete);
  7798. } else {
  7799. ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
  7800. }
  7801. }
  7802. out:
  7803. percpu_ref_put(&ctx->refs);
  7804. out_fput:
  7805. fdput(f);
  7806. return submitted ? submitted : ret;
  7807. }
  7808. #ifdef CONFIG_PROC_FS
  7809. static int io_uring_show_cred(struct seq_file *m, unsigned int id,
  7810. const struct io_identity *iod)
  7811. {
  7812. const struct cred *cred = iod->creds;
  7813. struct user_namespace *uns = seq_user_ns(m);
  7814. struct group_info *gi;
  7815. kernel_cap_t cap;
  7816. unsigned __capi;
  7817. int g;
  7818. seq_printf(m, "%5d\n", id);
  7819. seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
  7820. seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
  7821. seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
  7822. seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
  7823. seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
  7824. seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
  7825. seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
  7826. seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
  7827. seq_puts(m, "\n\tGroups:\t");
  7828. gi = cred->group_info;
  7829. for (g = 0; g < gi->ngroups; g++) {
  7830. seq_put_decimal_ull(m, g ? " " : "",
  7831. from_kgid_munged(uns, gi->gid[g]));
  7832. }
  7833. seq_puts(m, "\n\tCapEff:\t");
  7834. cap = cred->cap_effective;
  7835. CAP_FOR_EACH_U32(__capi)
  7836. seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
  7837. seq_putc(m, '\n');
  7838. return 0;
  7839. }
  7840. static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
  7841. {
  7842. struct io_sq_data *sq = NULL;
  7843. bool has_lock;
  7844. int i;
/*
 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
 * since the fdinfo case grabs it in the opposite direction of normal
 * use cases. If we fail to get the lock, we just don't iterate any
 * structures that could be going away outside the io_uring mutex.
 */
  7851. has_lock = mutex_trylock(&ctx->uring_lock);
  7852. if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
  7853. sq = ctx->sq_data;
  7854. seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
  7855. seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
  7856. seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
  7857. for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
  7858. struct fixed_file_table *table;
  7859. struct file *f;
  7860. table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
  7861. f = table->files[i & IORING_FILE_TABLE_MASK];
  7862. if (f)
  7863. seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
  7864. else
  7865. seq_printf(m, "%5u: <none>\n", i);
  7866. }
  7867. seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
  7868. for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
  7869. struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
  7870. seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
  7871. (unsigned int) buf->len);
  7872. }
  7873. if (has_lock && !xa_empty(&ctx->personalities)) {
  7874. unsigned long index;
  7875. const struct io_identity *iod;
  7876. seq_printf(m, "Personalities:\n");
  7877. xa_for_each(&ctx->personalities, index, iod)
  7878. io_uring_show_cred(m, index, iod);
  7879. }
  7880. seq_printf(m, "PollList:\n");
  7881. spin_lock_irq(&ctx->completion_lock);
  7882. for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
  7883. struct hlist_head *list = &ctx->cancel_hash[i];
  7884. struct io_kiocb *req;
  7885. hlist_for_each_entry(req, list, hash_node)
  7886. seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
  7887. req->task->task_works != NULL);
  7888. }
  7889. spin_unlock_irq(&ctx->completion_lock);
  7890. if (has_lock)
  7891. mutex_unlock(&ctx->uring_lock);
  7892. }
  7893. static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
  7894. {
  7895. struct io_ring_ctx *ctx = f->private_data;
  7896. if (percpu_ref_tryget(&ctx->refs)) {
  7897. __io_uring_show_fdinfo(ctx, m);
  7898. percpu_ref_put(&ctx->refs);
  7899. }
  7900. }
  7901. #endif
  7902. static const struct file_operations io_uring_fops = {
  7903. .release = io_uring_release,
  7904. .flush = io_uring_flush,
  7905. .mmap = io_uring_mmap,
  7906. #ifndef CONFIG_MMU
  7907. .get_unmapped_area = io_uring_nommu_get_unmapped_area,
  7908. .mmap_capabilities = io_uring_nommu_mmap_capabilities,
  7909. #endif
  7910. .poll = io_uring_poll,
  7911. .fasync = io_uring_fasync,
  7912. #ifdef CONFIG_PROC_FS
  7913. .show_fdinfo = io_uring_show_fdinfo,
  7914. #endif
  7915. };
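/*
 * Allocate and initialize the shared SQ/CQ rings and the SQE array using
 * the entry counts already validated in io_uring_create().
 */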
  7916. static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
  7917. struct io_uring_params *p)
  7918. {
  7919. struct io_rings *rings;
  7920. size_t size, sq_array_offset;
  7921. /* make sure these are sane, as we already accounted them */
  7922. ctx->sq_entries = p->sq_entries;
  7923. ctx->cq_entries = p->cq_entries;
  7924. size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
  7925. if (size == SIZE_MAX)
  7926. return -EOVERFLOW;
  7927. rings = io_mem_alloc(size);
  7928. if (!rings)
  7929. return -ENOMEM;
  7930. ctx->rings = rings;
  7931. ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
  7932. rings->sq_ring_mask = p->sq_entries - 1;
  7933. rings->cq_ring_mask = p->cq_entries - 1;
  7934. rings->sq_ring_entries = p->sq_entries;
  7935. rings->cq_ring_entries = p->cq_entries;
  7936. ctx->sq_mask = rings->sq_ring_mask;
  7937. ctx->cq_mask = rings->cq_ring_mask;
  7938. size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
  7939. if (size == SIZE_MAX) {
  7940. io_mem_free(ctx->rings);
  7941. ctx->rings = NULL;
  7942. return -EOVERFLOW;
  7943. }
  7944. ctx->sq_sqes = io_mem_alloc(size);
  7945. if (!ctx->sq_sqes) {
  7946. io_mem_free(ctx->rings);
  7947. ctx->rings = NULL;
  7948. return -ENOMEM;
  7949. }
  7950. return 0;
  7951. }
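/*
 * Grab an unused fd, register the ring file with the current task for
 * cancelation tracking, then install the fd.
 */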
  7952. static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
  7953. {
  7954. int ret, fd;
  7955. fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
  7956. if (fd < 0)
  7957. return fd;
  7958. ret = io_uring_add_task_file(ctx, file);
  7959. if (ret) {
  7960. put_unused_fd(fd);
  7961. return ret;
  7962. }
  7963. fd_install(fd, file);
  7964. return fd;
  7965. }
/*
 * Allocate an anonymous fd; this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
  7972. static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
  7973. {
  7974. struct file *file;
  7975. #if defined(CONFIG_UNIX)
  7976. int ret;
  7977. ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
  7978. &ctx->ring_sock);
  7979. if (ret)
  7980. return ERR_PTR(ret);
  7981. #endif
  7982. file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
  7983. O_RDWR | O_CLOEXEC);
  7984. #if defined(CONFIG_UNIX)
  7985. if (IS_ERR(file)) {
  7986. sock_release(ctx->ring_sock);
  7987. ctx->ring_sock = NULL;
  7988. } else {
  7989. ctx->ring_sock->file = file;
  7990. }
  7991. #endif
  7992. return file;
  7993. }
  7994. static int io_uring_create(unsigned entries, struct io_uring_params *p,
  7995. struct io_uring_params __user *params)
  7996. {
  7997. struct user_struct *user = NULL;
  7998. struct io_ring_ctx *ctx;
  7999. struct file *file;
  8000. bool limit_mem;
  8001. int ret;
  8002. if (!entries)
  8003. return -EINVAL;
  8004. if (entries > IORING_MAX_ENTRIES) {
  8005. if (!(p->flags & IORING_SETUP_CLAMP))
  8006. return -EINVAL;
  8007. entries = IORING_MAX_ENTRIES;
  8008. }
  8009. /*
  8010. * Use twice as many entries for the CQ ring. It's possible for the
  8011. * application to drive a higher depth than the size of the SQ ring,
  8012. * since the sqes are only used at submission time. This allows for
  8013. * some flexibility in overcommitting a bit. If the application has
  8014. * set IORING_SETUP_CQSIZE, it will have passed in the desired number
  8015. * of CQ ring entries manually.
  8016. */
  8017. p->sq_entries = roundup_pow_of_two(entries);
  8018. if (p->flags & IORING_SETUP_CQSIZE) {
  8019. /*
  8020. * If IORING_SETUP_CQSIZE is set, we do the same roundup
  8021. * to a power-of-two, if it isn't already. We do NOT impose
  8022. * any cq vs sq ring sizing.
  8023. */
  8024. if (!p->cq_entries)
  8025. return -EINVAL;
  8026. if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
  8027. if (!(p->flags & IORING_SETUP_CLAMP))
  8028. return -EINVAL;
  8029. p->cq_entries = IORING_MAX_CQ_ENTRIES;
  8030. }
  8031. p->cq_entries = roundup_pow_of_two(p->cq_entries);
  8032. if (p->cq_entries < p->sq_entries)
  8033. return -EINVAL;
  8034. } else {
  8035. p->cq_entries = 2 * p->sq_entries;
  8036. }
  8037. user = get_uid(current_user());
  8038. limit_mem = !capable(CAP_IPC_LOCK);
  8039. if (limit_mem) {
  8040. ret = __io_account_mem(user,
  8041. ring_pages(p->sq_entries, p->cq_entries));
  8042. if (ret) {
  8043. free_uid(user);
  8044. return ret;
  8045. }
  8046. }
  8047. ctx = io_ring_ctx_alloc(p);
  8048. if (!ctx) {
  8049. if (limit_mem)
  8050. __io_unaccount_mem(user, ring_pages(p->sq_entries,
  8051. p->cq_entries));
  8052. free_uid(user);
  8053. return -ENOMEM;
  8054. }
  8055. ctx->compat = in_compat_syscall();
  8056. ctx->user = user;
  8057. ctx->creds = get_current_cred();
  8058. #ifdef CONFIG_AUDIT
  8059. ctx->loginuid = current->loginuid;
  8060. ctx->sessionid = current->sessionid;
  8061. #endif
  8062. ctx->sqo_task = get_task_struct(current);
  8063. /*
  8064. * This is just grabbed for accounting purposes. When a process exits,
  8065. * the mm is exited and dropped before the files, hence we need to hang
  8066. * on to this mm purely for the purposes of being able to unaccount
  8067. * memory (locked/pinned vm). It's not used for anything else.
  8068. */
  8069. mmgrab(current->mm);
  8070. ctx->mm_account = current->mm;
  8071. #ifdef CONFIG_BLK_CGROUP
  8072. /*
  8073. * The sq thread will belong to the original cgroup it was inited in.
  8074. * If the cgroup goes offline (e.g. disabling the io controller), then
  8075. * issued bios will be associated with the closest cgroup later in the
  8076. * block layer.
  8077. */
  8078. rcu_read_lock();
  8079. ctx->sqo_blkcg_css = blkcg_css();
  8080. ret = css_tryget_online(ctx->sqo_blkcg_css);
  8081. rcu_read_unlock();
  8082. if (!ret) {
  8083. /* don't init against a dying cgroup, have the user try again */
  8084. ctx->sqo_blkcg_css = NULL;
  8085. ret = -ENODEV;
  8086. goto err;
  8087. }
  8088. #endif
  8089. /*
  8090. * Account memory _before_ installing the file descriptor. Once
  8091. * the descriptor is installed, it can get closed at any time. Also
  8092. * do this before hitting the general error path, as ring freeing
  8093. * will un-account as well.
  8094. */
  8095. io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
  8096. ACCT_LOCKED);
  8097. ctx->limit_mem = limit_mem;
  8098. ret = io_allocate_scq_urings(ctx, p);
  8099. if (ret)
  8100. goto err;
  8101. ret = io_sq_offload_create(ctx, p);
  8102. if (ret)
  8103. goto err;
  8104. if (!(p->flags & IORING_SETUP_R_DISABLED))
  8105. io_sq_offload_start(ctx);
  8106. memset(&p->sq_off, 0, sizeof(p->sq_off));
  8107. p->sq_off.head = offsetof(struct io_rings, sq.head);
  8108. p->sq_off.tail = offsetof(struct io_rings, sq.tail);
  8109. p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
  8110. p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
  8111. p->sq_off.flags = offsetof(struct io_rings, sq_flags);
  8112. p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
  8113. p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
  8114. memset(&p->cq_off, 0, sizeof(p->cq_off));
  8115. p->cq_off.head = offsetof(struct io_rings, cq.head);
  8116. p->cq_off.tail = offsetof(struct io_rings, cq.tail);
  8117. p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
  8118. p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
  8119. p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
  8120. p->cq_off.cqes = offsetof(struct io_rings, cqes);
  8121. p->cq_off.flags = offsetof(struct io_rings, cq_flags);
  8122. p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
  8123. IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
  8124. IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
  8125. IORING_FEAT_POLL_32BITS;
  8126. if (copy_to_user(params, p, sizeof(*p))) {
  8127. ret = -EFAULT;
  8128. goto err;
  8129. }
  8130. file = io_uring_get_file(ctx);
  8131. if (IS_ERR(file)) {
  8132. ret = PTR_ERR(file);
  8133. goto err;
  8134. }
  8135. /*
  8136. * Install ring fd as the very last thing, so we don't risk someone
  8137. * having closed it before we finish setup
  8138. */
  8139. ret = io_uring_install_fd(ctx, file);
  8140. if (ret < 0) {
  8141. io_disable_sqo_submit(ctx);
  8142. /* fput will clean it up */
  8143. fput(file);
  8144. return ret;
  8145. }
  8146. trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
  8147. return ret;
  8148. err:
  8149. io_disable_sqo_submit(ctx);
  8150. io_ring_ctx_wait_and_kill(ctx);
  8151. return ret;
  8152. }
/*
 * Sets up an io_uring context and returns the fd. The application asks for
 * a ring size; we return the actual sq/cq ring sizes (among other things)
 * in the params structure passed in.
 */
  8158. static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
  8159. {
  8160. struct io_uring_params p;
  8161. int i;
  8162. if (copy_from_user(&p, params, sizeof(p)))
  8163. return -EFAULT;
  8164. for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
  8165. if (p.resv[i])
  8166. return -EINVAL;
  8167. }
  8168. if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
  8169. IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
  8170. IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
  8171. IORING_SETUP_R_DISABLED))
  8172. return -EINVAL;
  8173. return io_uring_create(entries, &p, params);
  8174. }
  8175. SYSCALL_DEFINE2(io_uring_setup, u32, entries,
  8176. struct io_uring_params __user *, params)
  8177. {
  8178. return io_uring_setup(entries, params);
  8179. }
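/*
 * IORING_REGISTER_PROBE: report which opcodes this kernel supports back
 * to the application.
 */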
  8180. static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
  8181. {
  8182. struct io_uring_probe *p;
  8183. size_t size;
  8184. int i, ret;
  8185. size = struct_size(p, ops, nr_args);
  8186. if (size == SIZE_MAX)
  8187. return -EOVERFLOW;
  8188. p = kzalloc(size, GFP_KERNEL);
  8189. if (!p)
  8190. return -ENOMEM;
  8191. ret = -EFAULT;
  8192. if (copy_from_user(p, arg, size))
  8193. goto out;
  8194. ret = -EINVAL;
  8195. if (memchr_inv(p, 0, size))
  8196. goto out;
  8197. p->last_op = IORING_OP_LAST - 1;
  8198. if (nr_args > IORING_OP_LAST)
  8199. nr_args = IORING_OP_LAST;
  8200. for (i = 0; i < nr_args; i++) {
  8201. p->ops[i].op = i;
  8202. if (!io_op_defs[i].not_supported)
  8203. p->ops[i].flags = IO_URING_OP_SUPPORTED;
  8204. }
  8205. p->ops_len = i;
  8206. ret = 0;
  8207. if (copy_to_user(arg, p, size))
  8208. ret = -EFAULT;
  8209. out:
  8210. kfree(p);
  8211. return ret;
  8212. }
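/*
 * Register the current credentials as a personality and return its id,
 * which the application can later select via sqe->personality.
 */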
  8213. static int io_register_personality(struct io_ring_ctx *ctx)
  8214. {
  8215. struct io_identity *iod;
  8216. u32 id;
  8217. int ret;
  8218. iod = kmalloc(sizeof(*iod), GFP_KERNEL);
  8219. if (unlikely(!iod))
  8220. return -ENOMEM;
  8221. io_init_identity(iod);
  8222. iod->creds = get_current_cred();
  8223. ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)iod,
  8224. XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
  8225. if (ret < 0) {
  8226. put_cred(iod->creds);
  8227. kfree(iod);
  8228. return ret;
  8229. }
  8230. return id;
  8231. }
  8232. static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
  8233. unsigned int nr_args)
  8234. {
  8235. struct io_uring_restriction *res;
  8236. size_t size;
  8237. int i, ret;
  8238. /* Restrictions allowed only if rings started disabled */
  8239. if (!(ctx->flags & IORING_SETUP_R_DISABLED))
  8240. return -EBADFD;
  8241. /* We allow only a single restrictions registration */
  8242. if (ctx->restrictions.registered)
  8243. return -EBUSY;
  8244. if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
  8245. return -EINVAL;
  8246. size = array_size(nr_args, sizeof(*res));
  8247. if (size == SIZE_MAX)
  8248. return -EOVERFLOW;
  8249. res = memdup_user(arg, size);
  8250. if (IS_ERR(res))
  8251. return PTR_ERR(res);
  8252. ret = 0;
  8253. for (i = 0; i < nr_args; i++) {
  8254. switch (res[i].opcode) {
  8255. case IORING_RESTRICTION_REGISTER_OP:
  8256. if (res[i].register_op >= IORING_REGISTER_LAST) {
  8257. ret = -EINVAL;
  8258. goto out;
  8259. }
  8260. __set_bit(res[i].register_op,
  8261. ctx->restrictions.register_op);
  8262. break;
  8263. case IORING_RESTRICTION_SQE_OP:
  8264. if (res[i].sqe_op >= IORING_OP_LAST) {
  8265. ret = -EINVAL;
  8266. goto out;
  8267. }
  8268. __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
  8269. break;
  8270. case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
  8271. ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
  8272. break;
  8273. case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
  8274. ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
  8275. break;
  8276. default:
  8277. ret = -EINVAL;
  8278. goto out;
  8279. }
  8280. }
  8281. out:
  8282. /* Reset all restrictions if an error happened */
  8283. if (ret != 0)
  8284. memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
  8285. else
  8286. ctx->restrictions.registered = true;
  8287. kfree(res);
  8288. return ret;
  8289. }
  8290. static int io_register_enable_rings(struct io_ring_ctx *ctx)
  8291. {
  8292. if (!(ctx->flags & IORING_SETUP_R_DISABLED))
  8293. return -EBADFD;
  8294. if (ctx->restrictions.registered)
  8295. ctx->restricted = 1;
  8296. io_sq_offload_start(ctx);
  8297. return 0;
  8298. }
  8299. static bool io_register_op_must_quiesce(int op)
  8300. {
  8301. switch (op) {
  8302. case IORING_UNREGISTER_FILES:
  8303. case IORING_REGISTER_FILES_UPDATE:
  8304. case IORING_REGISTER_PROBE:
  8305. case IORING_REGISTER_PERSONALITY:
  8306. case IORING_UNREGISTER_PERSONALITY:
  8307. return false;
  8308. default:
  8309. return true;
  8310. }
  8311. }
  8312. static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
  8313. void __user *arg, unsigned nr_args)
  8314. __releases(ctx->uring_lock)
  8315. __acquires(ctx->uring_lock)
  8316. {
  8317. int ret;
  8318. /*
  8319. * We're inside the ring mutex, if the ref is already dying, then
  8320. * someone else killed the ctx or is already going through
  8321. * io_uring_register().
  8322. */
  8323. if (percpu_ref_is_dying(&ctx->refs))
  8324. return -ENXIO;
  8325. if (io_register_op_must_quiesce(opcode)) {
  8326. percpu_ref_kill(&ctx->refs);
  8327. /*
  8328. * Drop uring mutex before waiting for references to exit. If
  8329. * another thread is currently inside io_uring_enter() it might
  8330. * need to grab the uring_lock to make progress. If we hold it
  8331. * here across the drain wait, then we can deadlock. It's safe
  8332. * to drop the mutex here, since no new references will come in
  8333. * after we've killed the percpu ref.
  8334. */
  8335. mutex_unlock(&ctx->uring_lock);
  8336. do {
  8337. ret = wait_for_completion_interruptible(&ctx->ref_comp);
  8338. if (!ret)
  8339. break;
  8340. ret = io_run_task_work_sig();
  8341. if (ret < 0)
  8342. break;
  8343. } while (1);
  8344. mutex_lock(&ctx->uring_lock);
  8345. if (ret) {
  8346. io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
  8347. return ret;
  8348. }
  8349. }
  8350. if (ctx->restricted) {
  8351. if (opcode >= IORING_REGISTER_LAST) {
  8352. ret = -EINVAL;
  8353. goto out;
  8354. }
  8355. if (!test_bit(opcode, ctx->restrictions.register_op)) {
  8356. ret = -EACCES;
  8357. goto out;
  8358. }
  8359. }
  8360. switch (opcode) {
  8361. case IORING_REGISTER_BUFFERS:
  8362. ret = io_sqe_buffer_register(ctx, arg, nr_args);
  8363. break;
  8364. case IORING_UNREGISTER_BUFFERS:
  8365. ret = -EINVAL;
  8366. if (arg || nr_args)
  8367. break;
  8368. ret = io_sqe_buffer_unregister(ctx);
  8369. break;
  8370. case IORING_REGISTER_FILES:
  8371. ret = io_sqe_files_register(ctx, arg, nr_args);
  8372. break;
  8373. case IORING_UNREGISTER_FILES:
  8374. ret = -EINVAL;
  8375. if (arg || nr_args)
  8376. break;
  8377. ret = io_sqe_files_unregister(ctx);
  8378. break;
  8379. case IORING_REGISTER_FILES_UPDATE:
  8380. ret = io_sqe_files_update(ctx, arg, nr_args);
  8381. break;
  8382. case IORING_REGISTER_EVENTFD:
  8383. case IORING_REGISTER_EVENTFD_ASYNC:
  8384. ret = -EINVAL;
  8385. if (nr_args != 1)
  8386. break;
  8387. ret = io_eventfd_register(ctx, arg);
  8388. if (ret)
  8389. break;
  8390. if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
  8391. ctx->eventfd_async = 1;
  8392. else
  8393. ctx->eventfd_async = 0;
  8394. break;
  8395. case IORING_UNREGISTER_EVENTFD:
  8396. ret = -EINVAL;
  8397. if (arg || nr_args)
  8398. break;
  8399. ret = io_eventfd_unregister(ctx);
  8400. break;
  8401. case IORING_REGISTER_PROBE:
  8402. ret = -EINVAL;
  8403. if (!arg || nr_args > 256)
  8404. break;
  8405. ret = io_probe(ctx, arg, nr_args);
  8406. break;
  8407. case IORING_REGISTER_PERSONALITY:
  8408. ret = -EINVAL;
  8409. if (arg || nr_args)
  8410. break;
  8411. ret = io_register_personality(ctx);
  8412. break;
  8413. case IORING_UNREGISTER_PERSONALITY:
  8414. ret = -EINVAL;
  8415. if (arg)
  8416. break;
  8417. ret = io_unregister_personality(ctx, nr_args);
  8418. break;
  8419. case IORING_REGISTER_ENABLE_RINGS:
  8420. ret = -EINVAL;
  8421. if (arg || nr_args)
  8422. break;
  8423. ret = io_register_enable_rings(ctx);
  8424. break;
  8425. case IORING_REGISTER_RESTRICTIONS:
  8426. ret = io_register_restrictions(ctx, arg, nr_args);
  8427. break;
  8428. default:
  8429. ret = -EINVAL;
  8430. break;
  8431. }
  8432. out:
  8433. if (io_register_op_must_quiesce(opcode)) {
  8434. /* bring the ctx back to life */
  8435. percpu_ref_reinit(&ctx->refs);
  8436. reinit_completion(&ctx->ref_comp);
  8437. }
  8438. return ret;
  8439. }
  8440. SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
  8441. void __user *, arg, unsigned int, nr_args)
  8442. {
  8443. struct io_ring_ctx *ctx;
  8444. long ret = -EBADF;
  8445. struct fd f;
  8446. f = fdget(fd);
  8447. if (!f.file)
  8448. return -EBADF;
  8449. ret = -EOPNOTSUPP;
  8450. if (f.file->f_op != &io_uring_fops)
  8451. goto out_fput;
  8452. ctx = f.file->private_data;
  8453. mutex_lock(&ctx->uring_lock);
  8454. ret = __io_uring_register(ctx, opcode, arg, nr_args);
  8455. mutex_unlock(&ctx->uring_lock);
  8456. trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
  8457. ctx->cq_ev_fd != NULL, ret);
  8458. out_fput:
  8459. fdput(f);
  8460. return ret;
  8461. }
  8462. static int __init io_uring_init(void)
  8463. {
  8464. #define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
  8465. BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
  8466. BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
  8467. } while (0)
  8468. #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
  8469. __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
  8470. BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
  8471. BUILD_BUG_SQE_ELEM(0, __u8, opcode);
  8472. BUILD_BUG_SQE_ELEM(1, __u8, flags);
  8473. BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
  8474. BUILD_BUG_SQE_ELEM(4, __s32, fd);
  8475. BUILD_BUG_SQE_ELEM(8, __u64, off);
  8476. BUILD_BUG_SQE_ELEM(8, __u64, addr2);
  8477. BUILD_BUG_SQE_ELEM(16, __u64, addr);
  8478. BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
  8479. BUILD_BUG_SQE_ELEM(24, __u32, len);
  8480. BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
  8481. BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
  8482. BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
  8483. BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
  8484. BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
  8485. BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
  8486. BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
  8487. BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
  8488. BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
  8489. BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
  8490. BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
  8491. BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
  8492. BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
  8493. BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
  8494. BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
  8495. BUILD_BUG_SQE_ELEM(32, __u64, user_data);
  8496. BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
  8497. BUILD_BUG_SQE_ELEM(42, __u16, personality);
  8498. BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
  8499. BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
  8500. BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
  8501. req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
  8502. return 0;
  8503. };
  8504. __initcall(io_uring_init);