/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
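
/*
 * Illustrative note (sketch, not from the original file): ZERO_STATEID()
 * and ONE_STATEID() match the two special stateids the NFSv4 protocol
 * defines, e.g. a READ carrying the all-zeros "anonymous" stateid or the
 * all-ones stateid that bypasses the usual share/lock checks. A caller
 * would test them roughly like:
 *
 *	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
 *		// take the special-stateid path
 */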
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}
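
/*
 * Illustrative sketch: atomic_dec_and_lock() above only takes
 * nn->client_lock when the count actually drops to zero. It behaves
 * roughly like:
 *
 *	if (atomic_add_unless(&clp->cl_rpc_users, -1, 1))
 *		return;			// fast path, lock never taken
 *	spin_lock(&nn->client_lock);
 *	if (!atomic_dec_and_test(&clp->cl_rpc_users)) {
 *		spin_unlock(&nn->client_lock);
 *		return;
 *	}
 *	// final put: renew or wake waiters, then unlock
 */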
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}
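
/*
 * Illustrative pairing (hypothetical caller, not from this file): a
 * session reference taken under the client_lock is dropped with the
 * self-locking variant once the lock has been released:
 *
 *	spin_lock(&nn->client_lock);
 *	status = nfsd4_get_session_locked(ses);
 *	spin_unlock(&nn->client_lock);
 *	if (status == nfs_ok) {
 *		// ... use the session ...
 *		nfsd4_put_session(ses);
 *	}
 */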
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);
	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};
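
/*
 * Illustrative lifecycle (sketch): once a blocked lock becomes grantable,
 * queuing the callback drives the three ops above in order: ->prepare
 * unblocks the lock, ->done decides whether to retry (returning 0 after
 * the rpc_delay() above requeues the RPC), and ->release frees the
 * nfsd4_blocked_lock when the callback is finished:
 *
 *	nfsd4_run_cb(&nbl->nbl_cb);	// -> prepare, done, release
 */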
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}
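
/*
 * Worked example: opaque_hashval() is a simple base-37 polynomial hash.
 * For the two-byte owner string "ab" (0x61, 0x62):
 *
 *	x = 0
 *	x = 0 * 37 + 0x61 = 97
 *	x = 97 * 37 + 0x62 = 3687
 *
 * ownerstr_hashval() below then masks with OWNER_HASH_MASK, so 3687
 * lands in bucket 3687 & 255 = 103.
 */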
static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}
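
/*
 * Sketch of the lookup side this RCU free protects (assuming the usual
 * reader pattern for file_hashtbl elsewhere in this file):
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
 *		if (fh_match(&fp->fi_fhandle, fh) &&
 *		    refcount_inc_not_zero(&fp->fi_ref)) {
 *			// got a reference to a live file
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * call_rcu() above guarantees such a reader never touches freed memory.
 */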
static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return nfsd_file_get(f->fi_fds[oflag]);
	return NULL;
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
{
	struct nfsd_file *ret = NULL;

	spin_lock(&f->fi_lock);
	if (f->fi_deleg_file)
		ret = nfsd_file_get(f->fi_deleg_file);
	spin_unlock(&f->fi_lock);
	return ret;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;
		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}
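
/*
 * Design note (illustrative): the swap() calls above move the struct
 * nfsd_file pointers into local variables while fi_lock is held, so that
 * the nfsd_file_put() calls, which may do further work, run only after
 * the spinlock has been dropped. The same idiom appears again in
 * put_deleg_file() below.
 */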
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
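
/*
 * Illustrative caller pattern (sketch): "new" is allocated speculatively
 * before fi_lock is taken. If an odstate for the same client already
 * exists, the returned pointer differs from "new" and the caller is
 * expected to discard its unused allocation, roughly:
 *
 *	co = find_or_hash_clnt_odstate(fp, new);
 *	if (co != new)
 *		kmem_cache_free(odstate_slab, new);
 */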
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
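
/*
 * Illustrative note on the IDR pattern above: idr_preload() pre-allocates
 * radix-tree nodes with GFP_KERNEL while sleeping is still allowed, so the
 * allocation under the spinlock can safely use GFP_NOWAIT. The general
 * shape is:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&lock);
 *	id = idr_alloc_cyclic(&idr, ptr, start, end, GFP_NOWAIT);
 *	spin_unlock(&lock);
 *	idr_preload_end();
 *
 * idr_alloc_cyclic() is what implements the "always increase (mod
 * INT_MAX)" behaviour described in the comment above.
 */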
/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char sc_type)
{
	int new_id;

	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	stid->sc_type = sc_type;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->stid.si_opaque.so_id = new_id;
	stid->stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.sc_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}
  702. static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
  703. {
  704. struct nfs4_stid *stid;
  705. stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
  706. if (!stid)
  707. return NULL;
  708. return openlockstateid(stid);
  709. }
  710. static void nfs4_free_deleg(struct nfs4_stid *stid)
  711. {
  712. kmem_cache_free(deleg_slab, stid);
  713. atomic_long_dec(&num_delegations);
  714. }
  715. /*
  716. * When we recall a delegation, we should be careful not to hand it
  717. * out again straight away.
  718. * To ensure this we keep a pair of bloom filters ('new' and 'old')
  719. * in which the filehandles of recalled delegations are "stored".
  720. * If a filehandle appear in either filter, a delegation is blocked.
  721. * When a delegation is recalled, the filehandle is stored in the "new"
  722. * filter.
  723. * Every 30 seconds we swap the filters and clear the "new" one,
  724. * unless both are empty of course.
  725. *
  726. * Each filter is 256 bits. We hash the filehandle to 32bit and use the
  727. * low 3 bytes as hash-table indices.
  728. *
  729. * 'blocked_delegations_lock', which is always taken in block_delegations(),
  730. * is used to manage concurrent access. Testing does not need the lock
  731. * except when swapping the two filters.
  732. */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1 - bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;
	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
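
/*
 * Worked example (illustrative numbers only): if jhash() maps a
 * filehandle to 0x00a1b2c3, block_delegations() sets bits 0xc3, 0xb2
 * and 0xa1 in the current "new" filter, and delegation_blocked()
 * reports a match only when all three of those bits are set in the
 * same filter.
 */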

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;
	/*
	 * delegation seqids are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}
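
/*
 * Drop a reference to a generic stateid.  When the last reference goes
 * under cl_lock, the stateid is removed from the client's IDR, any
 * copy-notify state hanging off it is freed, and the type-specific
 * sc_free callback then releases the stateid itself.
 */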
void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp: a pointer to the nfs4_client we're granting a delegation to
 * @fp: a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      true iff an existing delegation is found, otherwise false
 */
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp: a pointer to the nfs4_delegation we are adding.
 * @fp: a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                nfs4_client for this nfs4_file. The delegation is
 *                not hashed.
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool delegation_hashed(struct nfs4_delegation *dp)
{
	return !(list_empty(&dp->dl_perfile));
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}
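
/*
 * For a v4.1+ client the revoked delegation is parked on cl_revoked so
 * the client can learn of it via the SEQUENCE status flags and free it
 * with FREE_STATEID; a v4.0 client has no such mechanism, so its
 * delegation is simply destroyed.
 */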
static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation in RFC 3530, section 14.2.19, that the
 * server return an error if the client attempts to downgrade to a
 * combination of share bits not explicable by closing some of its
 * previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}
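
/*
 * For example (illustrative only): an OPEN with ACCESS_READ (1)
 * followed by an OPEN with ACCESS_BOTH (3) leaves st_access_bmap ==
 * 0b1010; bmap_to_share_mode() then ORs the set bit positions together
 * and returns 1 | 3 == NFS4_SHARE_ACCESS_BOTH.
 */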

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				  struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				       struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}
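
/*
 * For example (illustrative numbers): a client advertising a
 * ca_maxresponsesize_cached of 2048 gets slots of
 * 2048 - 80 + sizeof(struct nfsd4_slot) bytes each, since the 80 bytes
 * of rpc, compound and SEQUENCE headers are regenerated rather than
 * cached.
 */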

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail, total_avail;
	unsigned int scale_factor;

	spin_lock(&nfsd_drc_lock);
	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	else
		/* We have handed out more space than we chose in
		 * set_max_drc() to allow.  That isn't really a
		 * problem as long as that doesn't make us think we
		 * have lots more due to integer overflow.
		 */
		total_avail = 0;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/*
	 * Never use more than a fraction of the remaining memory,
	 * unless it's the only way to give this client a slot.
	 * The chosen fraction is either 1/8 or 1/number of threads,
	 * whichever is smaller.  This ensures there are adequate
	 * slots to support multiple clients per thread.
	 * Give the client one slot even if that would require
	 * over-allocation--it is better than failure.
	 */
	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);

	avail = clamp_t(unsigned long, avail, slotsize,
			total_avail/scale_factor);
	num = min_t(int, num, avail / slotsize);
	num = max_t(int, num, 1);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}
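
/*
 * E.g. with 64 nfsd threads, scale_factor is 64, so a new session may
 * claim at most 1/64 of the remaining DRC memory; with 4 threads the
 * 1/8 floor applies instead.  Either way the session is guaranteed at
 * least one slot.
 */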

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}
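
/*
 * Teardown order matters here: connections are dropped first (so no
 * new requests can arrive on the session), the session's DRC
 * reservation is returned, and only then are the slots and the session
 * itself freed.
 */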
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
			  __be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one.  That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return 0;
	trace_nfsd_clid_stale(clid);
	return 1;
}

/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
						 sizeof(struct list_head),
						 GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_rpc_users, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	INIT_LIST_HEAD(&clp->async_copies);
	spin_lock_init(&clp->async_lock);
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kmem_cache_free(client_slab, clp);
	return NULL;
}

static void __free_client(struct kref *k)
{
	struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
	struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);

	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	kfree(clp->cl_nii_domain.data);
	kfree(clp->cl_nii_name.data);
	idr_destroy(&clp->cl_stateids);
	kmem_cache_free(client_slab, clp);
}

static void drop_client(struct nfs4_client *clp)
{
	kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;

		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				 se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	if (clp->cl_nfsd_dentry) {
		nfsd_client_rmdir(clp->cl_nfsd_dentry);
		clp->cl_nfsd_dentry = NULL;
		wake_up_all(&expiry_wq);
	}
	drop_client(clp);
}

/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_rpc_users))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}

static void
__destroy_client(struct nfs4_client *clp)
{
	int i;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		struct nfs4_stateowner *so, *tmp;

		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
					 so_strhash) {
			/* Should be no openowners at this point */
			WARN_ON_ONCE(so->so_is_open_owner);
			remove_blocked_locks(lockowner(so));
		}
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_copy(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
	wake_up_all(&expiry_wq);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void inc_reclaim_complete(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!nn->track_reclaim_completes)
		return;
	if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
		return;
	if (atomic_inc_return(&nn->nr_reclaim_complete) ==
			nn->reclaim_str_hashtbl_size) {
		printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
				clp->net->ns.inum);
		nfsd4_end_grace(nn);
	}
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
	       sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
					   GFP_KERNEL);
	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal) ||
	    (source->cr_targ_princ && !target->cr_targ_princ))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}

static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use the uid, the gids, and the gss principal string as our
 * best approximation.  We also don't want to allow non-gss use of a
 * client established using gss: in theory cr_principal should catch
 * that change, but in practice cr_principal can be null even in the
 * gss case since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	/* XXX: check that cr_targ_princ fields match ? */
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (cl->cl_cred.cr_raw_principal)
		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
				   cr->cr_raw_principal);
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}

static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}

static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			refcount_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}

static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
{
	struct nfsdfs_client *nc;

	nc = get_nfsdfs_client(inode);
	if (!nc)
		return NULL;
	return container_of(nc, struct nfs4_client, cl_nfsdfs);
}

static void seq_quote_mem(struct seq_file *m, char *data, int len)
{
	seq_printf(m, "\"");
	seq_escape_mem_ascii(m, data, len);
	seq_printf(m, "\"");
}

static int client_info_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct nfs4_client *clp;
	u64 clid;

	clp = get_nfsdfs_clp(inode);
	if (!clp)
		return -ENXIO;
	memcpy(&clid, &clp->cl_clientid, sizeof(clid));
	seq_printf(m, "clientid: 0x%llx\n", clid);
	seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
	seq_printf(m, "name: ");
	seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
	seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
	if (clp->cl_nii_domain.data) {
		seq_printf(m, "Implementation domain: ");
		seq_quote_mem(m, clp->cl_nii_domain.data,
			      clp->cl_nii_domain.len);
		seq_printf(m, "\nImplementation name: ");
		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
			   clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
	}
	drop_client(clp);

	return 0;
}

static int client_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, client_info_show, inode);
}

static const struct file_operations client_info_fops = {
	.open		= client_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
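
/*
 * The "states" seq_file below walks the client's stateid IDR under
 * cl_lock; *pos is the next IDR id to visit, so iteration resumes
 * correctly even if stateids are added or removed between reads.
 */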
static void *states_start(struct seq_file *s, loff_t *pos)
	__acquires(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;
	unsigned long id = *pos;
	void *ret;

	spin_lock(&clp->cl_lock);
	ret = idr_get_next_ul(&clp->cl_stateids, &id);
	*pos = id;
	return ret;
}

static void *states_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct nfs4_client *clp = s->private;
	unsigned long id = *pos;
	void *ret;

	id++;
	ret = idr_get_next_ul(&clp->cl_stateids, &id);
	*pos = id;
	return ret;
}

static void states_stop(struct seq_file *s, void *v)
	__releases(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;

	spin_unlock(&clp->cl_lock);
}

static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
{
	seq_printf(s, "filename: \"%pD2\"", f->nf_file);
}

static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
	struct inode *inode = f->nf_inode;

	seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
		   MAJOR(inode->i_sb->s_dev),
		   MINOR(inode->i_sb->s_dev),
		   inode->i_ino);
}

static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
{
	seq_printf(s, "owner: ");
	seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
}

static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
{
	seq_printf(s, "0x%.8x", stid->si_generation);
	seq_printf(s, "%12phN", &stid->si_opaque);
}
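
/*
 * Stateids are printed as the 4-byte generation in hex followed by the
 * 12 opaque bytes as a hex string, matching the 16-byte on-the-wire
 * stateid layout.
 */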
static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;
	unsigned int access, deny;

	if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
		return 0; /* XXX: or SEQ_SKIP? */
	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	file = find_any_file(nf);
	if (!file)
		return 0;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: open, ");

	access = bmap_to_share_mode(ols->st_access_bmap);
	deny   = bmap_to_share_mode(ols->st_deny_bmap);

	seq_printf(s, "access: %s%s, ",
		   access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		   access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
	seq_printf(s, "deny: %s%s, ",
		   deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		   deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
	nfsd_file_put(file);

	return 0;
}

static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;

	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	file = find_any_file(nf);
	if (!file)
		return 0;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: lock, ");
	/*
	 * Note: a lock stateid isn't really the same thing as a lock,
	 * it's the locking state held by one owner on a file, and there
	 * may be multiple (or no) lock ranges associated with it.
	 * (The same is true of open stateids.)
	 */
	nfs4_show_superblock(s, file);
	/* XXX: open stateid? */
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
	nfsd_file_put(file);

	return 0;
}

static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_delegation *ds;
	struct nfs4_file *nf;
	struct nfsd_file *file;

	ds = delegstateid(st);
	nf = st->sc_file;
	file = find_deleg_file(nf);
	if (!file)
		return 0;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: deleg, ");

	/* Kinda dead code as long as we only support read delegs: */
	seq_printf(s, "access: %s, ",
		   ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");

	/* XXX: lease time, whether it's being recalled. */

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, " }\n");
	nfsd_file_put(file);

	return 0;
}

static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_layout_stateid *ls;
	struct nfsd_file *file;

	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
	file = ls->ls_file;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: layout, ");

	/* XXX: What else would be useful? */

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, " }\n");

	return 0;
}

static int states_show(struct seq_file *s, void *v)
{
	struct nfs4_stid *st = v;

	switch (st->sc_type) {
	case NFS4_OPEN_STID:
		return nfs4_show_open(s, st);
	case NFS4_LOCK_STID:
		return nfs4_show_lock(s, st);
	case NFS4_DELEG_STID:
		return nfs4_show_deleg(s, st);
	case NFS4_LAYOUT_STID:
		return nfs4_show_layout(s, st);
	default:
		return 0; /* XXX: or SEQ_SKIP? */
	}
	/* XXX: copy stateids? */
}

static struct seq_operations states_seq_ops = {
	.start = states_start,
	.next = states_next,
	.stop = states_stop,
	.show = states_show
};

static int client_states_open(struct inode *inode, struct file *file)
{
	struct seq_file *s;
	struct nfs4_client *clp;
	int ret;

	clp = get_nfsdfs_clp(inode);
	if (!clp)
		return -ENXIO;

	ret = seq_open(file, &states_seq_ops);
	if (ret)
		return ret;
	s = file->private_data;
	s->private = clp;
	return 0;
}

static int client_opens_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct nfs4_client *clp = m->private;

	/* XXX: alternatively, we could get/drop in seq start/stop */
	drop_client(clp);
	return 0;
}

static const struct file_operations client_states_fops = {
	.open		= client_states_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= client_opens_release,
};

/*
 * Normally we refuse to destroy clients that are in use, but here the
 * administrator is telling us to just do it.  We also want to wait
 * so the caller has a guarantee that the client's locks are gone by
 * the time the write returns:
 */
static void force_expire_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	bool already_expired;

	spin_lock(&nn->client_lock);
	clp->cl_time = 0;
	spin_unlock(&nn->client_lock);

	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
	spin_lock(&nn->client_lock);
	already_expired = list_empty(&clp->cl_lru);
	if (!already_expired)
		unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);

	if (!already_expired)
		expire_client(clp);
	else
		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
}

static ssize_t client_ctl_write(struct file *file, const char __user *buf,
				size_t size, loff_t *pos)
{
	char *data;
	struct nfs4_client *clp;

	data = simple_transaction_get(file, buf, size);
	if (IS_ERR(data))
		return PTR_ERR(data);
	if (size != 7 || 0 != memcmp(data, "expire\n", 7))
		return -EINVAL;
	clp = get_nfsdfs_clp(file_inode(file));
	if (!clp)
		return -ENXIO;
	force_expire_client(clp);
	drop_client(clp);
	return 7;
}

static const struct file_operations client_ctl_fops = {
	.write		= client_ctl_write,
	.release	= simple_transaction_release,
};

static const struct tree_descr client_files[] = {
	[0] = {"info", &client_info_fops, S_IRUSR},
	[1] = {"states", &client_states_fops, S_IRUSR},
	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
	[3] = {""},
};
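
/*
 * These three files appear in each client's directory under the nfsd
 * pseudo-filesystem (typically mounted at /proc/fs/nfsd): "info" and
 * "states" are read-only views of the client, while writing "expire\n"
 * to "ctl" forcibly expires it via client_ctl_write() above.
 */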
  2295. static struct nfs4_client *create_client(struct xdr_netobj name,
  2296. struct svc_rqst *rqstp, nfs4_verifier *verf)
  2297. {
  2298. struct nfs4_client *clp;
  2299. struct sockaddr *sa = svc_addr(rqstp);
  2300. int ret;
  2301. struct net *net = SVC_NET(rqstp);
  2302. struct nfsd_net *nn = net_generic(net, nfsd_net_id);
  2303. clp = alloc_client(name);
  2304. if (clp == NULL)
  2305. return NULL;
  2306. ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
  2307. if (ret) {
  2308. free_client(clp);
  2309. return NULL;
  2310. }
  2311. gen_clid(clp, nn);
  2312. kref_init(&clp->cl_nfsdfs.cl_ref);
  2313. nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
  2314. clp->cl_time = ktime_get_boottime_seconds();
  2315. clear_bit(0, &clp->cl_cb_slot_busy);
  2316. copy_verf(clp, verf);
  2317. memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
  2318. clp->cl_cb_session = NULL;
  2319. clp->net = net;
  2320. clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs,
  2321. clp->cl_clientid.cl_id - nn->clientid_base,
  2322. client_files);
  2323. if (!clp->cl_nfsd_dentry) {
  2324. free_client(clp);
  2325. return NULL;
  2326. }
  2327. return clp;
  2328. }
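
/*
 * Each nfsd_net keeps two red-black trees of clients, one for confirmed
 * and one for unconfirmed clients, ordered by compare_blob() over the
 * opaque client-supplied name.  Equal keys descend to the right on
 * insert, so the unconfirmed tree can briefly hold duplicates;
 * find_clp_in_name_tree() returns the first match it reaches.
 */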
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}
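
/*
 * Clients are also hashed by clientid, in unconf_id_hashtbl or
 * conf_id_hashtbl.  The two helpers below keep the id hash and the name
 * tree consistent under nn->client_lock, and renew the client's lease
 * as a side effect.
 */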
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}

static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					      se->se_callback_addr_len,
					      (struct sockaddr *)&conn->cb_addr,
					      sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	trace_nfsd_cb_args(clp, conn);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	trace_nfsd_cb_nodelegs(clp);
	return;
}

/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	free_svc_cred(&slot->sl_cred);
	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

	if (!nfsd4_cache_this(resp)) {
		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
		return;
	}
	slot->sl_flags |= NFSD4_SLOT_CACHED;

	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	if (slot->sl_flags & NFSD4_SLOT_CACHED)
		return op->status;
	if (args->opcnt == 1) {
		/*
		 * The original operation wasn't a solo sequence--we
		 * always cache those--so this retry must not match the
		 * original:
		 */
		op->status = nfserr_seq_false_retry;
	} else {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}

static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions)
		|| !list_empty(&clp->async_copies);
}

static __be32 copy_impl_id(struct nfs4_client *clp,
		struct nfsd4_exchange_id *exid)
{
	if (!exid->nii_domain.data)
		return 0;
	xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
	if (!clp->cl_nii_domain.data)
		return nfserr_jukebox;
	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
	if (!clp->cl_nii_name.data)
		return nfserr_jukebox;
	clp->cl_nii_time = exid->nii_time;
	return 0;
}
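
/*
 * EXCHANGE_ID.  The "case N" comments in the body refer to the client
 * record case analysis of RFC 5661, section 18.35.4.  Roughly: matching
 * creds and verifier against a confirmed record is an update or retry
 * (cases 2 and 6); a changed verifier means the client rebooted (case
 * 5); changed creds while state is held is a conflict (case 3); and
 * everything else funnels into creating a fresh unconfirmed record
 * (case 1).
 */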
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_exchange_id *exid = &u->exchange_id;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;
	status = copy_impl_id(new, exid);
	if (status)
		goto out_nolock;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		exid->spo_must_enforce[0] = 0;
		exid->spo_must_enforce[1] = (
			1 << (OP_BIND_CONN_TO_SESSION - 32) |
			1 << (OP_EXCHANGE_ID - 32) |
			1 << (OP_CREATE_SESSION - 32) |
			1 << (OP_DESTROY_SESSION - 32) |
			1 << (OP_DESTROY_CLIENTID - 32));

		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
					1 << (OP_OPEN_DOWNGRADE) |
					1 << (OP_LOCKU) |
					1 << (OP_DELEGRETURN));

		exid->spo_must_allow[1] &= (
					1 << (OP_TEST_STATEID - 32) |
					1 << (OP_FREE_STATEID - 32));
		if (!svc_rqst_integrity_protected(rqstp)) {
			status = nfserr_inval;
			goto out_nolock;
		}
		/*
		 * Sometimes userspace doesn't give us a principal.
		 * Which is a bug, really.  Anyway, we can't enforce
		 * MACH_CRED in that case, better to give up now:
		 */
		if (!new->cl_cred.cr_principal &&
					!new->cl_cred.cr_raw_principal) {
			status = nfserr_serverfault;
			goto out_nolock;
		}
		new->cl_mach_cred = true;
		break;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
		fallthrough;
	case SP4_SSV:
		status = nfserr_encr_alg_unsupp;
		goto out_nolock;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!nfsd4_mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];

	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;
	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
out_nolock:
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}

static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
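
/*
 * Worked example of the wraparound note above: with slot_seqid ==
 * 0xffffffff, slot_seqid + 1 wraps to 0 in u32 arithmetic, so a request
 * carrying seqid 0 is accepted as the next in sequence rather than
 * rejected as misordered.
 */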
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 * Note that we always allow at least one slot, because our
	 * accounting is soft and provides no guarantees either way.
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);

	return nfs_ok;
}

/*
 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
 * These are based on similar macros in linux/sunrpc/msg_prot.h .
 */
#define RPC_MAX_HEADER_WITH_AUTH_SYS \
	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))

#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
				 sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}
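
/*
 * CREATE_SESSION.  The channel attributes are validated and DRC memory
 * reserved, and the session and connection preallocated, all before
 * nn->client_lock is taken; the locked section then only has to confirm
 * the client and hash the session.  Replays are detected via the
 * clientid's single create-session slot (cl_cs_slot) rather than a full
 * session slot table.
 */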
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_create_session *cr_ses = &u->create_session;
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/* Persistent sessions are not supported */
	cr_ses->flags &= ~SESSION4_PERSIST;
	/* Upshifting from TCP to RDMA is not supported */
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
		struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{
	struct nfs4_client *clp = session->se_client;
	struct svc_xprt *xpt = rqst->rq_xprt;
	struct nfsd4_conn *c;
	__be32 status;

	/* Following the last paragraph of RFC 5661 Section 18.34.3: */
	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(xpt, session);
	if (!c)
		status = nfserr_noent;
	else if (req == c->cn_flags)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_BACK)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_FORE)
		status = nfs_ok;
	else
		status = nfserr_inval;
	spin_unlock(&clp->cl_lock);
	if (status == nfs_ok && conn)
		*conn = c;
	return status;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     union nfsd4_op_u *u)
{
	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_match_existing_connection(rqstp, session,
			bcts->dir, &conn);
	if (status == nfs_ok) {
		if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
				bcts->dir == NFS4_CDFC4_BACK)
			conn->cn_flags |= NFS4_CDFC4_BACK;
		nfsd4_probe_callback(session->se_client);
		goto out;
	}
	if (status == nfserr_inval)
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
	if (!cstate->session)
		return false;
	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}

__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate, sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}
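
/*
 * Make sure the connection a SEQUENCE arrived on is bound to its
 * session.  An unknown connection is implicitly bound (and registered
 * for transport-shutdown notification), except that clients which
 * negotiated SP4_MACH_CRED only get explicitly bound connections, so
 * anything else fails with nfserr_conn_not_bound_to_session.
 */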
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}

static bool replay_matches_cache(struct svc_rqst *rqstp,
		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error then the reply can have fewer ops than
	 * the call.
	 */
	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
		return false;
	/*
	 * But if we cached a reply with *more* ops than the call you're
	 * sending us now, then this new call is clearly not really a
	 * replay of the old one:
	 */
	if (slot->sl_opcnt > argp->opcnt)
		return false;
	/* This is the only check explicitly called by spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
		return false;
	/*
	 * There may be more comparisons we could actually do, but the
	 * spec doesn't require us to catch every case where the calls
	 * don't match (that would require caching the call as well as
	 * the reply), so we don't bother.
	 */
	return true;
}
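
/*
 * SEQUENCE.  check_slot_seqid() classifies the request: a fresh seqid
 * advances the slot and marks it NFSD4_SLOT_INUSE until the compound
 * finishes (see nfsd4_sequence_done()); a repeated seqid replays the
 * reply cached in the slot, after replay_matches_cache() has screened
 * out false retries.  The reply buffer is clamped to maxresp_cached
 * when the client asks for caching, and to maxresp_sz otherwise.
 */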
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	       union nfsd4_op_u *u)
{
	struct nfsd4_sequence *seq = &u->sequence;
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		status = nfserr_seq_false_retry;
		if (!replay_matches_cache(rqstp, seq, slot))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}

void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}

__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!nfsd4_mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}

__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
	inc_reclaim_complete(cstate->session->se_client);
out:
	return status;
}
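
/*
 * SETCLIENTID, the NFSv4.0 counterpart of EXCHANGE_ID above.  This is
 * the first half of the two-step client establishment handshake: we
 * always stage a new unconfirmed record and hand back a confirm
 * verifier, which SETCLIENTID_CONFIRM below must echo to promote the
 * record.  The "case N" comments refer to RFC 3530, section 14.2.33.
 */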
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_setclientid *setclid = &u->setclientid;
	struct xdr_netobj clname = setclid->se_name;
	nfs4_verifier clverifier = setclid->se_verf;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			trace_nfsd_clid_inuse_err(conf);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	/* We need to handle only case 1: probable callback update */
	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
		copy_clid(new, conf);
		gen_confirm(new, nn);
	}
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}

__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_setclientid_confirm *setclientid_confirm =
			&u->setclientid_confirm;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t *clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
			/* case 2: probable retransmit */
			status = nfs_ok;
		} else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred)) {
				old = NULL;
				goto out;
			}
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}

static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
				struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	refcount_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}

void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(client_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
	kmem_cache_destroy(odstate_slab);
}

int
nfsd4_init_slabs(void)
{
	client_slab = kmem_cache_create("nfsd4_clients",
			sizeof(struct nfs4_client), 0, 0, NULL);
	if (client_slab == NULL)
		goto out;
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out_free_client_slab;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out_free_client_slab:
	kmem_cache_destroy(client_slab);
out:
	return -ENOMEM;
}

static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}

static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}

static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash	= nfs4_unhash_openowner,
	.so_free	= nfs4_free_openowner,
};

static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner != &oo->oo_owner)
			continue;
		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
			ret = local;
			refcount_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	return ret;
}

static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{
	__be32 ret = nfs_ok;

	switch (s->sc_type) {
	default:
		break;
	case 0:
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		ret = nfserr_bad_stateid;
		break;
	case NFS4_REVOKED_DELEG_STID:
		ret = nfserr_deleg_revoked;
	}
	return ret;
}

/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
	__be32 ret;

	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
	ret = nfsd4_verify_open_stid(&stp->st_stid);
	if (ret != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return ret;
}

static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;

	for (;;) {
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
			break;
		nfs4_put_stid(&stp->st_stid);
	}
	return stp;
}
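
/*
 * Allocate an open-owner, then search for the owner string under
 * cl_lock; if a racing OPEN already hashed the same owner, free our
 * copy and return the existing one, so callers always see one
 * canonical nfs4_openowner per (client, owner) pair.
 */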
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);
	spin_unlock(&clp->cl_lock);
	return ret;
}

static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;
	struct nfs4_ol_stateid *stp;

	stp = open->op_stp;
	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);

retry:
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;

	open->op_stp = NULL;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* Handle races with CLOSE */
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}

/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = ktime_get_boottime_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}

/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
	struct nfs4_file *fp;

	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
				 lockdep_is_held(&state_lock)) {
		if (fh_match(&fp->fi_fhandle, fh)) {
			if (refcount_inc_not_zero(&fp->fi_ref))
				return fp;
		}
	}
	return NULL;
}

struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	return fp;
}
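
/*
 * Lockless RCU lookup first; only on a miss do we take state_lock and
 * either insert the caller's preallocated nfs4_file or return an entry
 * that raced in ahead of us.
 */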
  3863. static struct nfs4_file *
  3864. find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
  3865. {
  3866. struct nfs4_file *fp;
  3867. unsigned int hashval = file_hashval(fh);
  3868. rcu_read_lock();
  3869. fp = find_file_locked(fh, hashval);
  3870. rcu_read_unlock();
  3871. if (fp)
  3872. return fp;
  3873. spin_lock(&state_lock);
  3874. fp = find_file_locked(fh, hashval);
  3875. if (likely(fp == NULL)) {
  3876. nfsd4_init_file(fh, hashval, new);
  3877. fp = new;
  3878. }
  3879. spin_unlock(&state_lock);
  3880. return fp;
  3881. }
/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct nfs4_file *fp;
	__be32 ret = nfs_ok;

	fp = find_file(&current_fh->fh_handle);
	if (!fp)
		return ret;
	/* Check for conflicting share reservations */
	spin_lock(&fp->fi_lock);
	if (fp->fi_share_deny & deny_type)
		ret = nfserr_locked;
	spin_unlock(&fp->fi_lock);
	put_nfs4_file(fp);
	return ret;
}

static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (delegation_hashed(dp) && dp->dl_time == 0) {
		dp->dl_time = ktime_get_boottime_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}

static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
	    dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		return 1;

	switch (task->tk_status) {
	case 0:
		return 1;
	case -NFS4ERR_DELAY:
		rpc_delay(task, 2 * HZ);
		return 0;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/*
		 * Race: client probably got cb_recall before open reply
		 * granting delegation.
		 */
		if (dp->dl_retries--) {
			rpc_delay(task, 2 * HZ);
			return 0;
		}
		fallthrough;
	default:
		return 1;
	}
}

static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	nfs4_put_stid(&dp->dl_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare	= nfsd4_cb_recall_prepare,
	.done		= nfsd4_cb_recall_done,
	.release	= nfsd4_cb_recall_release,
};
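
/*
 * Recall lifecycle: nfsd_break_one_deleg() takes an extra sc_count
 * reference and queues the callback; .prepare blocks new delegations on
 * the file and stamps dl_time so the laundromat can time the recall out;
 * .done returns 0 to have the RPC retried (after rpc_delay()) or 1 to
 * finish; .release drops the reference taken at queue time.
 */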
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease. Since we're in this lease
	 * callback (and since the lease code is serialized by the
	 * i_lock) we know the server hasn't removed the lease yet, and
	 * we know it's safe to take a reference.
	 */
	refcount_inc(&dp->dl_stid.sc_count);
	nfsd4_run_cb(&dp->dl_recall);
}

/* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);

	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}

/**
 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
 * @fl: Lock state to check
 *
 * Return values:
 *   %true: Lease conflict was resolved
 *   %false: Lease conflict was not resolved.
 */
static bool nfsd_breaker_owns_lease(struct file_lock *fl)
{
	struct nfs4_delegation *dl = fl->fl_owner;
	struct svc_rqst *rqst;
	struct nfs4_client *clp;

	if (!i_am_nfsd())
		return false;
	rqst = kthread_data(current);
	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
		return false;
	clp = *(rqst->rq_lease_breaker);
	return dl->dl_stid.sc_client == clp;
}

static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
		     struct list_head *dispose)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg, dispose);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};

static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}
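
/*
 * NFSv4.0 seqid rules in brief: a seqid equal to so_seqid is the next
 * operation in order and proceeds; one exactly one behind is a
 * retransmission and is answered from the replay cache
 * (nfserr_replay_me); anything else is nfserr_bad_seqid. Sessions
 * (v4.1+) carry their own replay protection, so the check is skipped.
 */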
static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn,
		bool sessions)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions" will be
	 * false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, sessions, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	atomic_inc(&found->cl_rpc_users);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}

__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn, false);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo) {
		goto new_owner;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s,
				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
		nfs4_put_stid(&deleg->dl_stid);
		if (cl->cl_minorversion)
			status = nfserr_deleg_revoked;
		goto out;
	}
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}

static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
}

static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct nfsd_file *nf = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);
		status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = nf;
			nf = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	if (nf)
		nfsd_file_put(nf);

	status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
								access));
	if (status)
		goto out_put_access;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}
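
/*
 * The error paths above follow a snapshot-and-rollback pattern: the old
 * access and deny bitmaps are saved before being updated, and
 * out_put_access restores them (and drops the file access just taken)
 * if acquiring the struct file, breaking a conflicting lease, or the
 * implied truncate fails, leaving the stateid exactly as it was found.
 */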
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap = stp->st_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
						int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)dp;
	fl->fl_pid = current->tgid;
	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
	return fl;
}
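
/*
 * A delegation lives in the VFS as an FL_DELEG lease whose fl_lmops
 * point back at nfsd_lease_mng_ops, so any conflicting access breaks it
 * via nfsd_break_deleg_cb() above. Since only read delegations are
 * handed out, the type is F_RDLCK in practice; the F_WRLCK branch is
 * there for the day write delegations are supported.
 */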
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
	int status = 0;
	struct nfs4_delegation *dp;
	struct nfsd_file *nf;
	struct file_lock *fl;

	/*
	 * The fi_had_conflict and nfs_get_existing_delegation checks
	 * here are just optimizations; we'll need to recheck them at
	 * the end:
	 */
	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	nf = find_readable_file(fp);
	if (!nf) {
		/* We should always have a readable file here */
		WARN_ON_ONCE(1);
		return ERR_PTR(-EBADF);
	}
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (nfs4_delegation_exists(clp, fp))
		status = -EAGAIN;
	else if (!fp->fi_deleg_file) {
		fp->fi_deleg_file = nf;
		/* increment early to prevent fi_deleg_file from being
		 * cleared */
		fp->fi_delegees = 1;
		nf = NULL;
	} else
		fp->fi_delegees++;
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (nf)
		nfsd_file_put(nf);
	if (status)
		return ERR_PTR(status);

	status = -ENOMEM;
	dp = alloc_init_deleg(clp, fp, fh, odstate);
	if (!dp)
		goto out_delegees;

	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		goto out_clnt_odstate;

	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_clnt_odstate;

	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (fp->fi_had_conflict)
		status = -EAGAIN;
	else
		status = hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);

	if (status)
		goto out_unlock;

	return dp;
out_unlock:
	vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_stid(&dp->dl_stid);
out_delegees:
	put_deleg_file(fp);
	return ERR_PTR(status);
}
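
/*
 * The fi_had_conflict recheck under the locks after vfs_setlease()
 * succeeds is the authoritative one the earlier comment promises: a
 * lease break may race in between and set the flag, in which case the
 * freshly installed lease is torn down again at out_unlock instead of
 * handing the client a delegation that is already being recalled.
 */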
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}

/*
 * Attempt to hand out a delegation.
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
 */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
			struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!cb_up)
			open->op_recall = 1;
		if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
			goto out_no_deleg;
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		/*
		 * Let's not give out any delegations till everyone's
		 * had the chance to reclaim theirs, *and* until
		 * NLM locks have all been reclaimed:
		 */
		if (locks_in_grace(clp->net))
			goto out_no_deleg;
		if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
			goto out_no_deleg;
		/*
		 * Also, if the file was opened for write or
		 * create, there's a good chance the client's
		 * about to write to it, resulting in an
		 * immediate recall (since we don't support
		 * write delegations):
		 */
		if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
			goto out_no_deleg;
		if (open->op_create == NFS4_OPEN_CREATE)
			goto out_no_deleg;
		break;
	default:
		goto out_no_deleg;
	}
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
	if (IS_ERR(dp))
		goto out_no_deleg;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}

static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/*
	 * Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
	 */
}
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;
	bool new_stp = false;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_and_lock_existing_open(fp, open);
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
	}

	if (!stp) {
		stp = init_open_stateid(fp, open);
		if (!open->op_stp)
			new_stp = true;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 *
	 * stp is already locked.
	 */
	if (!new_stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			goto out;
		}
	} else {
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			stp->st_stid.sc_type = NFS4_CLOSED_STID;
			release_open_stateid(stp);
			mutex_unlock(&stp->st_mutex);
			goto out;
		}

		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
							open->op_odstate);
		if (stp->st_clnt_odstate == open->op_odstate)
			open->op_odstate = NULL;
	}

	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;
	trace_nfsd_open(&stp->st_stid.sc_stateid);
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}

void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		kmem_cache_free(file_slab, open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
	if (open->op_odstate)
		kmem_cache_free(odstate_slab, open->op_odstate);
}
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	clientid_t *clid = &u->renew;
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	trace_nfsd_clid_renew(clid);
	status = lookup_clientid(clid, cstate, nn, false);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}

void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	trace_nfsd_grace_complete(nn);
	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim. But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}

/*
 * If we've waited a lease period but there are still clients trying to
 * reclaim, wait a little longer to give them a chance to finish.
 */
static bool clients_still_reclaiming(struct nfsd_net *nn)
{
	time64_t double_grace_period_end = nn->boot_time +
					   2 * nn->nfsd4_lease;

	if (nn->track_reclaim_completes &&
			atomic_read(&nn->nr_reclaim_complete) ==
			nn->reclaim_str_hashtbl_size)
		return false;
	if (!nn->somebody_reclaimed)
		return false;
	nn->somebody_reclaimed = false;
	/*
	 * If we've given them *two* lease times to reclaim, and they're
	 * still not done, give up:
	 */
	if (ktime_get_boottime_seconds() > double_grace_period_end)
		return false;
	return true;
}
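
/*
 * For example, with nfsd's usual 90-second lease this lets grace be
 * stretched one laundromat pass at a time while reclaims keep arriving,
 * but never beyond boot_time + two lease periods; and once every
 * tracked client has sent RECLAIM_COMPLETE, grace ends on the very
 * next pass.
 */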
static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfsd4_blocked_lock *nbl;
	struct list_head *pos, *next, reaplist;
	time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease;
	time64_t t, new_timeo = nn->nfsd4_lease;
	struct nfs4_cpntf_state *cps;
	copy_stateid_t *cps_t;
	int i;

	if (clients_still_reclaiming(nn)) {
		new_timeo = 0;
		goto out;
	}
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);

	spin_lock(&nn->s2s_cp_lock);
	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
		if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
				cps->cpntf_time < cutoff)
			_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);

	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (clp->cl_time > cutoff) {
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			trace_nfsd_clid_expired(&clp->cl_clientid);
			continue;
		}
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		trace_nfsd_clid_purged(&clp->cl_clientid);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		if (dp->dl_time > cutoff) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (oo->oo_time > cutoff) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	/*
	 * It's possible for a client to try and acquire an already held lock
	 * that is being held for a long time, and then lose interest in it.
	 * So, we clean out any un-revisited request after a lease period
	 * under the assumption that the client is no longer interested.
	 *
	 * RFC5661, sec. 9.6 states that the client must not rely on getting
	 * notifications and must continue to poll for locks, even when the
	 * server supports them. Thus this shouldn't lead to clients blocking
	 * indefinitely once the lock does become free.
	 */
	BUG_ON(!list_empty(&reaplist));
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&nn->blocked_locks_lru)) {
		nbl = list_first_entry(&nn->blocked_locks_lru,
					struct nfsd4_blocked_lock, nbl_lru);
		if (nbl->nbl_time > cutoff) {
			t = nbl->nbl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_move(&nbl->nbl_lru, &reaplist);
		list_del_init(&nbl->nbl_list);
	}
	spin_unlock(&nn->blocked_locks_lock);

	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist,
					struct nfsd4_blocked_lock, nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
out:
	new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}

static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time64_t t;
	struct delayed_work *dwork = to_delayed_work(laundry);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
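
/*
 * nfs4_laundromat() returns how many seconds until the next item would
 * expire, and laundromat_main() re-arms itself for that long (t * HZ
 * jiffies). Because each LRU list above is kept in age order, every
 * scan can stop at the first unexpired entry and feed its remaining
 * lifetime, clamped to NFSD_LAUNDROMAT_MINTIMEOUT, into the next wakeup.
 */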
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}

static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (opens_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}

/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return opens_in_grace(net) && mandatory_lock(inode);
}

static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;
	if (in->si_generation == ref->si_generation)
		return nfs_ok;
	/* If the client sends us a stateid from the future, it's buggy: */
	if (nfsd4_stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client. For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight. The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
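
/*
 * Concretely: if the server's copy of a stateid carries generation 3,
 * an incoming 3 (or 0 on a session) is current, 4 or more is
 * nfserr_bad_stateid since the client can never be ahead of the server,
 * and 1 or 2 gets nfserr_old_stateid, from which a client recovers by
 * retrying with the newest stateid it holds.
 */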
static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
{
	__be32 ret;

	spin_lock(&s->sc_lock);
	ret = nfsd4_verify_open_stid(s);
	if (ret == nfs_ok)
		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
	spin_unlock(&s->sc_lock);
	return ret;
}

static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
	if (ols->st_stateowner->so_is_open_owner &&
	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return status;
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
		return status;
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		fallthrough;
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}

__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;
	bool return_revoked = false;

	/*
	 * Only return revoked delegations if explicitly asked;
	 * otherwise we report revoked or bad_stateid status.
	 */
	if (typemask & NFS4_REVOKED_DELEG_STID)
		return_revoked = true;
	else if (typemask & NFS4_DELEG_STID)
		typemask |= NFS4_REVOKED_DELEG_STID;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
				 false);
	if (status == nfserr_stale_clientid) {
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
		nfs4_put_stid(*s);
		if (cstate->minorversion)
			return nfserr_deleg_revoked;
		return nfserr_bad_stateid;
	}
	return nfs_ok;
}
static struct nfsd_file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
	if (!s)
		return NULL;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
			return NULL;
		return nfsd_file_get(s->sc_file->fi_deleg_file);
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		if (flags & RD_STATE)
			return find_readable_file(s->sc_file);
		else
			return find_writeable_file(s->sc_file);
	}

	return NULL;
}

static __be32
nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
{
	__be32 status;

	status = nfsd4_check_openowner_confirmed(ols);
	if (status)
		return status;
	return nfs4_check_openmode(ols, flags);
}

static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
		struct nfsd_file **nfp, int flags)
{
	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
	struct nfsd_file *nf;
	__be32 status;

	nf = nfs4_find_file(s, flags);
	if (nf) {
		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				acc | NFSD_MAY_OWNER_OVERRIDE);
		if (status) {
			nfsd_file_put(nf);
			goto out;
		}
	} else {
		status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
		if (status)
			return status;
	}
	*nfp = nf;
out:
	return status;
}

static void
_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
	WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
	if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
		return;
	list_del(&cps->cp_list);
	idr_remove(&nn->s2s_cp_stateids,
		   cps->cp_stateid.stid.si_opaque.so_id);
	kfree(cps);
}

/*
 * A READ from an inter server to server COPY will have a
 * copy stateid. Look up the copy notify stateid from the
 * idr structure and take a reference on it.
 */
__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			  struct nfs4_client *clp,
			  struct nfs4_cpntf_state **cps)
{
	copy_stateid_t *cps_t;
	struct nfs4_cpntf_state *state = NULL;

	if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
		return nfserr_bad_stateid;
	spin_lock(&nn->s2s_cp_lock);
	cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
	if (cps_t) {
		state = container_of(cps_t, struct nfs4_cpntf_state,
				     cp_stateid);
		if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
			state = NULL;
			goto unlock;
		}
		if (!clp)
			refcount_inc(&state->cp_stateid.sc_count);
		else
			_free_cpntf_state_locked(nn, state);
	}
unlock:
	spin_unlock(&nn->s2s_cp_lock);
	if (!state)
		return nfserr_bad_stateid;
	if (!clp && state)
		*cps = state;
	return 0;
}

static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			       struct nfs4_stid **stid)
{
	__be32 status;
	struct nfs4_cpntf_state *cps = NULL;
	struct nfsd4_compound_state cstate;

	status = manage_cpntf_state(nn, st, NULL, &cps);
	if (status)
		return status;

	cps->cpntf_time = ktime_get_boottime_seconds();
	memset(&cstate, 0, sizeof(cstate));
	status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
	if (status)
		goto out;
	status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				stid, nn);
	put_client_renew(cstate.clp);
out:
	nfs4_put_cpntf_state(nn, cps);
	return status;
}

void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
	spin_lock(&nn->s2s_cp_lock);
	_free_cpntf_state_locked(nn, cps);
	spin_unlock(&nn->s2s_cp_lock);
}
/*
 * Checks for stateid operations
 */
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
		stateid_t *stateid, int flags, struct nfsd_file **nfp,
		struct nfs4_stid **cstid)
{
	struct inode *ino = d_inode(fhp->fh_dentry);
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfs4_stid *s = NULL;
	__be32 status;

	if (nfp)
		*nfp = NULL;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
		status = check_special_stateids(net, fhp, stateid, flags);
		goto done;
	}

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status == nfserr_bad_stateid)
		status = find_cpntf_state(nn, stateid, &s);
	if (status)
		return status;
	status = nfsd4_stid_check_stateid_generation(stateid, s,
			nfsd4_has_session(cstate));
	if (status)
		goto out;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs4_check_delegmode(delegstateid(s), flags);
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfs4_check_olstateid(openlockstateid(s), flags);
		break;
	default:
		status = nfserr_bad_stateid;
		break;
	}
	if (status)
		goto out;
	status = nfs4_check_fh(fhp, s);

done:
	if (status == nfs_ok && nfp)
		status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
out:
	if (s) {
		if (!status && cstid)
			*cstid = s;
		else
			nfs4_put_stid(s);
	}
	return status;
}

/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);

	return nfs_ok;
}

static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
	struct nfs4_ol_stateid *stp = openlockstateid(s);
	__be32 ret;

	ret = nfsd4_lock_ol_stateid(stp);
	if (ret)
		goto out_put_stid;

	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (ret)
		goto out;

	ret = nfserr_locks_held;
	if (check_for_locks(stp->st_stid.sc_file,
			    lockowner(stp->st_stateowner)))
		goto out;

	release_lock_stateid(stp);
	ret = nfs_ok;

out:
	mutex_unlock(&stp->st_mutex);
out_put_stid:
	nfs4_put_stid(s);
	return ret;
}

__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	spin_lock(&s->sc_lock);
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		spin_unlock(&s->sc_lock);
		refcount_inc(&s->sc_count);
		spin_unlock(&cl->cl_lock);
		ret = nfsd4_free_lock_stateid(stateid, s);
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		spin_unlock(&s->sc_lock);
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
	spin_unlock(&s->sc_lock);
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}

static inline int
setlkflg(int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	status = nfsd4_lock_ol_stateid(stp);
	if (status != nfs_ok)
		return status;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return status;
}

/*
 * Checks for sequence id mutating operations.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	trace_nfsd_preprocess(seqid, stateid);

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}

static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		mutex_unlock(&stp->st_mutex);
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}

__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_open_confirm *oc = &u->open_confirm;
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
		mutex_unlock(&stp->st_mutex);
		goto put_stateid;
	}
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);
	trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}

static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_open_downgrade *od = &u->open_downgrade;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);
	reset_union_bmap_deny(od->od_share_deny, stp);
	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
	status = nfs_ok;
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
  5432. static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
  5433. {
  5434. struct nfs4_client *clp = s->st_stid.sc_client;
  5435. bool unhashed;
  5436. LIST_HEAD(reaplist);
  5437. spin_lock(&clp->cl_lock);
  5438. unhashed = unhash_open_stateid(s, &reaplist);
  5439. if (clp->cl_minorversion) {
  5440. if (unhashed)
  5441. put_ol_stateid_locked(s, &reaplist);
  5442. spin_unlock(&clp->cl_lock);
  5443. free_ol_stateid_reaplist(&reaplist);
  5444. } else {
  5445. spin_unlock(&clp->cl_lock);
  5446. free_ol_stateid_reaplist(&reaplist);
  5447. if (unhashed)
  5448. move_to_close_lru(s, clp->net);
  5449. }
  5450. }
  5451. /*
  5452. * nfs4_unlock_state() called after encode
  5453. */
  5454. __be32
  5455. nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
  5456. union nfsd4_op_u *u)
  5457. {
  5458. struct nfsd4_close *close = &u->close;
  5459. __be32 status;
  5460. struct nfs4_ol_stateid *stp;
  5461. struct net *net = SVC_NET(rqstp);
  5462. struct nfsd_net *nn = net_generic(net, nfsd_net_id);
  5463. dprintk("NFSD: nfsd4_close on file %pd\n",
  5464. cstate->current_fh.fh_dentry);
  5465. status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
  5466. &close->cl_stateid,
  5467. NFS4_OPEN_STID|NFS4_CLOSED_STID,
  5468. &stp, nn);
  5469. nfsd4_bump_seqid(cstate, status);
  5470. if (status)
  5471. goto out;
  5472. stp->st_stid.sc_type = NFS4_CLOSED_STID;
  5473. /*
  5474. * Technically we don't _really_ have to increment or copy it, since
  5475. * it should just be gone after this operation and we clobber the
  5476. * copied value below, but we continue to do so here just to ensure
  5477. * that racing ops see that there was a state change.
  5478. */
  5479. nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
  5480. nfsd4_close_open_stateid(stp);
  5481. mutex_unlock(&stp->st_mutex);
  5482. /* v4.1+ suggests that we send a special stateid in here, since the
  5483. * clients should just ignore this anyway. Since this is not useful
  5484. * for v4.0 clients either, we set it to the special close_stateid
  5485. * universally.
  5486. *
  5487. * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
  5488. */
  5489. memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
  5490. /* put reference from nfs4_preprocess_seqid_op */
  5491. nfs4_put_stid(&stp->st_stid);
  5492. out:
  5493. return status;
  5494. }
  5495. __be32
  5496. nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
  5497. union nfsd4_op_u *u)
  5498. {
  5499. struct nfsd4_delegreturn *dr = &u->delegreturn;
  5500. struct nfs4_delegation *dp;
  5501. stateid_t *stateid = &dr->dr_stateid;
  5502. struct nfs4_stid *s;
  5503. __be32 status;
  5504. struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
  5505. if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
  5506. return status;
  5507. status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
  5508. if (status)
  5509. goto out;
  5510. dp = delegstateid(s);
  5511. status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
  5512. if (status)
  5513. goto put_stateid;
  5514. destroy_delegation(dp);
  5515. put_stateid:
  5516. nfs4_put_stid(&dp->dl_stid);
  5517. out:
  5518. return status;
  5519. }
  5520. static inline u64
  5521. end_offset(u64 start, u64 len)
  5522. {
  5523. u64 end;
  5524. end = start + len;
  5525. return end >= start ? end: NFS4_MAX_UINT64;
  5526. }
  5527. /* last octet in a range */
  5528. static inline u64
  5529. last_byte_offset(u64 start, u64 len)
  5530. {
  5531. u64 end;
  5532. WARN_ON_ONCE(!len);
  5533. end = start + len;
  5534. return end > start ? end - 1: NFS4_MAX_UINT64;
  5535. }
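
/*
 * Worked example (added commentary): for start = 100 and len = 50,
 * last_byte_offset() returns 149.  An NFSv4 "lock to EOF" uses
 * len = NFS4_MAX_UINT64, so the addition wraps around (end <= start) and
 * both helpers clamp the result to NFS4_MAX_UINT64 instead of returning a
 * bogus wrapped offset.
 */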
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
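
/*
 * Example (added commentary): a client lock starting at 2^63
 * (0x8000000000000000) becomes negative when stored in the signed fl_start,
 * so it is clamped to OFFSET_MAX (2^63 - 1).  Distinct ranges beyond
 * 2^63 - 1 therefore collapse onto the same byte, which is exactly the
 * compliance gap the TODO above describes.
 */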
static fl_owner_t
nfsd4_fl_get_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	nfs4_get_stateowner(&lo->lo_owner);
	return owner;
}

static void
nfsd4_fl_put_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
}

static void
nfsd4_lm_notify(struct file_lock *fl)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
	struct net *net = lo->lo_owner.so_client->net;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl = container_of(fl,
					struct nfsd4_blocked_lock, nbl_lock);
	bool queue = false;

	/* An empty list means that something else is going to be using it */
	spin_lock(&nn->blocked_locks_lock);
	if (!list_empty(&nbl->nbl_list)) {
		list_del_init(&nbl->nbl_list);
		list_del_init(&nbl->nbl_lru);
		queue = true;
	}
	spin_unlock(&nn->blocked_locks_lock);

	if (queue)
		nfsd4_run_cb(&nbl->nbl_cb);
}

static const struct lock_manager_operations nfsd_posix_mng_ops = {
	.lm_notify = nfsd4_lm_notify,
	.lm_get_owner = nfsd4_fl_get_owner,
	.lm_put_owner = nfsd4_fl_put_owner,
};

static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
				GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
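
/*
 * Added note: the length computation here is the inverse of
 * last_byte_offset(): length = fl_end - fl_start + 1, with the special
 * value NFS4_MAX_UINT64 preserved for locks that extend to EOF.  The
 * "nevermind" path (empty owner, zero clientid) is used both when the
 * conflicting lock belongs to a non-nfsd lock manager, whose fl_owner is
 * not an nfs4_lockowner, and when duplicating the owner string fails.
 */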
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	unsigned int strhashval = ownerstr_hashval(owner);
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
			    so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (same_owner_str(so, owner))
			return lockowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	struct nfs4_lockowner *lo;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clp, owner);
	spin_unlock(&clp->cl_lock);
	return lo;
}

static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}

static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_lockowner *lo = lockowner(sop);

	kmem_cache_free(lockowner_slab, lo);
}

static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash = nfs4_unhash_lockowner,
	.so_free = nfs4_free_lockowner,
};

/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
 * occurred.
 *
 * strhashval = ownerstr_hashval
 */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
			   struct nfs4_ol_stateid *open_stp,
			   struct nfsd4_lock *lock)
{
	struct nfs4_lockowner *lo, *ret;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_blocked);
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
	lo->lo_owner.so_ops = &lockowner_ops;
	spin_lock(&clp->cl_lock);
	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
	if (ret == NULL) {
		list_add(&lo->lo_owner.so_strhash,
			 &clp->cl_ownerstr_hashtbl[strhashval]);
		ret = lo;
	} else
		nfs4_free_stateowner(&lo->lo_owner);
	spin_unlock(&clp->cl_lock);
	return ret;
}
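
/*
 * Added note: the allocate-then-search pattern above copes with concurrent
 * LOCK requests from the same owner.  The new lockowner is allocated
 * outside cl_lock and only hashed if no other thread hashed an identical
 * owner in the meantime; otherwise the fresh allocation is freed and the
 * existing (already referenced) owner wins.
 */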
static struct nfs4_ol_stateid *
find_lock_stateid(const struct nfs4_lockowner *lo,
		  const struct nfs4_ol_stateid *ost)
{
	struct nfs4_ol_stateid *lst;

	lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);

	/* If ost is not hashed, ost->st_locks will not be valid */
	if (!nfs4_ol_stateid_unhashed(ost))
		list_for_each_entry(lst, &ost->st_locks, st_locks) {
			if (lst->st_stateowner == &lo->lo_owner) {
				refcount_inc(&lst->st_stid.sc_count);
				return lst;
			}
		}
	return NULL;
}

static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfs4_ol_stateid *retstp;

	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
	spin_lock(&clp->cl_lock);
	if (nfs4_ol_stateid_unhashed(open_stp))
		goto out_close;
	retstp = find_lock_stateid(lo, open_stp);
	if (retstp)
		goto out_found;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_locks, &open_stp->st_locks);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&clp->cl_lock);
	return stp;
out_found:
	spin_unlock(&clp->cl_lock);
	if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
		nfs4_put_stid(&retstp->st_stid);
		goto retry;
	}
	/* To keep mutex tracking happy */
	mutex_unlock(&stp->st_mutex);
	return retstp;
out_close:
	spin_unlock(&clp->cl_lock);
	mutex_unlock(&stp->st_mutex);
	return NULL;
}
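
/*
 * Added note: three outcomes are possible above.  On the common path the
 * caller-supplied stateid is hashed and returned with st_mutex held.  If a
 * racing thread already hashed a lock stateid for this lockowner, that one
 * is returned instead, retrying if it gets unhashed before it can be
 * locked.  If the open stateid itself was unhashed in the meantime (e.g.
 * by a concurrent CLOSE), we give up and return NULL.
 */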
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	*new = false;
	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, ost);
	spin_unlock(&clp->cl_lock);
	if (lst != NULL) {
		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
			goto out;
		nfs4_put_stid(&lst->st_stid);
	}
	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
	if (ns == NULL)
		return NULL;

	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
	if (lst == openlockstateid(ns))
		*new = true;
	else
		nfs4_put_stid(ns);
out:
	return lst;
}

static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
		(length > ~offset)));
}
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_stid.sc_file;

	lockdep_assert_held(&fp->fi_lock);

	if (test_access(access, lock_stp))
		return;
	__nfs4_file_get_access(fp, access);
	set_access(access, lock_stp);
}

static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **plst, bool *new)
{
	__be32 status;
	struct nfs4_file *fi = ost->st_stid.sc_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
	struct nfs4_lockowner *lo;
	struct nfs4_ol_stateid *lst;
	unsigned int strhashval;

	lo = find_lockowner_str(cl, &lock->lk_new_owner);
	if (!lo) {
		strhashval = ownerstr_hashval(&lock->lk_new_owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		status = nfserr_bad_seqid;
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
			goto out;
	}

	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
	if (lst == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	status = nfs_ok;
	*plst = lst;
out:
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
}

/*
 * LOCK operation
 */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   union nfsd4_op_u *u)
{
	struct nfsd4_lock *lock = &u->lock;
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp = NULL;
	struct nfs4_ol_stateid *open_stp = NULL;
	struct nfs4_file *fp;
	struct nfsd_file *nf = NULL;
	struct nfsd4_blocked_lock *nbl = NULL;
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
	__be32 status = 0;
	int lkflg;
	int err;
	bool new = false;
	unsigned char fl_type;
	unsigned int fl_flags = FL_POSIX;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	if (lock->lk_is_new) {
		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->lk_new_clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

		status = nfserr_stale_clientid;
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
					lock->lk_new_open_seqid,
					&lock->lk_new_open_stateid,
					&open_stp, nn);
		if (status)
			goto out;
		mutex_unlock(&open_stp->st_mutex);
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
				&lock->lk_new_clientid))
			goto out;
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
						     &lock_stp, &new);
	} else {
		status = nfs4_preprocess_seqid_op(cstate,
				lock->lk_old_lock_seqid,
				&lock->lk_old_lock_stateid,
				NFS4_LOCK_STID, &lock_stp, nn);
	}
	if (status)
		goto out;
	lock_sop = lockowner(lock_stp->st_stateowner);

	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	status = nfserr_grace;
	if (locks_in_grace(net) && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace(net) && lock->lk_reclaim)
		goto out;

	fp = lock_stp->st_stid.sc_file;
	switch (lock->lk_type) {
	case NFS4_READW_LT:
		if (nfsd4_has_session(cstate))
			fl_flags |= FL_SLEEP;
		fallthrough;
	case NFS4_READ_LT:
		spin_lock(&fp->fi_lock);
		nf = find_readable_file_locked(fp);
		if (nf)
			get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
		spin_unlock(&fp->fi_lock);
		fl_type = F_RDLCK;
		break;
	case NFS4_WRITEW_LT:
		if (nfsd4_has_session(cstate))
			fl_flags |= FL_SLEEP;
		fallthrough;
	case NFS4_WRITE_LT:
		spin_lock(&fp->fi_lock);
		nf = find_writeable_file_locked(fp);
		if (nf)
			get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
		spin_unlock(&fp->fi_lock);
		fl_type = F_WRLCK;
		break;
	default:
		status = nfserr_inval;
		goto out;
	}

	if (!nf) {
		status = nfserr_openmode;
		goto out;
	}

	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
	if (!nbl) {
		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	file_lock = &nbl->nbl_lock;
	file_lock->fl_type = fl_type;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = nf->nf_file;
	file_lock->fl_flags = fl_flags;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	if (fl_flags & FL_SLEEP) {
		nbl->nbl_time = ktime_get_boottime_seconds();
		spin_lock(&nn->blocked_locks_lock);
		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
		spin_unlock(&nn->blocked_locks_lock);
	}

	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
	switch (err) {
	case 0: /* success! */
		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
		status = 0;
		if (lock->lk_reclaim)
			nn->somebody_reclaimed = true;
		break;
	case FILE_LOCK_DEFERRED:
		nbl = NULL;
		fallthrough;
	case -EAGAIN:		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
		break;
	case -EDEADLK:
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
		status = nfserrno(err);
		break;
	}
out:
	if (nbl) {
		/* dequeue it if we queued it before */
		if (fl_flags & FL_SLEEP) {
			spin_lock(&nn->blocked_locks_lock);
			list_del_init(&nbl->nbl_list);
			list_del_init(&nbl->nbl_lru);
			spin_unlock(&nn->blocked_locks_lock);
		}
		free_blocked_lock(nbl);
	}
	if (nf)
		nfsd_file_put(nf);
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

		mutex_unlock(&lock_stp->st_mutex);

		nfs4_put_stid(&lock_stp->st_stid);
	}
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
	nfsd4_bump_seqid(cstate, status);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}
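
/*
 * Added note: the READW/WRITEW ("wait") variants only set FL_SLEEP for
 * v4.1+ sessions, since telling the client its lock became available
 * requires a backchannel callback (CB_NOTIFY_LOCK).  The nfsd4_blocked_lock
 * queued above is what nfsd4_lm_notify() later dequeues and runs the
 * callback for; on any exit where it was not handed off to the lock
 * manager (nbl still non-NULL), it is unqueued and freed here.
 */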
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct nfsd_file *nf;
	__be32 err;

	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (err)
		return err;
	fh_lock(fhp); /* to block new leases till after test_lock: */
	err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
					     NFSD_MAY_READ));
	if (err)
		goto out;
	lock->fl_file = nf->nf_file;
	err = nfserrno(vfs_test_lock(nf->nf_file, lock));
	lock->fl_file = NULL;
out:
	fh_unlock(fhp);
	nfsd_file_put(nf);
	return err;
}

/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_lockt *lockt = &u->lockt;
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
					 false);
		if (status)
			goto out;
	}

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	switch (lockt->lt_type) {
	case NFS4_READ_LT:
	case NFS4_READW_LT:
		file_lock->fl_type = F_RDLCK;
		break;
	case NFS4_WRITE_LT:
	case NFS4_WRITEW_LT:
		file_lock->fl_type = F_WRLCK;
		break;
	default:
		dprintk("NFSD: nfs4_lockt: bad lock type!\n");
		status = nfserr_inval;
		goto out;
	}

	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}
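
/*
 * Added note: vfs_test_lock() reports "no conflict" by rewriting
 * file_lock->fl_type to F_UNLCK; otherwise file_lock is overwritten to
 * describe the conflicting lock, which nfs4_set_lock_denied() then
 * translates into the LOCK4denied result returned to the client.
 */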
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_locku *locku = &u->locku;
	struct nfs4_ol_stateid *stp;
	struct nfsd_file *nf = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	nf = find_any_file(stp->st_stid.sc_file);
	if (!nf) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto put_file;
	}

	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = nf->nf_file;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
					     locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
put_file:
	nfsd_file_put(nf);
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto put_file;
}
/*
 * returns
 * 	true:  locks held by lockowner
 * 	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	int status = false;
	struct nfsd_file *nf = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!nf) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = locks_inode(nf->nf_file);
	flctx = inode->i_flctx;

	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	nfsd_file_put(nf);
	return status;
}

__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo = NULL;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	unsigned int hashval = ownerstr_hashval(owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfs4_client *clp;
	LIST_HEAD(reaplist);

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = lookup_clientid(clid, cstate, nn, false);
	if (status)
		return status;

	clp = cstate->clp;
	/* Find the matching lock stateowner */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
			continue;

		/* see if there are still any locks associated with it */
		lo = lockowner(sop);
		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
			if (check_for_locks(stp->st_stid.sc_file, lo)) {
				status = nfserr_locks_held;
				spin_unlock(&clp->cl_lock);
				return status;
			}
		}

		nfs4_get_stateowner(sop);
		break;
	}
	if (!lo) {
		spin_unlock(&clp->cl_lock);
		return status;
	}

	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid,
				       st_perstateowner);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	remove_blocked_locks(lo);
	nfs4_put_stateowner(&lo->lo_owner);

	return status;
}
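
/*
 * Added note: RELEASE_LOCKOWNER is an NFSv4.0-only operation that lets a
 * client tell the server it will reuse a lock_owner string, so the server
 * may discard that owner's seqid state.  It must fail with
 * NFS4ERR_LOCKS_HELD while any lock from that owner remains, which is what
 * the check_for_locks() scan above enforces before the owner and its lock
 * stateids are torn down.
 */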
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}

/*
 * failure => all reset bets are off, nfserr_no_grace...
 *
 * The caller is responsible for freeing name.data if NULL is returned (it
 * will be freed in nfs4_remove_reclaim_record in the normal case).
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
		struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		crp->cr_name.data = name.data;
		crp->cr_name.len = name.len;
		crp->cr_princhash.data = princhash.data;
		crp->cr_princhash.len = princhash.len;
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}

void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp->cr_name.data);
	kfree(crp->cr_princhash.data);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}

void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
					 struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}

/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	strhashval = clientstr_hashval(name);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (compare_blob(&crp->cr_name, &name) == 0) {
			return crp;
		}
	}
	return NULL;
}

/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn, false);
	if (status)
		return nfserr_reclaim_bad;

	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
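
/*
 * Worked example (added commentary): with PAGE_SHIFT = 12 (4K pages) the
 * shift is 20 - 2 - 12 = 6, so max_delegations = pages / 64.  One megabyte
 * is 2^20 / 2^12 = 256 pages, and 256 / 64 = 4 delegations, matching the
 * "4 per megabyte" comment above.
 */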
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = ktime_get_real_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);
	spin_lock_init(&nn->s2s_cp_lock);
	idr_init(&nn->s2s_cp_stateids);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}

static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = get_nfsdfs(net);
	if (ret)
		return ret;
	ret = nfs4_state_create_net(net);
	if (ret) {
		mntput(nn->nfsd_mnt);
		return ret;
	}
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
		goto skip_grace;
	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	trace_nfsd_grace_start(nn);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;

skip_grace:
	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
	       net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
	nfsd4_end_grace(nn);
	return 0;
}
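
/*
 * Added note: the laundromat's first run is deliberately deferred.  On a
 * normal start it fires once the grace period (nfsd4_grace seconds) has
 * elapsed; when the tracking data proves there are no clients to reclaim,
 * grace is ended immediately and the first run waits a full lease period
 * instead.
 */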
/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out:
	return ret;
}

void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
	mntput(nn->nfsd_mnt);
}

void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}

static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
	    CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
	}
}
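
/*
 * Added note: this pair implements the NFSv4.1 "current stateid" (RFC 5661
 * section 16.2.3.1.2).  State-mutating ops save their result stateid via
 * put_stateid(); a later op in the same COMPOUND that sends the special
 * stateid (seqid 1, all-zero "other" field, matched by CURRENT_STATEID())
 * has it replaced with the saved value, so e.g. an OPEN followed by a READ
 * can omit the real stateid.
 */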
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}