bfq-iosched.c 237 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765277527852795280528152825283528452855286528752885289529052915292529352945295529652975298529953005301530253035304530553065307530853095310531153125313531453155316531753185319532053215322532353245325532653275328532953305331533253335334533553365337533853395340534153425343534453455346534753485349535053515352535353545355535653575358535953605361536253635364536553665367536853695370537153725373537453755376537753785379538053815382538353845385538653875388538953905391539253935394539553965397539853995400540154025403540454055406540754085409541054115412541354145415541654175418541954205421542254235424542554265427542854295430543154325433543454355436543754385439544054415442544354445445544654475448544954505451545254535454545554565457545854595460546154625463546454655466546754685469547054715472547354745475547654775478547954805481548254835484548554865487548854895490549154925493549454955496549754985499550055015502550355045505550655075508550955105511551255135514551555165517551855195520552155225523552455255526552755285529553055315532553355345535553655375538553955405541554255435544554555465547554855495550555155525553555455555556555755585559556055615562556355645565556655675568556955705571557255735574557555765577557855795580558155825583558455855586558755885589559055915592559355945595559655975598559956005601560256035604560556065607560856095610561156125613561456155616561756185619562056215622562356245625562656275628562956305631563256335634563556365637563856395640564156425643564456455646564756485649565056515652565356545655565656575658565956605661566256635664566556665667566856695670567156725673567456755676567756785679568056815682568356845685568656875688568956905691569256935694569556965697569856995700570157025703570457055706570757085709571057115712571357145715571657175718571957205721572257235724572557265727572857295730573157325733573457355736573757385739574057415742574357445745574657475748574957505751575257535754575557565757575857595760576157625763576457655766576757685769577057715772577357745775577657775778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630663166326633663466356636663766386639664066416642664366446645664666476648664966506651665266536654665566566657665866596660666166626663666466656666666766686669667066716672667366746675667666776678667966806681668266836684668566866687668866896690669166926693669466956696669766986699670067016702670367046705670667076708670967106711671267136714671567166717671867196720672167226723672467256726672767286729673067316732673367346735673667376738673967406741674267436744674567466747674867496750675167526753675467556756675767586759676067616762676367646765676667676768676967706771677267736774677567766777677867796780678167826783678467856786678767886789679067916792679367946795679667976798679968006801680268036804680568066807680868096810681168126813681468156816681768186819682068216822682368246825682668276828682968306831683268336834683568366837683868396840684168426843684468456846684768486849685068516852685368546855685668576858685968606861686268636864686568666867686868696870687168726873687468756876687768786879688068816882
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Budget Fair Queueing (BFQ) I/O scheduler.
  4. *
  5. * Based on ideas and code from CFQ:
  6. * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  7. *
  8. * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  9. * Paolo Valente <paolo.valente@unimore.it>
  10. *
  11. * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
  12. * Arianna Avanzini <avanzini@google.com>
  13. *
  14. * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
  15. *
  16. * BFQ is a proportional-share I/O scheduler, with some extra
  17. * low-latency capabilities. BFQ also supports full hierarchical
  18. * scheduling through cgroups. Next paragraphs provide an introduction
  19. * on BFQ inner workings. Details on BFQ benefits, usage and
  20. * limitations can be found in Documentation/block/bfq-iosched.rst.
  21. *
  22. * BFQ is a proportional-share storage-I/O scheduling algorithm based
  23. * on the slice-by-slice service scheme of CFQ. But BFQ assigns
  24. * budgets, measured in number of sectors, to processes instead of
  25. * time slices. The device is not granted to the in-service process
  26. * for a given time slice, but until it has exhausted its assigned
  27. * budget. This change from the time to the service domain enables BFQ
  28. * to distribute the device throughput among processes as desired,
  29. * without any distortion due to throughput fluctuations, or to device
  30. * internal queueing. BFQ uses an ad hoc internal scheduler, called
  31. * B-WF2Q+, to schedule processes according to their budgets. More
  32. * precisely, BFQ schedules queues associated with processes. Each
  33. * process/queue is assigned a user-configurable weight, and B-WF2Q+
  34. * guarantees that each queue receives a fraction of the throughput
  35. * proportional to its weight. Thanks to the accurate policy of
  36. * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
  37. * processes issuing sequential requests (to boost the throughput),
  38. * and yet guarantee a low latency to interactive and soft real-time
  39. * applications.
  40. *
  41. * In particular, to provide these low-latency guarantees, BFQ
  42. * explicitly privileges the I/O of two classes of time-sensitive
  43. * applications: interactive and soft real-time. In more detail, BFQ
  44. * behaves this way if the low_latency parameter is set (default
  45. * configuration). This feature enables BFQ to provide applications in
  46. * these classes with a very low latency.
  47. *
  48. * To implement this feature, BFQ constantly tries to detect whether
  49. * the I/O requests in a bfq_queue come from an interactive or a soft
  50. * real-time application. For brevity, in these cases, the queue is
  51. * said to be interactive or soft real-time. In both cases, BFQ
  52. * privileges the service of the queue, over that of non-interactive
  53. * and non-soft-real-time queues. This privileging is performed,
  54. * mainly, by raising the weight of the queue. So, for brevity, we
  55. * call just weight-raising periods the time periods during which a
  56. * queue is privileged, because deemed interactive or soft real-time.
  57. *
  58. * The detection of soft real-time queues/applications is described in
  59. * detail in the comments on the function
  60. * bfq_bfqq_softrt_next_start. On the other hand, the detection of an
  61. * interactive queue works as follows: a queue is deemed interactive
  62. * if it is constantly non empty only for a limited time interval,
  63. * after which it does become empty. The queue may be deemed
  64. * interactive again (for a limited time), if it restarts being
  65. * constantly non empty, provided that this happens only after the
  66. * queue has remained empty for a given minimum idle time.
  67. *
  68. * By default, BFQ computes automatically the above maximum time
  69. * interval, i.e., the time interval after which a constantly
  70. * non-empty queue stops being deemed interactive. Since a queue is
  71. * weight-raised while it is deemed interactive, this maximum time
  72. * interval happens to coincide with the (maximum) duration of the
  73. * weight-raising for interactive queues.
  74. *
  75. * Finally, BFQ also features additional heuristics for
  76. * preserving both a low latency and a high throughput on NCQ-capable,
  77. * rotational or flash-based devices, and to get the job done quickly
  78. * for applications consisting in many I/O-bound processes.
  79. *
  80. * NOTE: if the main or only goal, with a given device, is to achieve
  81. * the maximum-possible throughput at all times, then do switch off
  82. * all low-latency heuristics for that device, by setting low_latency
  83. * to 0.
  84. *
  85. * BFQ is described in [1], where also a reference to the initial,
  86. * more theoretical paper on BFQ can be found. The interested reader
  87. * can find in the latter paper full details on the main algorithm, as
  88. * well as formulas of the guarantees and formal proofs of all the
  89. * properties. With respect to the version of BFQ presented in these
  90. * papers, this implementation adds a few more heuristics, such as the
  91. * ones that guarantee a low latency to interactive and soft real-time
  92. * applications, and a hierarchical extension based on H-WF2Q+.
  93. *
  94. * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
  95. * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
  96. * with O(log N) complexity derives from the one introduced with EEVDF
  97. * in [3].
  98. *
  99. * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
  100. * Scheduler", Proceedings of the First Workshop on Mobile System
  101. * Technologies (MST-2015), May 2015.
  102. * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
  103. *
  104. * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
  105. * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
  106. * Oct 1997.
  107. *
  108. * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
  109. *
  110. * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
  111. * First: A Flexible and Accurate Mechanism for Proportional Share
  112. * Resource Allocation", technical report.
  113. *
  114. * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
  115. */
  116. #include <linux/module.h>
  117. #include <linux/slab.h>
  118. #include <linux/blkdev.h>
  119. #include <linux/cgroup.h>
  120. #include <linux/elevator.h>
  121. #include <linux/ktime.h>
  122. #include <linux/rbtree.h>
  123. #include <linux/ioprio.h>
  124. #include <linux/sbitmap.h>
  125. #include <linux/delay.h>
  126. #include <linux/backing-dev.h>
  127. #include "blk.h"
  128. #include "blk-mq.h"
  129. #include "blk-mq-tag.h"
  130. #include "blk-mq-sched.h"
  131. #include "bfq-iosched.h"
  132. #include "blk-wbt.h"
  133. #define BFQ_BFQQ_FNS(name) \
  134. void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
  135. { \
  136. __set_bit(BFQQF_##name, &(bfqq)->flags); \
  137. } \
  138. void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
  139. { \
  140. __clear_bit(BFQQF_##name, &(bfqq)->flags); \
  141. } \
  142. int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
  143. { \
  144. return test_bit(BFQQF_##name, &(bfqq)->flags); \
  145. }
  146. BFQ_BFQQ_FNS(just_created);
  147. BFQ_BFQQ_FNS(busy);
  148. BFQ_BFQQ_FNS(wait_request);
  149. BFQ_BFQQ_FNS(non_blocking_wait_rq);
  150. BFQ_BFQQ_FNS(fifo_expire);
  151. BFQ_BFQQ_FNS(has_short_ttime);
  152. BFQ_BFQQ_FNS(sync);
  153. BFQ_BFQQ_FNS(IO_bound);
  154. BFQ_BFQQ_FNS(in_large_burst);
  155. BFQ_BFQQ_FNS(coop);
  156. BFQ_BFQQ_FNS(split_coop);
  157. BFQ_BFQQ_FNS(softrt_update);
  158. BFQ_BFQQ_FNS(has_waker);
  159. #undef BFQ_BFQQ_FNS \
  160. /* Expiration time of sync (0) and async (1) requests, in ns. */
  161. static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
  162. /* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
  163. static const int bfq_back_max = 16 * 1024;
  164. /* Penalty of a backwards seek, in number of sectors. */
  165. static const int bfq_back_penalty = 2;
  166. /* Idling period duration, in ns. */
  167. static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
  168. /* Minimum number of assigned budgets for which stats are safe to compute. */
  169. static const int bfq_stats_min_budgets = 194;
  170. /* Default maximum budget values, in sectors and number of requests. */
  171. static const int bfq_default_max_budget = 16 * 1024;
  172. /*
  173. * When a sync request is dispatched, the queue that contains that
  174. * request, and all the ancestor entities of that queue, are charged
  175. * with the number of sectors of the request. In contrast, if the
  176. * request is async, then the queue and its ancestor entities are
  177. * charged with the number of sectors of the request, multiplied by
  178. * the factor below. This throttles the bandwidth for async I/O,
  179. * w.r.t. to sync I/O, and it is done to counter the tendency of async
  180. * writes to steal I/O throughput to reads.
  181. *
  182. * The current value of this parameter is the result of a tuning with
  183. * several hardware and software configurations. We tried to find the
  184. * lowest value for which writes do not cause noticeable problems to
  185. * reads. In fact, the lower this parameter, the stabler I/O control,
  186. * in the following respect. The lower this parameter is, the less
  187. * the bandwidth enjoyed by a group decreases
  188. * - when the group does writes, w.r.t. to when it does reads;
  189. * - when other groups do reads, w.r.t. to when they do writes.
  190. */
  191. static const int bfq_async_charge_factor = 3;
  192. /* Default timeout values, in jiffies, approximating CFQ defaults. */
  193. const int bfq_timeout = HZ / 8;
  194. /*
  195. * Time limit for merging (see comments in bfq_setup_cooperator). Set
  196. * to the slowest value that, in our tests, proved to be effective in
  197. * removing false positives, while not causing true positives to miss
  198. * queue merging.
  199. *
  200. * As can be deduced from the low time limit below, queue merging, if
  201. * successful, happens at the very beginning of the I/O of the involved
  202. * cooperating processes, as a consequence of the arrival of the very
  203. * first requests from each cooperator. After that, there is very
  204. * little chance to find cooperators.
  205. */
  206. static const unsigned long bfq_merge_time_limit = HZ/10;
  207. static struct kmem_cache *bfq_pool;
  208. /* Below this threshold (in ns), we consider thinktime immediate. */
  209. #define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
  210. /* hw_tag detection: parallel requests threshold and min samples needed. */
  211. #define BFQ_HW_QUEUE_THRESHOLD 3
  212. #define BFQ_HW_QUEUE_SAMPLES 32
  213. #define BFQQ_SEEK_THR (sector_t)(8 * 100)
  214. #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
  215. #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
  216. (get_sdist(last_pos, rq) > \
  217. BFQQ_SEEK_THR && \
  218. (!blk_queue_nonrot(bfqd->queue) || \
  219. blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
  220. #define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
  221. #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
  222. /*
  223. * Sync random I/O is likely to be confused with soft real-time I/O,
  224. * because it is characterized by limited throughput and apparently
  225. * isochronous arrival pattern. To avoid false positives, queues
  226. * containing only random (seeky) I/O are prevented from being tagged
  227. * as soft real-time.
  228. */
  229. #define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
  230. /* Min number of samples required to perform peak-rate update */
  231. #define BFQ_RATE_MIN_SAMPLES 32
  232. /* Min observation time interval required to perform a peak-rate update (ns) */
  233. #define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
  234. /* Target observation time interval for a peak-rate update (ns) */
  235. #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
  236. /*
  237. * Shift used for peak-rate fixed precision calculations.
  238. * With
  239. * - the current shift: 16 positions
  240. * - the current type used to store rate: u32
  241. * - the current unit of measure for rate: [sectors/usec], or, more precisely,
  242. * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
  243. * the range of rates that can be stored is
  244. * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
  245. * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
  246. * [15, 65G] sectors/sec
  247. * Which, assuming a sector size of 512B, corresponds to a range of
  248. * [7.5K, 33T] B/sec
  249. */
  250. #define BFQ_RATE_SHIFT 16
  251. /*
  252. * When configured for computing the duration of the weight-raising
  253. * for interactive queues automatically (see the comments at the
  254. * beginning of this file), BFQ does it using the following formula:
  255. * duration = (ref_rate / r) * ref_wr_duration,
  256. * where r is the peak rate of the device, and ref_rate and
  257. * ref_wr_duration are two reference parameters. In particular,
  258. * ref_rate is the peak rate of the reference storage device (see
  259. * below), and ref_wr_duration is about the maximum time needed, with
  260. * BFQ and while reading two files in parallel, to load typical large
  261. * applications on the reference device (see the comments on
  262. * max_service_from_wr below, for more details on how ref_wr_duration
  263. * is obtained). In practice, the slower/faster the device at hand
  264. * is, the more/less it takes to load applications with respect to the
  265. * reference device. Accordingly, the longer/shorter BFQ grants
  266. * weight raising to interactive applications.
  267. *
  268. * BFQ uses two different reference pairs (ref_rate, ref_wr_duration),
  269. * depending on whether the device is rotational or non-rotational.
  270. *
  271. * In the following definitions, ref_rate[0] and ref_wr_duration[0]
  272. * are the reference values for a rotational device, whereas
  273. * ref_rate[1] and ref_wr_duration[1] are the reference values for a
  274. * non-rotational device. The reference rates are not the actual peak
  275. * rates of the devices used as a reference, but slightly lower
  276. * values. The reason for using slightly lower values is that the
  277. * peak-rate estimator tends to yield slightly lower values than the
  278. * actual peak rate (it can yield the actual peak rate only if there
  279. * is only one process doing I/O, and the process does sequential
  280. * I/O).
  281. *
  282. * The reference peak rates are measured in sectors/usec, left-shifted
  283. * by BFQ_RATE_SHIFT.
  284. */
  285. static int ref_rate[2] = {14000, 33000};
  286. /*
  287. * To improve readability, a conversion function is used to initialize
  288. * the following array, which entails that the array can be
  289. * initialized only in a function.
  290. */
  291. static int ref_wr_duration[2];
  292. /*
  293. * BFQ uses the above-detailed, time-based weight-raising mechanism to
  294. * privilege interactive tasks. This mechanism is vulnerable to the
  295. * following false positives: I/O-bound applications that will go on
  296. * doing I/O for much longer than the duration of weight
  297. * raising. These applications have basically no benefit from being
  298. * weight-raised at the beginning of their I/O. On the opposite end,
  299. * while being weight-raised, these applications
  300. * a) unjustly steal throughput to applications that may actually need
  301. * low latency;
  302. * b) make BFQ uselessly perform device idling; device idling results
  303. * in loss of device throughput with most flash-based storage, and may
  304. * increase latencies when used purposelessly.
  305. *
  306. * BFQ tries to reduce these problems, by adopting the following
  307. * countermeasure. To introduce this countermeasure, we need first to
  308. * finish explaining how the duration of weight-raising for
  309. * interactive tasks is computed.
  310. *
  311. * For a bfq_queue deemed as interactive, the duration of weight
  312. * raising is dynamically adjusted, as a function of the estimated
  313. * peak rate of the device, so as to be equal to the time needed to
  314. * execute the 'largest' interactive task we benchmarked so far. By
  315. * largest task, we mean the task for which each involved process has
  316. * to do more I/O than for any of the other tasks we benchmarked. This
  317. * reference interactive task is the start-up of LibreOffice Writer,
  318. * and in this task each process/bfq_queue needs to have at most ~110K
  319. * sectors transferred.
  320. *
  321. * This last piece of information enables BFQ to reduce the actual
  322. * duration of weight-raising for at least one class of I/O-bound
  323. * applications: those doing sequential or quasi-sequential I/O. An
  324. * example is file copy. In fact, once started, the main I/O-bound
  325. * processes of these applications usually consume the above 110K
  326. * sectors in much less time than the processes of an application that
  327. * is starting, because these I/O-bound processes will greedily devote
  328. * almost all their CPU cycles only to their target,
  329. * throughput-friendly I/O operations. This is even more true if BFQ
  330. * happens to be underestimating the device peak rate, and thus
  331. * overestimating the duration of weight raising. But, according to
  332. * our measurements, once transferred 110K sectors, these processes
  333. * have no right to be weight-raised any longer.
  334. *
  335. * Basing on the last consideration, BFQ ends weight-raising for a
  336. * bfq_queue if the latter happens to have received an amount of
  337. * service at least equal to the following constant. The constant is
  338. * set to slightly more than 110K, to have a minimum safety margin.
  339. *
  340. * This early ending of weight-raising reduces the amount of time
  341. * during which interactive false positives cause the two problems
  342. * described at the beginning of these comments.
  343. */
  344. static const unsigned long max_service_from_wr = 120000;
  345. #define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
  346. #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
  347. struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
  348. {
  349. return bic->bfqq[is_sync];
  350. }
  351. void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
  352. {
  353. bic->bfqq[is_sync] = bfqq;
  354. }
  355. struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
  356. {
  357. return bic->icq.q->elevator->elevator_data;
  358. }
  359. /**
  360. * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
  361. * @icq: the iocontext queue.
  362. */
  363. static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
  364. {
  365. /* bic->icq is the first member, %NULL will convert to %NULL */
  366. return container_of(icq, struct bfq_io_cq, icq);
  367. }
  368. /**
  369. * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
  370. * @bfqd: the lookup key.
  371. * @ioc: the io_context of the process doing I/O.
  372. * @q: the request queue.
  373. */
  374. static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
  375. struct io_context *ioc,
  376. struct request_queue *q)
  377. {
  378. if (ioc) {
  379. unsigned long flags;
  380. struct bfq_io_cq *icq;
  381. spin_lock_irqsave(&q->queue_lock, flags);
  382. icq = icq_to_bic(ioc_lookup_icq(ioc, q));
  383. spin_unlock_irqrestore(&q->queue_lock, flags);
  384. return icq;
  385. }
  386. return NULL;
  387. }
  388. /*
  389. * Scheduler run of queue, if there are requests pending and no one in the
  390. * driver that will restart queueing.
  391. */
  392. void bfq_schedule_dispatch(struct bfq_data *bfqd)
  393. {
  394. if (bfqd->queued != 0) {
  395. bfq_log(bfqd, "schedule dispatch");
  396. blk_mq_run_hw_queues(bfqd->queue, true);
  397. }
  398. }
  399. #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  400. #define bfq_sample_valid(samples) ((samples) > 80)
  401. /*
  402. * Lifted from AS - choose which of rq1 and rq2 that is best served now.
  403. * We choose the request that is closer to the head right now. Distance
  404. * behind the head is penalized and only allowed to a certain extent.
  405. */
  406. static struct request *bfq_choose_req(struct bfq_data *bfqd,
  407. struct request *rq1,
  408. struct request *rq2,
  409. sector_t last)
  410. {
  411. sector_t s1, s2, d1 = 0, d2 = 0;
  412. unsigned long back_max;
  413. #define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
  414. #define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
  415. unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
  416. if (!rq1 || rq1 == rq2)
  417. return rq2;
  418. if (!rq2)
  419. return rq1;
  420. if (rq_is_sync(rq1) && !rq_is_sync(rq2))
  421. return rq1;
  422. else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
  423. return rq2;
  424. if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
  425. return rq1;
  426. else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
  427. return rq2;
  428. s1 = blk_rq_pos(rq1);
  429. s2 = blk_rq_pos(rq2);
  430. /*
  431. * By definition, 1KiB is 2 sectors.
  432. */
  433. back_max = bfqd->bfq_back_max * 2;
  434. /*
  435. * Strict one way elevator _except_ in the case where we allow
  436. * short backward seeks which are biased as twice the cost of a
  437. * similar forward seek.
  438. */
  439. if (s1 >= last)
  440. d1 = s1 - last;
  441. else if (s1 + back_max >= last)
  442. d1 = (last - s1) * bfqd->bfq_back_penalty;
  443. else
  444. wrap |= BFQ_RQ1_WRAP;
  445. if (s2 >= last)
  446. d2 = s2 - last;
  447. else if (s2 + back_max >= last)
  448. d2 = (last - s2) * bfqd->bfq_back_penalty;
  449. else
  450. wrap |= BFQ_RQ2_WRAP;
  451. /* Found required data */
  452. /*
  453. * By doing switch() on the bit mask "wrap" we avoid having to
  454. * check two variables for all permutations: --> faster!
  455. */
  456. switch (wrap) {
  457. case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
  458. if (d1 < d2)
  459. return rq1;
  460. else if (d2 < d1)
  461. return rq2;
  462. if (s1 >= s2)
  463. return rq1;
  464. else
  465. return rq2;
  466. case BFQ_RQ2_WRAP:
  467. return rq1;
  468. case BFQ_RQ1_WRAP:
  469. return rq2;
  470. case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
  471. default:
  472. /*
  473. * Since both rqs are wrapped,
  474. * start with the one that's further behind head
  475. * (--> only *one* back seek required),
  476. * since back seek takes more time than forward.
  477. */
  478. if (s1 <= s2)
  479. return rq1;
  480. else
  481. return rq2;
  482. }
  483. }
  484. /*
  485. * Async I/O can easily starve sync I/O (both sync reads and sync
  486. * writes), by consuming all tags. Similarly, storms of sync writes,
  487. * such as those that sync(2) may trigger, can starve sync reads.
  488. * Limit depths of async I/O and sync writes so as to counter both
  489. * problems.
  490. */
  491. static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
  492. {
  493. struct bfq_data *bfqd = data->q->elevator->elevator_data;
  494. if (op_is_sync(op) && !op_is_write(op))
  495. return;
  496. data->shallow_depth =
  497. bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
  498. bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
  499. __func__, bfqd->wr_busy_queues, op_is_sync(op),
  500. data->shallow_depth);
  501. }
  502. static struct bfq_queue *
  503. bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
  504. sector_t sector, struct rb_node **ret_parent,
  505. struct rb_node ***rb_link)
  506. {
  507. struct rb_node **p, *parent;
  508. struct bfq_queue *bfqq = NULL;
  509. parent = NULL;
  510. p = &root->rb_node;
  511. while (*p) {
  512. struct rb_node **n;
  513. parent = *p;
  514. bfqq = rb_entry(parent, struct bfq_queue, pos_node);
  515. /*
  516. * Sort strictly based on sector. Smallest to the left,
  517. * largest to the right.
  518. */
  519. if (sector > blk_rq_pos(bfqq->next_rq))
  520. n = &(*p)->rb_right;
  521. else if (sector < blk_rq_pos(bfqq->next_rq))
  522. n = &(*p)->rb_left;
  523. else
  524. break;
  525. p = n;
  526. bfqq = NULL;
  527. }
  528. *ret_parent = parent;
  529. if (rb_link)
  530. *rb_link = p;
  531. bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
  532. (unsigned long long)sector,
  533. bfqq ? bfqq->pid : 0);
  534. return bfqq;
  535. }
  536. static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
  537. {
  538. return bfqq->service_from_backlogged > 0 &&
  539. time_is_before_jiffies(bfqq->first_IO_time +
  540. bfq_merge_time_limit);
  541. }
  542. /*
  543. * The following function is not marked as __cold because it is
  544. * actually cold, but for the same performance goal described in the
  545. * comments on the likely() at the beginning of
  546. * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
  547. * execution time for the case where this function is not invoked, we
  548. * had to add an unlikely() in each involved if().
  549. */
  550. void __cold
  551. bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  552. {
  553. struct rb_node **p, *parent;
  554. struct bfq_queue *__bfqq;
  555. if (bfqq->pos_root) {
  556. rb_erase(&bfqq->pos_node, bfqq->pos_root);
  557. bfqq->pos_root = NULL;
  558. }
  559. /* oom_bfqq does not participate in queue merging */
  560. if (bfqq == &bfqd->oom_bfqq)
  561. return;
  562. /*
  563. * bfqq cannot be merged any longer (see comments in
  564. * bfq_setup_cooperator): no point in adding bfqq into the
  565. * position tree.
  566. */
  567. if (bfq_too_late_for_merging(bfqq))
  568. return;
  569. if (bfq_class_idle(bfqq))
  570. return;
  571. if (!bfqq->next_rq)
  572. return;
  573. bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
  574. __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
  575. blk_rq_pos(bfqq->next_rq), &parent, &p);
  576. if (!__bfqq) {
  577. rb_link_node(&bfqq->pos_node, parent, p);
  578. rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
  579. } else
  580. bfqq->pos_root = NULL;
  581. }
  582. /*
  583. * The following function returns false either if every active queue
  584. * must receive the same share of the throughput (symmetric scenario),
  585. * or, as a special case, if bfqq must receive a share of the
  586. * throughput lower than or equal to the share that every other active
  587. * queue must receive. If bfqq does sync I/O, then these are the only
  588. * two cases where bfqq happens to be guaranteed its share of the
  589. * throughput even if I/O dispatching is not plugged when bfqq remains
  590. * temporarily empty (for more details, see the comments in the
  591. * function bfq_better_to_idle()). For this reason, the return value
  592. * of this function is used to check whether I/O-dispatch plugging can
  593. * be avoided.
  594. *
  595. * The above first case (symmetric scenario) occurs when:
  596. * 1) all active queues have the same weight,
  597. * 2) all active queues belong to the same I/O-priority class,
  598. * 3) all active groups at the same level in the groups tree have the same
  599. * weight,
  600. * 4) all active groups at the same level in the groups tree have the same
  601. * number of children.
  602. *
  603. * Unfortunately, keeping the necessary state for evaluating exactly
  604. * the last two symmetry sub-conditions above would be quite complex
  605. * and time consuming. Therefore this function evaluates, instead,
  606. * only the following stronger three sub-conditions, for which it is
  607. * much easier to maintain the needed state:
  608. * 1) all active queues have the same weight,
  609. * 2) all active queues belong to the same I/O-priority class,
  610. * 3) there are no active groups.
  611. * In particular, the last condition is always true if hierarchical
  612. * support or the cgroups interface are not enabled, thus no state
  613. * needs to be maintained in this case.
  614. */
  615. static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
  616. struct bfq_queue *bfqq)
  617. {
  618. bool smallest_weight = bfqq &&
  619. bfqq->weight_counter &&
  620. bfqq->weight_counter ==
  621. container_of(
  622. rb_first_cached(&bfqd->queue_weights_tree),
  623. struct bfq_weight_counter,
  624. weights_node);
  625. /*
  626. * For queue weights to differ, queue_weights_tree must contain
  627. * at least two nodes.
  628. */
  629. bool varied_queue_weights = !smallest_weight &&
  630. !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
  631. (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
  632. bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
  633. bool multiple_classes_busy =
  634. (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
  635. (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
  636. (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
  637. return varied_queue_weights || multiple_classes_busy
  638. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  639. || bfqd->num_groups_with_pending_reqs > 0
  640. #endif
  641. ;
  642. }
  643. /*
  644. * If the weight-counter tree passed as input contains no counter for
  645. * the weight of the input queue, then add that counter; otherwise just
  646. * increment the existing counter.
  647. *
  648. * Note that weight-counter trees contain few nodes in mostly symmetric
  649. * scenarios. For example, if all queues have the same weight, then the
  650. * weight-counter tree for the queues may contain at most one node.
  651. * This holds even if low_latency is on, because weight-raised queues
  652. * are not inserted in the tree.
  653. * In most scenarios, the rate at which nodes are created/destroyed
  654. * should be low too.
  655. */
  656. void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  657. struct rb_root_cached *root)
  658. {
  659. struct bfq_entity *entity = &bfqq->entity;
  660. struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
  661. bool leftmost = true;
  662. /*
  663. * Do not insert if the queue is already associated with a
  664. * counter, which happens if:
  665. * 1) a request arrival has caused the queue to become both
  666. * non-weight-raised, and hence change its weight, and
  667. * backlogged; in this respect, each of the two events
  668. * causes an invocation of this function,
  669. * 2) this is the invocation of this function caused by the
  670. * second event. This second invocation is actually useless,
  671. * and we handle this fact by exiting immediately. More
  672. * efficient or clearer solutions might possibly be adopted.
  673. */
  674. if (bfqq->weight_counter)
  675. return;
  676. while (*new) {
  677. struct bfq_weight_counter *__counter = container_of(*new,
  678. struct bfq_weight_counter,
  679. weights_node);
  680. parent = *new;
  681. if (entity->weight == __counter->weight) {
  682. bfqq->weight_counter = __counter;
  683. goto inc_counter;
  684. }
  685. if (entity->weight < __counter->weight)
  686. new = &((*new)->rb_left);
  687. else {
  688. new = &((*new)->rb_right);
  689. leftmost = false;
  690. }
  691. }
  692. bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
  693. GFP_ATOMIC);
  694. /*
  695. * In the unlucky event of an allocation failure, we just
  696. * exit. This will cause the weight of queue to not be
  697. * considered in bfq_asymmetric_scenario, which, in its turn,
  698. * causes the scenario to be deemed wrongly symmetric in case
  699. * bfqq's weight would have been the only weight making the
  700. * scenario asymmetric. On the bright side, no unbalance will
  701. * however occur when bfqq becomes inactive again (the
  702. * invocation of this function is triggered by an activation
  703. * of queue). In fact, bfq_weights_tree_remove does nothing
  704. * if !bfqq->weight_counter.
  705. */
  706. if (unlikely(!bfqq->weight_counter))
  707. return;
  708. bfqq->weight_counter->weight = entity->weight;
  709. rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
  710. rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
  711. leftmost);
  712. inc_counter:
  713. bfqq->weight_counter->num_active++;
  714. bfqq->ref++;
  715. }
  716. /*
  717. * Decrement the weight counter associated with the queue, and, if the
  718. * counter reaches 0, remove the counter from the tree.
  719. * See the comments to the function bfq_weights_tree_add() for considerations
  720. * about overhead.
  721. */
  722. void __bfq_weights_tree_remove(struct bfq_data *bfqd,
  723. struct bfq_queue *bfqq,
  724. struct rb_root_cached *root)
  725. {
  726. if (!bfqq->weight_counter)
  727. return;
  728. bfqq->weight_counter->num_active--;
  729. if (bfqq->weight_counter->num_active > 0)
  730. goto reset_entity_pointer;
  731. rb_erase_cached(&bfqq->weight_counter->weights_node, root);
  732. kfree(bfqq->weight_counter);
  733. reset_entity_pointer:
  734. bfqq->weight_counter = NULL;
  735. bfq_put_queue(bfqq);
  736. }
  737. /*
  738. * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
  739. * of active groups for each queue's inactive parent entity.
  740. */
  741. void bfq_weights_tree_remove(struct bfq_data *bfqd,
  742. struct bfq_queue *bfqq)
  743. {
  744. struct bfq_entity *entity = bfqq->entity.parent;
  745. for_each_entity(entity) {
  746. struct bfq_sched_data *sd = entity->my_sched_data;
  747. if (sd->next_in_service || sd->in_service_entity) {
  748. /*
  749. * entity is still active, because either
  750. * next_in_service or in_service_entity is not
  751. * NULL (see the comments on the definition of
  752. * next_in_service for details on why
  753. * in_service_entity must be checked too).
  754. *
  755. * As a consequence, its parent entities are
  756. * active as well, and thus this loop must
  757. * stop here.
  758. */
  759. break;
  760. }
  761. /*
  762. * The decrement of num_groups_with_pending_reqs is
  763. * not performed immediately upon the deactivation of
  764. * entity, but it is delayed to when it also happens
  765. * that the first leaf descendant bfqq of entity gets
  766. * all its pending requests completed. The following
  767. * instructions perform this delayed decrement, if
  768. * needed. See the comments on
  769. * num_groups_with_pending_reqs for details.
  770. */
  771. if (entity->in_groups_with_pending_reqs) {
  772. entity->in_groups_with_pending_reqs = false;
  773. bfqd->num_groups_with_pending_reqs--;
  774. }
  775. }
  776. /*
  777. * Next function is invoked last, because it causes bfqq to be
  778. * freed if the following holds: bfqq is not in service and
  779. * has no dispatched request. DO NOT use bfqq after the next
  780. * function invocation.
  781. */
  782. __bfq_weights_tree_remove(bfqd, bfqq,
  783. &bfqd->queue_weights_tree);
  784. }
  785. /*
  786. * Return expired entry, or NULL to just start from scratch in rbtree.
  787. */
  788. static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
  789. struct request *last)
  790. {
  791. struct request *rq;
  792. if (bfq_bfqq_fifo_expire(bfqq))
  793. return NULL;
  794. bfq_mark_bfqq_fifo_expire(bfqq);
  795. rq = rq_entry_fifo(bfqq->fifo.next);
  796. if (rq == last || ktime_get_ns() < rq->fifo_time)
  797. return NULL;
  798. bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
  799. return rq;
  800. }
  801. static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
  802. struct bfq_queue *bfqq,
  803. struct request *last)
  804. {
  805. struct rb_node *rbnext = rb_next(&last->rb_node);
  806. struct rb_node *rbprev = rb_prev(&last->rb_node);
  807. struct request *next, *prev = NULL;
  808. /* Follow expired path, else get first next available. */
  809. next = bfq_check_fifo(bfqq, last);
  810. if (next)
  811. return next;
  812. if (rbprev)
  813. prev = rb_entry_rq(rbprev);
  814. if (rbnext)
  815. next = rb_entry_rq(rbnext);
  816. else {
  817. rbnext = rb_first(&bfqq->sort_list);
  818. if (rbnext && rbnext != &last->rb_node)
  819. next = rb_entry_rq(rbnext);
  820. }
  821. return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
  822. }
  823. /* see the definition of bfq_async_charge_factor for details */
  824. static unsigned long bfq_serv_to_charge(struct request *rq,
  825. struct bfq_queue *bfqq)
  826. {
  827. if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
  828. bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
  829. return blk_rq_sectors(rq);
  830. return blk_rq_sectors(rq) * bfq_async_charge_factor;
  831. }
  832. /**
  833. * bfq_updated_next_req - update the queue after a new next_rq selection.
  834. * @bfqd: the device data the queue belongs to.
  835. * @bfqq: the queue to update.
  836. *
  837. * If the first request of a queue changes we make sure that the queue
  838. * has enough budget to serve at least its first request (if the
  839. * request has grown). We do this because if the queue has not enough
  840. * budget for its first request, it has to go through two dispatch
  841. * rounds to actually get it dispatched.
  842. */
  843. static void bfq_updated_next_req(struct bfq_data *bfqd,
  844. struct bfq_queue *bfqq)
  845. {
  846. struct bfq_entity *entity = &bfqq->entity;
  847. struct request *next_rq = bfqq->next_rq;
  848. unsigned long new_budget;
  849. if (!next_rq)
  850. return;
  851. if (bfqq == bfqd->in_service_queue)
  852. /*
  853. * In order not to break guarantees, budgets cannot be
  854. * changed after an entity has been selected.
  855. */
  856. return;
  857. new_budget = max_t(unsigned long,
  858. max_t(unsigned long, bfqq->max_budget,
  859. bfq_serv_to_charge(next_rq, bfqq)),
  860. entity->service);
  861. if (entity->budget != new_budget) {
  862. entity->budget = new_budget;
  863. bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
  864. new_budget);
  865. bfq_requeue_bfqq(bfqd, bfqq, false);
  866. }
  867. }
  868. static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
  869. {
  870. u64 dur;
  871. if (bfqd->bfq_wr_max_time > 0)
  872. return bfqd->bfq_wr_max_time;
  873. dur = bfqd->rate_dur_prod;
  874. do_div(dur, bfqd->peak_rate);
  875. /*
  876. * Limit duration between 3 and 25 seconds. The upper limit
  877. * has been conservatively set after the following worst case:
  878. * on a QEMU/KVM virtual machine
  879. * - running in a slow PC
  880. * - with a virtual disk stacked on a slow low-end 5400rpm HDD
  881. * - serving a heavy I/O workload, such as the sequential reading
  882. * of several files
  883. * mplayer took 23 seconds to start, if constantly weight-raised.
  884. *
  885. * As for higher values than that accommodating the above bad
  886. * scenario, tests show that higher values would often yield
  887. * the opposite of the desired result, i.e., would worsen
  888. * responsiveness by allowing non-interactive applications to
  889. * preserve weight raising for too long.
  890. *
  891. * On the other end, lower values than 3 seconds make it
  892. * difficult for most interactive tasks to complete their jobs
  893. * before weight-raising finishes.
  894. */
  895. return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000));
  896. }
  897. /* switch back from soft real-time to interactive weight raising */
  898. static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
  899. struct bfq_data *bfqd)
  900. {
  901. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  902. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  903. bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
  904. }
  905. static void
  906. bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
  907. struct bfq_io_cq *bic, bool bfq_already_existing)
  908. {
  909. unsigned int old_wr_coeff = bfqq->wr_coeff;
  910. bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
  911. if (bic->saved_has_short_ttime)
  912. bfq_mark_bfqq_has_short_ttime(bfqq);
  913. else
  914. bfq_clear_bfqq_has_short_ttime(bfqq);
  915. if (bic->saved_IO_bound)
  916. bfq_mark_bfqq_IO_bound(bfqq);
  917. else
  918. bfq_clear_bfqq_IO_bound(bfqq);
  919. bfqq->entity.new_weight = bic->saved_weight;
  920. bfqq->ttime = bic->saved_ttime;
  921. bfqq->wr_coeff = bic->saved_wr_coeff;
  922. bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
  923. bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
  924. bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
  925. if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
  926. time_is_before_jiffies(bfqq->last_wr_start_finish +
  927. bfqq->wr_cur_max_time))) {
  928. if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
  929. !bfq_bfqq_in_large_burst(bfqq) &&
  930. time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
  931. bfq_wr_duration(bfqd))) {
  932. switch_back_to_interactive_wr(bfqq, bfqd);
  933. } else {
  934. bfqq->wr_coeff = 1;
  935. bfq_log_bfqq(bfqq->bfqd, bfqq,
  936. "resume state: switching off wr");
  937. }
  938. }
  939. /* make sure weight will be updated, however we got here */
  940. bfqq->entity.prio_changed = 1;
  941. if (likely(!busy))
  942. return;
  943. if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
  944. bfqd->wr_busy_queues++;
  945. else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
  946. bfqd->wr_busy_queues--;
  947. }
  948. static int bfqq_process_refs(struct bfq_queue *bfqq)
  949. {
  950. return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv -
  951. (bfqq->weight_counter != NULL);
  952. }
  953. /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
  954. static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  955. {
  956. struct bfq_queue *item;
  957. struct hlist_node *n;
  958. hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
  959. hlist_del_init(&item->burst_list_node);
  960. /*
  961. * Start the creation of a new burst list only if there is no
  962. * active queue. See comments on the conditional invocation of
  963. * bfq_handle_burst().
  964. */
  965. if (bfq_tot_busy_queues(bfqd) == 0) {
  966. hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
  967. bfqd->burst_size = 1;
  968. } else
  969. bfqd->burst_size = 0;
  970. bfqd->burst_parent_entity = bfqq->entity.parent;
  971. }
  972. /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
  973. static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  974. {
  975. /* Increment burst size to take into account also bfqq */
  976. bfqd->burst_size++;
  977. if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
  978. struct bfq_queue *pos, *bfqq_item;
  979. struct hlist_node *n;
  980. /*
  981. * Enough queues have been activated shortly after each
  982. * other to consider this burst as large.
  983. */
  984. bfqd->large_burst = true;
  985. /*
  986. * We can now mark all queues in the burst list as
  987. * belonging to a large burst.
  988. */
  989. hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
  990. burst_list_node)
  991. bfq_mark_bfqq_in_large_burst(bfqq_item);
  992. bfq_mark_bfqq_in_large_burst(bfqq);
  993. /*
  994. * From now on, and until the current burst finishes, any
  995. * new queue being activated shortly after the last queue
  996. * was inserted in the burst can be immediately marked as
  997. * belonging to a large burst. So the burst list is not
  998. * needed any more. Remove it.
  999. */
  1000. hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
  1001. burst_list_node)
  1002. hlist_del_init(&pos->burst_list_node);
  1003. } else /*
  1004. * Burst not yet large: add bfqq to the burst list. Do
  1005. * not increment the ref counter for bfqq, because bfqq
  1006. * is removed from the burst list before freeing bfqq
  1007. * in put_queue.
  1008. */
  1009. hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
  1010. }
  1011. /*
  1012. * If many queues belonging to the same group happen to be created
  1013. * shortly after each other, then the processes associated with these
  1014. * queues have typically a common goal. In particular, bursts of queue
  1015. * creations are usually caused by services or applications that spawn
  1016. * many parallel threads/processes. Examples are systemd during boot,
  1017. * or git grep. To help these processes get their job done as soon as
  1018. * possible, it is usually better to not grant either weight-raising
  1019. * or device idling to their queues, unless these queues must be
  1020. * protected from the I/O flowing through other active queues.
  1021. *
  1022. * In this comment we describe, firstly, the reasons why this fact
  1023. * holds, and, secondly, the next function, which implements the main
  1024. * steps needed to properly mark these queues so that they can then be
  1025. * treated in a different way.
  1026. *
  1027. * The above services or applications benefit mostly from a high
  1028. * throughput: the quicker the requests of the activated queues are
  1029. * cumulatively served, the sooner the target job of these queues gets
  1030. * completed. As a consequence, weight-raising any of these queues,
  1031. * which also implies idling the device for it, is almost always
  1032. * counterproductive, unless there are other active queues to isolate
  1033. * these new queues from. If there no other active queues, then
  1034. * weight-raising these new queues just lowers throughput in most
  1035. * cases.
  1036. *
  1037. * On the other hand, a burst of queue creations may be caused also by
  1038. * the start of an application that does not consist of a lot of
  1039. * parallel I/O-bound threads. In fact, with a complex application,
  1040. * several short processes may need to be executed to start-up the
  1041. * application. In this respect, to start an application as quickly as
  1042. * possible, the best thing to do is in any case to privilege the I/O
  1043. * related to the application with respect to all other
  1044. * I/O. Therefore, the best strategy to start as quickly as possible
  1045. * an application that causes a burst of queue creations is to
  1046. * weight-raise all the queues created during the burst. This is the
  1047. * exact opposite of the best strategy for the other type of bursts.
  1048. *
  1049. * In the end, to take the best action for each of the two cases, the
  1050. * two types of bursts need to be distinguished. Fortunately, this
  1051. * seems relatively easy, by looking at the sizes of the bursts. In
  1052. * particular, we found a threshold such that only bursts with a
  1053. * larger size than that threshold are apparently caused by
  1054. * services or commands such as systemd or git grep. For brevity,
  1055. * hereafter we call just 'large' these bursts. BFQ *does not*
  1056. * weight-raise queues whose creation occurs in a large burst. In
  1057. * addition, for each of these queues BFQ performs or does not perform
  1058. * idling depending on which choice boosts the throughput more. The
  1059. * exact choice depends on the device and request pattern at
  1060. * hand.
  1061. *
  1062. * Unfortunately, false positives may occur while an interactive task
  1063. * is starting (e.g., an application is being started). The
  1064. * consequence is that the queues associated with the task do not
  1065. * enjoy weight raising as expected. Fortunately these false positives
  1066. * are very rare. They typically occur if some service happens to
  1067. * start doing I/O exactly when the interactive task starts.
  1068. *
  1069. * Turning back to the next function, it is invoked only if there are
  1070. * no active queues (apart from active queues that would belong to the
  1071. * same, possible burst bfqq would belong to), and it implements all
  1072. * the steps needed to detect the occurrence of a large burst and to
  1073. * properly mark all the queues belonging to it (so that they can then
  1074. * be treated in a different way). This goal is achieved by
  1075. * maintaining a "burst list" that holds, temporarily, the queues that
  1076. * belong to the burst in progress. The list is then used to mark
  1077. * these queues as belonging to a large burst if the burst does become
  1078. * large. The main steps are the following.
  1079. *
  1080. * . when the very first queue is created, the queue is inserted into the
  1081. * list (as it could be the first queue in a possible burst)
  1082. *
  1083. * . if the current burst has not yet become large, and a queue Q that does
  1084. * not yet belong to the burst is activated shortly after the last time
  1085. * at which a new queue entered the burst list, then the function appends
  1086. * Q to the burst list
  1087. *
  1088. * . if, as a consequence of the previous step, the burst size reaches
  1089. * the large-burst threshold, then
  1090. *
  1091. * . all the queues in the burst list are marked as belonging to a
  1092. * large burst
  1093. *
  1094. * . the burst list is deleted; in fact, the burst list already served
  1095. * its purpose (keeping temporarily track of the queues in a burst,
  1096. * so as to be able to mark them as belonging to a large burst in the
  1097. * previous sub-step), and now is not needed any more
  1098. *
  1099. * . the device enters a large-burst mode
  1100. *
  1101. * . if a queue Q that does not belong to the burst is created while
  1102. * the device is in large-burst mode and shortly after the last time
  1103. * at which a queue either entered the burst list or was marked as
  1104. * belonging to the current large burst, then Q is immediately marked
  1105. * as belonging to a large burst.
  1106. *
  1107. * . if a queue Q that does not belong to the burst is created a while
  1108. * later, i.e., not shortly after, than the last time at which a queue
  1109. * either entered the burst list or was marked as belonging to the
  1110. * current large burst, then the current burst is deemed as finished and:
  1111. *
  1112. * . the large-burst mode is reset if set
  1113. *
  1114. * . the burst list is emptied
  1115. *
  1116. * . Q is inserted in the burst list, as Q may be the first queue
  1117. * in a possible new burst (then the burst list contains just Q
  1118. * after this step).
  1119. */
  1120. static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  1121. {
  1122. /*
  1123. * If bfqq is already in the burst list or is part of a large
  1124. * burst, or finally has just been split, then there is
  1125. * nothing else to do.
  1126. */
  1127. if (!hlist_unhashed(&bfqq->burst_list_node) ||
  1128. bfq_bfqq_in_large_burst(bfqq) ||
  1129. time_is_after_eq_jiffies(bfqq->split_time +
  1130. msecs_to_jiffies(10)))
  1131. return;
  1132. /*
  1133. * If bfqq's creation happens late enough, or bfqq belongs to
  1134. * a different group than the burst group, then the current
  1135. * burst is finished, and related data structures must be
  1136. * reset.
  1137. *
  1138. * In this respect, consider the special case where bfqq is
  1139. * the very first queue created after BFQ is selected for this
  1140. * device. In this case, last_ins_in_burst and
  1141. * burst_parent_entity are not yet significant when we get
  1142. * here. But it is easy to verify that, whether or not the
  1143. * following condition is true, bfqq will end up being
  1144. * inserted into the burst list. In particular the list will
  1145. * happen to contain only bfqq. And this is exactly what has
  1146. * to happen, as bfqq may be the first queue of the first
  1147. * burst.
  1148. */
  1149. if (time_is_before_jiffies(bfqd->last_ins_in_burst +
  1150. bfqd->bfq_burst_interval) ||
  1151. bfqq->entity.parent != bfqd->burst_parent_entity) {
  1152. bfqd->large_burst = false;
  1153. bfq_reset_burst_list(bfqd, bfqq);
  1154. goto end;
  1155. }
  1156. /*
  1157. * If we get here, then bfqq is being activated shortly after the
  1158. * last queue. So, if the current burst is also large, we can mark
  1159. * bfqq as belonging to this large burst immediately.
  1160. */
  1161. if (bfqd->large_burst) {
  1162. bfq_mark_bfqq_in_large_burst(bfqq);
  1163. goto end;
  1164. }
  1165. /*
  1166. * If we get here, then a large-burst state has not yet been
  1167. * reached, but bfqq is being activated shortly after the last
  1168. * queue. Then we add bfqq to the burst.
  1169. */
  1170. bfq_add_to_burst(bfqd, bfqq);
  1171. end:
  1172. /*
  1173. * At this point, bfqq either has been added to the current
  1174. * burst or has caused the current burst to terminate and a
  1175. * possible new burst to start. In particular, in the second
  1176. * case, bfqq has become the first queue in the possible new
  1177. * burst. In both cases last_ins_in_burst needs to be moved
  1178. * forward.
  1179. */
  1180. bfqd->last_ins_in_burst = jiffies;
  1181. }
  1182. static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
  1183. {
  1184. struct bfq_entity *entity = &bfqq->entity;
  1185. return entity->budget - entity->service;
  1186. }
  1187. /*
  1188. * If enough samples have been computed, return the current max budget
  1189. * stored in bfqd, which is dynamically updated according to the
  1190. * estimated disk peak rate; otherwise return the default max budget
  1191. */
  1192. static int bfq_max_budget(struct bfq_data *bfqd)
  1193. {
  1194. if (bfqd->budgets_assigned < bfq_stats_min_budgets)
  1195. return bfq_default_max_budget;
  1196. else
  1197. return bfqd->bfq_max_budget;
  1198. }
  1199. /*
  1200. * Return min budget, which is a fraction of the current or default
  1201. * max budget (trying with 1/32)
  1202. */
  1203. static int bfq_min_budget(struct bfq_data *bfqd)
  1204. {
  1205. if (bfqd->budgets_assigned < bfq_stats_min_budgets)
  1206. return bfq_default_max_budget / 32;
  1207. else
  1208. return bfqd->bfq_max_budget / 32;
  1209. }
  1210. /*
  1211. * The next function, invoked after the input queue bfqq switches from
  1212. * idle to busy, updates the budget of bfqq. The function also tells
  1213. * whether the in-service queue should be expired, by returning
  1214. * true. The purpose of expiring the in-service queue is to give bfqq
  1215. * the chance to possibly preempt the in-service queue, and the reason
  1216. * for preempting the in-service queue is to achieve one of the two
  1217. * goals below.
  1218. *
  1219. * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
  1220. * expired because it has remained idle. In particular, bfqq may have
  1221. * expired for one of the following two reasons:
  1222. *
  1223. * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
  1224. * and did not make it to issue a new request before its last
  1225. * request was served;
  1226. *
  1227. * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
  1228. * a new request before the expiration of the idling-time.
  1229. *
  1230. * Even if bfqq has expired for one of the above reasons, the process
  1231. * associated with the queue may be however issuing requests greedily,
  1232. * and thus be sensitive to the bandwidth it receives (bfqq may have
  1233. * remained idle for other reasons: CPU high load, bfqq not enjoying
  1234. * idling, I/O throttling somewhere in the path from the process to
  1235. * the I/O scheduler, ...). But if, after every expiration for one of
  1236. * the above two reasons, bfqq has to wait for the service of at least
  1237. * one full budget of another queue before being served again, then
  1238. * bfqq is likely to get a much lower bandwidth or resource time than
  1239. * its reserved ones. To address this issue, two countermeasures need
  1240. * to be taken.
  1241. *
  1242. * First, the budget and the timestamps of bfqq need to be updated in
  1243. * a special way on bfqq reactivation: they need to be updated as if
  1244. * bfqq did not remain idle and did not expire. In fact, if they are
  1245. * computed as if bfqq expired and remained idle until reactivation,
  1246. * then the process associated with bfqq is treated as if, instead of
  1247. * being greedy, it stopped issuing requests when bfqq remained idle,
  1248. * and restarts issuing requests only on this reactivation. In other
  1249. * words, the scheduler does not help the process recover the "service
  1250. * hole" between bfqq expiration and reactivation. As a consequence,
  1251. * the process receives a lower bandwidth than its reserved one. In
  1252. * contrast, to recover this hole, the budget must be updated as if
  1253. * bfqq was not expired at all before this reactivation, i.e., it must
  1254. * be set to the value of the remaining budget when bfqq was
  1255. * expired. Along the same line, timestamps need to be assigned the
  1256. * value they had the last time bfqq was selected for service, i.e.,
  1257. * before last expiration. Thus timestamps need to be back-shifted
  1258. * with respect to their normal computation (see [1] for more details
  1259. * on this tricky aspect).
  1260. *
  1261. * Secondly, to allow the process to recover the hole, the in-service
  1262. * queue must be expired too, to give bfqq the chance to preempt it
  1263. * immediately. In fact, if bfqq has to wait for a full budget of the
  1264. * in-service queue to be completed, then it may become impossible to
  1265. * let the process recover the hole, even if the back-shifted
  1266. * timestamps of bfqq are lower than those of the in-service queue. If
  1267. * this happens for most or all of the holes, then the process may not
  1268. * receive its reserved bandwidth. In this respect, it is worth noting
  1269. * that, being the service of outstanding requests unpreemptible, a
  1270. * little fraction of the holes may however be unrecoverable, thereby
  1271. * causing a little loss of bandwidth.
  1272. *
  1273. * The last important point is detecting whether bfqq does need this
  1274. * bandwidth recovery. In this respect, the next function deems the
  1275. * process associated with bfqq greedy, and thus allows it to recover
  1276. * the hole, if: 1) the process is waiting for the arrival of a new
  1277. * request (which implies that bfqq expired for one of the above two
  1278. * reasons), and 2) such a request has arrived soon. The first
  1279. * condition is controlled through the flag non_blocking_wait_rq,
  1280. * while the second through the flag arrived_in_time. If both
  1281. * conditions hold, then the function computes the budget in the
  1282. * above-described special way, and signals that the in-service queue
  1283. * should be expired. Timestamp back-shifting is done later in
  1284. * __bfq_activate_entity.
  1285. *
  1286. * 2. Reduce latency. Even if timestamps are not backshifted to let
  1287. * the process associated with bfqq recover a service hole, bfqq may
  1288. * however happen to have, after being (re)activated, a lower finish
  1289. * timestamp than the in-service queue. That is, the next budget of
  1290. * bfqq may have to be completed before the one of the in-service
  1291. * queue. If this is the case, then preempting the in-service queue
  1292. * allows this goal to be achieved, apart from the unpreemptible,
  1293. * outstanding requests mentioned above.
  1294. *
  1295. * Unfortunately, regardless of which of the above two goals one wants
  1296. * to achieve, service trees need first to be updated to know whether
  1297. * the in-service queue must be preempted. To have service trees
  1298. * correctly updated, the in-service queue must be expired and
  1299. * rescheduled, and bfqq must be scheduled too. This is one of the
  1300. * most costly operations (in future versions, the scheduling
  1301. * mechanism may be re-designed in such a way to make it possible to
  1302. * know whether preemption is needed without needing to update service
  1303. * trees). In addition, queue preemptions almost always cause random
  1304. * I/O, which may in turn cause loss of throughput. Finally, there may
  1305. * even be no in-service queue when the next function is invoked (so,
  1306. * no queue to compare timestamps with). Because of these facts, the
  1307. * next function adopts the following simple scheme to avoid costly
  1308. * operations, too frequent preemptions and too many dependencies on
  1309. * the state of the scheduler: it requests the expiration of the
  1310. * in-service queue (unconditionally) only for queues that need to
  1311. * recover a hole. Then it delegates to other parts of the code the
  1312. * responsibility of handling the above case 2.
  1313. */
  1314. static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
  1315. struct bfq_queue *bfqq,
  1316. bool arrived_in_time)
  1317. {
  1318. struct bfq_entity *entity = &bfqq->entity;
  1319. /*
  1320. * In the next compound condition, we check also whether there
  1321. * is some budget left, because otherwise there is no point in
  1322. * trying to go on serving bfqq with this same budget: bfqq
  1323. * would be expired immediately after being selected for
  1324. * service. This would only cause useless overhead.
  1325. */
  1326. if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
  1327. bfq_bfqq_budget_left(bfqq) > 0) {
  1328. /*
  1329. * We do not clear the flag non_blocking_wait_rq here, as
  1330. * the latter is used in bfq_activate_bfqq to signal
  1331. * that timestamps need to be back-shifted (and is
  1332. * cleared right after).
  1333. */
  1334. /*
  1335. * In next assignment we rely on that either
  1336. * entity->service or entity->budget are not updated
  1337. * on expiration if bfqq is empty (see
  1338. * __bfq_bfqq_recalc_budget). Thus both quantities
  1339. * remain unchanged after such an expiration, and the
  1340. * following statement therefore assigns to
  1341. * entity->budget the remaining budget on such an
  1342. * expiration.
  1343. */
  1344. entity->budget = min_t(unsigned long,
  1345. bfq_bfqq_budget_left(bfqq),
  1346. bfqq->max_budget);
  1347. /*
  1348. * At this point, we have used entity->service to get
  1349. * the budget left (needed for updating
  1350. * entity->budget). Thus we finally can, and have to,
  1351. * reset entity->service. The latter must be reset
  1352. * because bfqq would otherwise be charged again for
  1353. * the service it has received during its previous
  1354. * service slot(s).
  1355. */
  1356. entity->service = 0;
  1357. return true;
  1358. }
  1359. /*
  1360. * We can finally complete expiration, by setting service to 0.
  1361. */
  1362. entity->service = 0;
  1363. entity->budget = max_t(unsigned long, bfqq->max_budget,
  1364. bfq_serv_to_charge(bfqq->next_rq, bfqq));
  1365. bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
  1366. return false;
  1367. }
  1368. /*
  1369. * Return the farthest past time instant according to jiffies
  1370. * macros.
  1371. */
  1372. static unsigned long bfq_smallest_from_now(void)
  1373. {
  1374. return jiffies - MAX_JIFFY_OFFSET;
  1375. }
  1376. static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
  1377. struct bfq_queue *bfqq,
  1378. unsigned int old_wr_coeff,
  1379. bool wr_or_deserves_wr,
  1380. bool interactive,
  1381. bool in_burst,
  1382. bool soft_rt)
  1383. {
  1384. if (old_wr_coeff == 1 && wr_or_deserves_wr) {
  1385. /* start a weight-raising period */
  1386. if (interactive) {
  1387. bfqq->service_from_wr = 0;
  1388. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  1389. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  1390. } else {
  1391. /*
  1392. * No interactive weight raising in progress
  1393. * here: assign minus infinity to
  1394. * wr_start_at_switch_to_srt, to make sure
  1395. * that, at the end of the soft-real-time
  1396. * weight raising periods that is starting
  1397. * now, no interactive weight-raising period
  1398. * may be wrongly considered as still in
  1399. * progress (and thus actually started by
  1400. * mistake).
  1401. */
  1402. bfqq->wr_start_at_switch_to_srt =
  1403. bfq_smallest_from_now();
  1404. bfqq->wr_coeff = bfqd->bfq_wr_coeff *
  1405. BFQ_SOFTRT_WEIGHT_FACTOR;
  1406. bfqq->wr_cur_max_time =
  1407. bfqd->bfq_wr_rt_max_time;
  1408. }
  1409. /*
  1410. * If needed, further reduce budget to make sure it is
  1411. * close to bfqq's backlog, so as to reduce the
  1412. * scheduling-error component due to a too large
  1413. * budget. Do not care about throughput consequences,
  1414. * but only about latency. Finally, do not assign a
  1415. * too small budget either, to avoid increasing
  1416. * latency by causing too frequent expirations.
  1417. */
  1418. bfqq->entity.budget = min_t(unsigned long,
  1419. bfqq->entity.budget,
  1420. 2 * bfq_min_budget(bfqd));
  1421. } else if (old_wr_coeff > 1) {
  1422. if (interactive) { /* update wr coeff and duration */
  1423. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  1424. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  1425. } else if (in_burst)
  1426. bfqq->wr_coeff = 1;
  1427. else if (soft_rt) {
  1428. /*
  1429. * The application is now or still meeting the
  1430. * requirements for being deemed soft rt. We
  1431. * can then correctly and safely (re)charge
  1432. * the weight-raising duration for the
  1433. * application with the weight-raising
  1434. * duration for soft rt applications.
  1435. *
  1436. * In particular, doing this recharge now, i.e.,
  1437. * before the weight-raising period for the
  1438. * application finishes, reduces the probability
  1439. * of the following negative scenario:
  1440. * 1) the weight of a soft rt application is
  1441. * raised at startup (as for any newly
  1442. * created application),
  1443. * 2) since the application is not interactive,
  1444. * at a certain time weight-raising is
  1445. * stopped for the application,
  1446. * 3) at that time the application happens to
  1447. * still have pending requests, and hence
  1448. * is destined to not have a chance to be
  1449. * deemed soft rt before these requests are
  1450. * completed (see the comments to the
  1451. * function bfq_bfqq_softrt_next_start()
  1452. * for details on soft rt detection),
  1453. * 4) these pending requests experience a high
  1454. * latency because the application is not
  1455. * weight-raised while they are pending.
  1456. */
  1457. if (bfqq->wr_cur_max_time !=
  1458. bfqd->bfq_wr_rt_max_time) {
  1459. bfqq->wr_start_at_switch_to_srt =
  1460. bfqq->last_wr_start_finish;
  1461. bfqq->wr_cur_max_time =
  1462. bfqd->bfq_wr_rt_max_time;
  1463. bfqq->wr_coeff = bfqd->bfq_wr_coeff *
  1464. BFQ_SOFTRT_WEIGHT_FACTOR;
  1465. }
  1466. bfqq->last_wr_start_finish = jiffies;
  1467. }
  1468. }
  1469. }
  1470. static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
  1471. struct bfq_queue *bfqq)
  1472. {
  1473. return bfqq->dispatched == 0 &&
  1474. time_is_before_jiffies(
  1475. bfqq->budget_timeout +
  1476. bfqd->bfq_wr_min_idle_time);
  1477. }
  1478. /*
  1479. * Return true if bfqq is in a higher priority class, or has a higher
  1480. * weight than the in-service queue.
  1481. */
  1482. static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
  1483. struct bfq_queue *in_serv_bfqq)
  1484. {
  1485. int bfqq_weight, in_serv_weight;
  1486. if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class)
  1487. return true;
  1488. if (in_serv_bfqq->entity.parent == bfqq->entity.parent) {
  1489. bfqq_weight = bfqq->entity.weight;
  1490. in_serv_weight = in_serv_bfqq->entity.weight;
  1491. } else {
  1492. if (bfqq->entity.parent)
  1493. bfqq_weight = bfqq->entity.parent->weight;
  1494. else
  1495. bfqq_weight = bfqq->entity.weight;
  1496. if (in_serv_bfqq->entity.parent)
  1497. in_serv_weight = in_serv_bfqq->entity.parent->weight;
  1498. else
  1499. in_serv_weight = in_serv_bfqq->entity.weight;
  1500. }
  1501. return bfqq_weight > in_serv_weight;
  1502. }
  1503. static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
  1504. struct bfq_queue *bfqq,
  1505. int old_wr_coeff,
  1506. struct request *rq,
  1507. bool *interactive)
  1508. {
  1509. bool soft_rt, in_burst, wr_or_deserves_wr,
  1510. bfqq_wants_to_preempt,
  1511. idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
  1512. /*
  1513. * See the comments on
  1514. * bfq_bfqq_update_budg_for_activation for
  1515. * details on the usage of the next variable.
  1516. */
  1517. arrived_in_time = ktime_get_ns() <=
  1518. bfqq->ttime.last_end_request +
  1519. bfqd->bfq_slice_idle * 3;
  1520. /*
  1521. * bfqq deserves to be weight-raised if:
  1522. * - it is sync,
  1523. * - it does not belong to a large burst,
  1524. * - it has been idle for enough time or is soft real-time,
  1525. * - is linked to a bfq_io_cq (it is not shared in any sense).
  1526. */
  1527. in_burst = bfq_bfqq_in_large_burst(bfqq);
  1528. soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
  1529. !BFQQ_TOTALLY_SEEKY(bfqq) &&
  1530. !in_burst &&
  1531. time_is_before_jiffies(bfqq->soft_rt_next_start) &&
  1532. bfqq->dispatched == 0;
  1533. *interactive = !in_burst && idle_for_long_time;
  1534. wr_or_deserves_wr = bfqd->low_latency &&
  1535. (bfqq->wr_coeff > 1 ||
  1536. (bfq_bfqq_sync(bfqq) &&
  1537. bfqq->bic && (*interactive || soft_rt)));
  1538. /*
  1539. * Using the last flag, update budget and check whether bfqq
  1540. * may want to preempt the in-service queue.
  1541. */
  1542. bfqq_wants_to_preempt =
  1543. bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
  1544. arrived_in_time);
  1545. /*
  1546. * If bfqq happened to be activated in a burst, but has been
  1547. * idle for much more than an interactive queue, then we
  1548. * assume that, in the overall I/O initiated in the burst, the
  1549. * I/O associated with bfqq is finished. So bfqq does not need
  1550. * to be treated as a queue belonging to a burst
  1551. * anymore. Accordingly, we reset bfqq's in_large_burst flag
  1552. * if set, and remove bfqq from the burst list if it's
  1553. * there. We do not decrement burst_size, because the fact
  1554. * that bfqq does not need to belong to the burst list any
  1555. * more does not invalidate the fact that bfqq was created in
  1556. * a burst.
  1557. */
  1558. if (likely(!bfq_bfqq_just_created(bfqq)) &&
  1559. idle_for_long_time &&
  1560. time_is_before_jiffies(
  1561. bfqq->budget_timeout +
  1562. msecs_to_jiffies(10000))) {
  1563. hlist_del_init(&bfqq->burst_list_node);
  1564. bfq_clear_bfqq_in_large_burst(bfqq);
  1565. }
  1566. bfq_clear_bfqq_just_created(bfqq);
  1567. if (!bfq_bfqq_IO_bound(bfqq)) {
  1568. if (arrived_in_time) {
  1569. bfqq->requests_within_timer++;
  1570. if (bfqq->requests_within_timer >=
  1571. bfqd->bfq_requests_within_timer)
  1572. bfq_mark_bfqq_IO_bound(bfqq);
  1573. } else
  1574. bfqq->requests_within_timer = 0;
  1575. }
  1576. if (bfqd->low_latency) {
  1577. if (unlikely(time_is_after_jiffies(bfqq->split_time)))
  1578. /* wraparound */
  1579. bfqq->split_time =
  1580. jiffies - bfqd->bfq_wr_min_idle_time - 1;
  1581. if (time_is_before_jiffies(bfqq->split_time +
  1582. bfqd->bfq_wr_min_idle_time)) {
  1583. bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
  1584. old_wr_coeff,
  1585. wr_or_deserves_wr,
  1586. *interactive,
  1587. in_burst,
  1588. soft_rt);
  1589. if (old_wr_coeff != bfqq->wr_coeff)
  1590. bfqq->entity.prio_changed = 1;
  1591. }
  1592. }
  1593. bfqq->last_idle_bklogged = jiffies;
  1594. bfqq->service_from_backlogged = 0;
  1595. bfq_clear_bfqq_softrt_update(bfqq);
  1596. bfq_add_bfqq_busy(bfqd, bfqq);
  1597. /*
  1598. * Expire in-service queue only if preemption may be needed
  1599. * for guarantees. In particular, we care only about two
  1600. * cases. The first is that bfqq has to recover a service
  1601. * hole, as explained in the comments on
  1602. * bfq_bfqq_update_budg_for_activation(), i.e., that
  1603. * bfqq_wants_to_preempt is true. However, if bfqq does not
  1604. * carry time-critical I/O, then bfqq's bandwidth is less
  1605. * important than that of queues that carry time-critical I/O.
  1606. * So, as a further constraint, we consider this case only if
  1607. * bfqq is at least as weight-raised, i.e., at least as time
  1608. * critical, as the in-service queue.
  1609. *
  1610. * The second case is that bfqq is in a higher priority class,
  1611. * or has a higher weight than the in-service queue. If this
  1612. * condition does not hold, we don't care because, even if
  1613. * bfqq does not start to be served immediately, the resulting
  1614. * delay for bfqq's I/O is however lower or much lower than
  1615. * the ideal completion time to be guaranteed to bfqq's I/O.
  1616. *
  1617. * In both cases, preemption is needed only if, according to
  1618. * the timestamps of both bfqq and of the in-service queue,
  1619. * bfqq actually is the next queue to serve. So, to reduce
  1620. * useless preemptions, the return value of
  1621. * next_queue_may_preempt() is considered in the next compound
  1622. * condition too. Yet next_queue_may_preempt() just checks a
  1623. * simple, necessary condition for bfqq to be the next queue
  1624. * to serve. In fact, to evaluate a sufficient condition, the
  1625. * timestamps of the in-service queue would need to be
  1626. * updated, and this operation is quite costly (see the
  1627. * comments on bfq_bfqq_update_budg_for_activation()).
  1628. */
  1629. if (bfqd->in_service_queue &&
  1630. ((bfqq_wants_to_preempt &&
  1631. bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
  1632. bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue)) &&
  1633. next_queue_may_preempt(bfqd))
  1634. bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
  1635. false, BFQQE_PREEMPTED);
  1636. }
  1637. static void bfq_reset_inject_limit(struct bfq_data *bfqd,
  1638. struct bfq_queue *bfqq)
  1639. {
  1640. /* invalidate baseline total service time */
  1641. bfqq->last_serv_time_ns = 0;
  1642. /*
  1643. * Reset pointer in case we are waiting for
  1644. * some request completion.
  1645. */
  1646. bfqd->waited_rq = NULL;
  1647. /*
  1648. * If bfqq has a short think time, then start by setting the
  1649. * inject limit to 0 prudentially, because the service time of
  1650. * an injected I/O request may be higher than the think time
  1651. * of bfqq, and therefore, if one request was injected when
  1652. * bfqq remains empty, this injected request might delay the
  1653. * service of the next I/O request for bfqq significantly. In
  1654. * case bfqq can actually tolerate some injection, then the
  1655. * adaptive update will however raise the limit soon. This
  1656. * lucky circumstance holds exactly because bfqq has a short
  1657. * think time, and thus, after remaining empty, is likely to
  1658. * get new I/O enqueued---and then completed---before being
  1659. * expired. This is the very pattern that gives the
  1660. * limit-update algorithm the chance to measure the effect of
  1661. * injection on request service times, and then to update the
  1662. * limit accordingly.
  1663. *
  1664. * However, in the following special case, the inject limit is
  1665. * left to 1 even if the think time is short: bfqq's I/O is
  1666. * synchronized with that of some other queue, i.e., bfqq may
  1667. * receive new I/O only after the I/O of the other queue is
  1668. * completed. Keeping the inject limit to 1 allows the
  1669. * blocking I/O to be served while bfqq is in service. And
  1670. * this is very convenient both for bfqq and for overall
  1671. * throughput, as explained in detail in the comments in
  1672. * bfq_update_has_short_ttime().
  1673. *
  1674. * On the opposite end, if bfqq has a long think time, then
  1675. * start directly by 1, because:
  1676. * a) on the bright side, keeping at most one request in
  1677. * service in the drive is unlikely to cause any harm to the
  1678. * latency of bfqq's requests, as the service time of a single
  1679. * request is likely to be lower than the think time of bfqq;
  1680. * b) on the downside, after becoming empty, bfqq is likely to
  1681. * expire before getting its next request. With this request
  1682. * arrival pattern, it is very hard to sample total service
  1683. * times and update the inject limit accordingly (see comments
  1684. * on bfq_update_inject_limit()). So the limit is likely to be
  1685. * never, or at least seldom, updated. As a consequence, by
  1686. * setting the limit to 1, we avoid that no injection ever
  1687. * occurs with bfqq. On the downside, this proactive step
  1688. * further reduces chances to actually compute the baseline
  1689. * total service time. Thus it reduces chances to execute the
  1690. * limit-update algorithm and possibly raise the limit to more
  1691. * than 1.
  1692. */
  1693. if (bfq_bfqq_has_short_ttime(bfqq))
  1694. bfqq->inject_limit = 0;
  1695. else
  1696. bfqq->inject_limit = 1;
  1697. bfqq->decrease_time_jif = jiffies;
  1698. }
  1699. static void bfq_add_request(struct request *rq)
  1700. {
  1701. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  1702. struct bfq_data *bfqd = bfqq->bfqd;
  1703. struct request *next_rq, *prev;
  1704. unsigned int old_wr_coeff = bfqq->wr_coeff;
  1705. bool interactive = false;
  1706. bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
  1707. bfqq->queued[rq_is_sync(rq)]++;
  1708. bfqd->queued++;
  1709. if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
  1710. /*
  1711. * Detect whether bfqq's I/O seems synchronized with
  1712. * that of some other queue, i.e., whether bfqq, after
  1713. * remaining empty, happens to receive new I/O only
  1714. * right after some I/O request of the other queue has
  1715. * been completed. We call waker queue the other
  1716. * queue, and we assume, for simplicity, that bfqq may
  1717. * have at most one waker queue.
  1718. *
  1719. * A remarkable throughput boost can be reached by
  1720. * unconditionally injecting the I/O of the waker
  1721. * queue, every time a new bfq_dispatch_request
  1722. * happens to be invoked while I/O is being plugged
  1723. * for bfqq. In addition to boosting throughput, this
  1724. * unblocks bfqq's I/O, thereby improving bandwidth
  1725. * and latency for bfqq. Note that these same results
  1726. * may be achieved with the general injection
  1727. * mechanism, but less effectively. For details on
  1728. * this aspect, see the comments on the choice of the
  1729. * queue for injection in bfq_select_queue().
  1730. *
  1731. * Turning back to the detection of a waker queue, a
  1732. * queue Q is deemed as a waker queue for bfqq if, for
  1733. * two consecutive times, bfqq happens to become non
  1734. * empty right after a request of Q has been
  1735. * completed. In particular, on the first time, Q is
  1736. * tentatively set as a candidate waker queue, while
  1737. * on the second time, the flag
  1738. * bfq_bfqq_has_waker(bfqq) is set to confirm that Q
  1739. * is a waker queue for bfqq. These detection steps
  1740. * are performed only if bfqq has a long think time,
  1741. * so as to make it more likely that bfqq's I/O is
  1742. * actually being blocked by a synchronization. This
  1743. * last filter, plus the above two-times requirement,
  1744. * make false positives less likely.
  1745. *
  1746. * NOTE
  1747. *
  1748. * The sooner a waker queue is detected, the sooner
  1749. * throughput can be boosted by injecting I/O from the
  1750. * waker queue. Fortunately, detection is likely to be
  1751. * actually fast, for the following reasons. While
  1752. * blocked by synchronization, bfqq has a long think
  1753. * time. This implies that bfqq's inject limit is at
  1754. * least equal to 1 (see the comments in
  1755. * bfq_update_inject_limit()). So, thanks to
  1756. * injection, the waker queue is likely to be served
  1757. * during the very first I/O-plugging time interval
  1758. * for bfqq. This triggers the first step of the
  1759. * detection mechanism. Thanks again to injection, the
  1760. * candidate waker queue is then likely to be
  1761. * confirmed no later than during the next
  1762. * I/O-plugging interval for bfqq.
  1763. */
  1764. if (bfqd->last_completed_rq_bfqq &&
  1765. !bfq_bfqq_has_short_ttime(bfqq) &&
  1766. ktime_get_ns() - bfqd->last_completion <
  1767. 200 * NSEC_PER_USEC) {
  1768. if (bfqd->last_completed_rq_bfqq != bfqq &&
  1769. bfqd->last_completed_rq_bfqq !=
  1770. bfqq->waker_bfqq) {
  1771. /*
  1772. * First synchronization detected with
  1773. * a candidate waker queue, or with a
  1774. * different candidate waker queue
  1775. * from the current one.
  1776. */
  1777. bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
  1778. /*
  1779. * If the waker queue disappears, then
  1780. * bfqq->waker_bfqq must be reset. To
  1781. * this goal, we maintain in each
  1782. * waker queue a list, woken_list, of
  1783. * all the queues that reference the
  1784. * waker queue through their
  1785. * waker_bfqq pointer. When the waker
  1786. * queue exits, the waker_bfqq pointer
  1787. * of all the queues in the woken_list
  1788. * is reset.
  1789. *
  1790. * In addition, if bfqq is already in
  1791. * the woken_list of a waker queue,
  1792. * then, before being inserted into
  1793. * the woken_list of a new waker
  1794. * queue, bfqq must be removed from
  1795. * the woken_list of the old waker
  1796. * queue.
  1797. */
  1798. if (!hlist_unhashed(&bfqq->woken_list_node))
  1799. hlist_del_init(&bfqq->woken_list_node);
  1800. hlist_add_head(&bfqq->woken_list_node,
  1801. &bfqd->last_completed_rq_bfqq->woken_list);
  1802. bfq_clear_bfqq_has_waker(bfqq);
  1803. } else if (bfqd->last_completed_rq_bfqq ==
  1804. bfqq->waker_bfqq &&
  1805. !bfq_bfqq_has_waker(bfqq)) {
  1806. /*
  1807. * synchronization with waker_bfqq
  1808. * seen for the second time
  1809. */
  1810. bfq_mark_bfqq_has_waker(bfqq);
  1811. }
  1812. }
  1813. /*
  1814. * Periodically reset inject limit, to make sure that
  1815. * the latter eventually drops in case workload
  1816. * changes, see step (3) in the comments on
  1817. * bfq_update_inject_limit().
  1818. */
  1819. if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
  1820. msecs_to_jiffies(1000)))
  1821. bfq_reset_inject_limit(bfqd, bfqq);
  1822. /*
  1823. * The following conditions must hold to setup a new
  1824. * sampling of total service time, and then a new
  1825. * update of the inject limit:
  1826. * - bfqq is in service, because the total service
  1827. * time is evaluated only for the I/O requests of
  1828. * the queues in service;
  1829. * - this is the right occasion to compute or to
  1830. * lower the baseline total service time, because
  1831. * there are actually no requests in the drive,
  1832. * or
  1833. * the baseline total service time is available, and
  1834. * this is the right occasion to compute the other
  1835. * quantity needed to update the inject limit, i.e.,
  1836. * the total service time caused by the amount of
  1837. * injection allowed by the current value of the
  1838. * limit. It is the right occasion because injection
  1839. * has actually been performed during the service
  1840. * hole, and there are still in-flight requests,
  1841. * which are very likely to be exactly the injected
  1842. * requests, or part of them;
  1843. * - the minimum interval for sampling the total
  1844. * service time and updating the inject limit has
  1845. * elapsed.
  1846. */
  1847. if (bfqq == bfqd->in_service_queue &&
  1848. (bfqd->rq_in_driver == 0 ||
  1849. (bfqq->last_serv_time_ns > 0 &&
  1850. bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
  1851. time_is_before_eq_jiffies(bfqq->decrease_time_jif +
  1852. msecs_to_jiffies(10))) {
  1853. bfqd->last_empty_occupied_ns = ktime_get_ns();
  1854. /*
  1855. * Start the state machine for measuring the
  1856. * total service time of rq: setting
  1857. * wait_dispatch will cause bfqd->waited_rq to
  1858. * be set when rq will be dispatched.
  1859. */
  1860. bfqd->wait_dispatch = true;
  1861. /*
  1862. * If there is no I/O in service in the drive,
  1863. * then possible injection occurred before the
  1864. * arrival of rq will not affect the total
  1865. * service time of rq. So the injection limit
  1866. * must not be updated as a function of such
  1867. * total service time, unless new injection
  1868. * occurs before rq is completed. To have the
  1869. * injection limit updated only in the latter
  1870. * case, reset rqs_injected here (rqs_injected
  1871. * will be set in case injection is performed
  1872. * on bfqq before rq is completed).
  1873. */
  1874. if (bfqd->rq_in_driver == 0)
  1875. bfqd->rqs_injected = false;
  1876. }
  1877. }
  1878. elv_rb_add(&bfqq->sort_list, rq);
  1879. /*
  1880. * Check if this request is a better next-serve candidate.
  1881. */
  1882. prev = bfqq->next_rq;
  1883. next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
  1884. bfqq->next_rq = next_rq;
  1885. /*
  1886. * Adjust priority tree position, if next_rq changes.
  1887. * See comments on bfq_pos_tree_add_move() for the unlikely().
  1888. */
  1889. if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
  1890. bfq_pos_tree_add_move(bfqd, bfqq);
  1891. if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
  1892. bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
  1893. rq, &interactive);
  1894. else {
  1895. if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
  1896. time_is_before_jiffies(
  1897. bfqq->last_wr_start_finish +
  1898. bfqd->bfq_wr_min_inter_arr_async)) {
  1899. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  1900. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  1901. bfqd->wr_busy_queues++;
  1902. bfqq->entity.prio_changed = 1;
  1903. }
  1904. if (prev != bfqq->next_rq)
  1905. bfq_updated_next_req(bfqd, bfqq);
  1906. }
  1907. /*
  1908. * Assign jiffies to last_wr_start_finish in the following
  1909. * cases:
  1910. *
  1911. * . if bfqq is not going to be weight-raised, because, for
  1912. * non weight-raised queues, last_wr_start_finish stores the
  1913. * arrival time of the last request; as of now, this piece
  1914. * of information is used only for deciding whether to
  1915. * weight-raise async queues
  1916. *
  1917. * . if bfqq is not weight-raised, because, if bfqq is now
  1918. * switching to weight-raised, then last_wr_start_finish
  1919. * stores the time when weight-raising starts
  1920. *
  1921. * . if bfqq is interactive, because, regardless of whether
  1922. * bfqq is currently weight-raised, the weight-raising
  1923. * period must start or restart (this case is considered
  1924. * separately because it is not detected by the above
  1925. * conditions, if bfqq is already weight-raised)
  1926. *
  1927. * last_wr_start_finish has to be updated also if bfqq is soft
  1928. * real-time, because the weight-raising period is constantly
  1929. * restarted on idle-to-busy transitions for these queues, but
  1930. * this is already done in bfq_bfqq_handle_idle_busy_switch if
  1931. * needed.
  1932. */
  1933. if (bfqd->low_latency &&
  1934. (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
  1935. bfqq->last_wr_start_finish = jiffies;
  1936. }
  1937. static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
  1938. struct bio *bio,
  1939. struct request_queue *q)
  1940. {
  1941. struct bfq_queue *bfqq = bfqd->bio_bfqq;
  1942. if (bfqq)
  1943. return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
  1944. return NULL;
  1945. }
  1946. static sector_t get_sdist(sector_t last_pos, struct request *rq)
  1947. {
  1948. if (last_pos)
  1949. return abs(blk_rq_pos(rq) - last_pos);
  1950. return 0;
  1951. }
  1952. #if 0 /* Still not clear if we can do without next two functions */
  1953. static void bfq_activate_request(struct request_queue *q, struct request *rq)
  1954. {
  1955. struct bfq_data *bfqd = q->elevator->elevator_data;
  1956. bfqd->rq_in_driver++;
  1957. }
  1958. static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
  1959. {
  1960. struct bfq_data *bfqd = q->elevator->elevator_data;
  1961. bfqd->rq_in_driver--;
  1962. }
  1963. #endif
  1964. static void bfq_remove_request(struct request_queue *q,
  1965. struct request *rq)
  1966. {
  1967. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  1968. struct bfq_data *bfqd = bfqq->bfqd;
  1969. const int sync = rq_is_sync(rq);
  1970. if (bfqq->next_rq == rq) {
  1971. bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
  1972. bfq_updated_next_req(bfqd, bfqq);
  1973. }
  1974. if (rq->queuelist.prev != &rq->queuelist)
  1975. list_del_init(&rq->queuelist);
  1976. bfqq->queued[sync]--;
  1977. bfqd->queued--;
  1978. elv_rb_del(&bfqq->sort_list, rq);
  1979. elv_rqhash_del(q, rq);
  1980. if (q->last_merge == rq)
  1981. q->last_merge = NULL;
  1982. if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
  1983. bfqq->next_rq = NULL;
  1984. if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
  1985. bfq_del_bfqq_busy(bfqd, bfqq, false);
  1986. /*
  1987. * bfqq emptied. In normal operation, when
  1988. * bfqq is empty, bfqq->entity.service and
  1989. * bfqq->entity.budget must contain,
  1990. * respectively, the service received and the
  1991. * budget used last time bfqq emptied. These
  1992. * facts do not hold in this case, as at least
  1993. * this last removal occurred while bfqq is
  1994. * not in service. To avoid inconsistencies,
  1995. * reset both bfqq->entity.service and
  1996. * bfqq->entity.budget, if bfqq has still a
  1997. * process that may issue I/O requests to it.
  1998. */
  1999. bfqq->entity.budget = bfqq->entity.service = 0;
  2000. }
  2001. /*
  2002. * Remove queue from request-position tree as it is empty.
  2003. */
  2004. if (bfqq->pos_root) {
  2005. rb_erase(&bfqq->pos_node, bfqq->pos_root);
  2006. bfqq->pos_root = NULL;
  2007. }
  2008. } else {
  2009. /* see comments on bfq_pos_tree_add_move() for the unlikely() */
  2010. if (unlikely(!bfqd->nonrot_with_queueing))
  2011. bfq_pos_tree_add_move(bfqd, bfqq);
  2012. }
  2013. if (rq->cmd_flags & REQ_META)
  2014. bfqq->meta_pending--;
  2015. }
  2016. static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
  2017. unsigned int nr_segs)
  2018. {
  2019. struct bfq_data *bfqd = q->elevator->elevator_data;
  2020. struct request *free = NULL;
  2021. /*
  2022. * bfq_bic_lookup grabs the queue_lock: invoke it now and
  2023. * store its return value for later use, to avoid nesting
  2024. * queue_lock inside the bfqd->lock. We assume that the bic
  2025. * returned by bfq_bic_lookup does not go away before
  2026. * bfqd->lock is taken.
  2027. */
  2028. struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
  2029. bool ret;
  2030. spin_lock_irq(&bfqd->lock);
  2031. if (bic)
  2032. bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
  2033. else
  2034. bfqd->bio_bfqq = NULL;
  2035. bfqd->bio_bic = bic;
  2036. ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
  2037. if (free)
  2038. blk_mq_free_request(free);
  2039. spin_unlock_irq(&bfqd->lock);
  2040. return ret;
  2041. }
  2042. static int bfq_request_merge(struct request_queue *q, struct request **req,
  2043. struct bio *bio)
  2044. {
  2045. struct bfq_data *bfqd = q->elevator->elevator_data;
  2046. struct request *__rq;
  2047. __rq = bfq_find_rq_fmerge(bfqd, bio, q);
  2048. if (__rq && elv_bio_merge_ok(__rq, bio)) {
  2049. *req = __rq;
  2050. if (blk_discard_mergable(__rq))
  2051. return ELEVATOR_DISCARD_MERGE;
  2052. return ELEVATOR_FRONT_MERGE;
  2053. }
  2054. return ELEVATOR_NO_MERGE;
  2055. }
  2056. static struct bfq_queue *bfq_init_rq(struct request *rq);
  2057. static void bfq_request_merged(struct request_queue *q, struct request *req,
  2058. enum elv_merge type)
  2059. {
  2060. if (type == ELEVATOR_FRONT_MERGE &&
  2061. rb_prev(&req->rb_node) &&
  2062. blk_rq_pos(req) <
  2063. blk_rq_pos(container_of(rb_prev(&req->rb_node),
  2064. struct request, rb_node))) {
  2065. struct bfq_queue *bfqq = bfq_init_rq(req);
  2066. struct bfq_data *bfqd;
  2067. struct request *prev, *next_rq;
  2068. if (!bfqq)
  2069. return;
  2070. bfqd = bfqq->bfqd;
  2071. /* Reposition request in its sort_list */
  2072. elv_rb_del(&bfqq->sort_list, req);
  2073. elv_rb_add(&bfqq->sort_list, req);
  2074. /* Choose next request to be served for bfqq */
  2075. prev = bfqq->next_rq;
  2076. next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
  2077. bfqd->last_position);
  2078. bfqq->next_rq = next_rq;
  2079. /*
  2080. * If next_rq changes, update both the queue's budget to
  2081. * fit the new request and the queue's position in its
  2082. * rq_pos_tree.
  2083. */
  2084. if (prev != bfqq->next_rq) {
  2085. bfq_updated_next_req(bfqd, bfqq);
  2086. /*
  2087. * See comments on bfq_pos_tree_add_move() for
  2088. * the unlikely().
  2089. */
  2090. if (unlikely(!bfqd->nonrot_with_queueing))
  2091. bfq_pos_tree_add_move(bfqd, bfqq);
  2092. }
  2093. }
  2094. }
  2095. /*
  2096. * This function is called to notify the scheduler that the requests
  2097. * rq and 'next' have been merged, with 'next' going away. BFQ
  2098. * exploits this hook to address the following issue: if 'next' has a
  2099. * fifo_time lower that rq, then the fifo_time of rq must be set to
  2100. * the value of 'next', to not forget the greater age of 'next'.
  2101. *
  2102. * NOTE: in this function we assume that rq is in a bfq_queue, basing
  2103. * on that rq is picked from the hash table q->elevator->hash, which,
  2104. * in its turn, is filled only with I/O requests present in
  2105. * bfq_queues, while BFQ is in use for the request queue q. In fact,
  2106. * the function that fills this hash table (elv_rqhash_add) is called
  2107. * only by bfq_insert_request.
  2108. */
  2109. static void bfq_requests_merged(struct request_queue *q, struct request *rq,
  2110. struct request *next)
  2111. {
  2112. struct bfq_queue *bfqq = bfq_init_rq(rq),
  2113. *next_bfqq = bfq_init_rq(next);
  2114. if (!bfqq)
  2115. return;
  2116. /*
  2117. * If next and rq belong to the same bfq_queue and next is older
  2118. * than rq, then reposition rq in the fifo (by substituting next
  2119. * with rq). Otherwise, if next and rq belong to different
  2120. * bfq_queues, never reposition rq: in fact, we would have to
  2121. * reposition it with respect to next's position in its own fifo,
  2122. * which would most certainly be too expensive with respect to
  2123. * the benefits.
  2124. */
  2125. if (bfqq == next_bfqq &&
  2126. !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
  2127. next->fifo_time < rq->fifo_time) {
  2128. list_del_init(&rq->queuelist);
  2129. list_replace_init(&next->queuelist, &rq->queuelist);
  2130. rq->fifo_time = next->fifo_time;
  2131. }
  2132. if (bfqq->next_rq == next)
  2133. bfqq->next_rq = rq;
  2134. bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
  2135. }
  2136. /* Must be called with bfqq != NULL */
  2137. static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
  2138. {
  2139. if (bfq_bfqq_busy(bfqq))
  2140. bfqq->bfqd->wr_busy_queues--;
  2141. bfqq->wr_coeff = 1;
  2142. bfqq->wr_cur_max_time = 0;
  2143. bfqq->last_wr_start_finish = jiffies;
  2144. /*
  2145. * Trigger a weight change on the next invocation of
  2146. * __bfq_entity_update_weight_prio.
  2147. */
  2148. bfqq->entity.prio_changed = 1;
  2149. }
  2150. void bfq_end_wr_async_queues(struct bfq_data *bfqd,
  2151. struct bfq_group *bfqg)
  2152. {
  2153. int i, j;
  2154. for (i = 0; i < 2; i++)
  2155. for (j = 0; j < IOPRIO_BE_NR; j++)
  2156. if (bfqg->async_bfqq[i][j])
  2157. bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
  2158. if (bfqg->async_idle_bfqq)
  2159. bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
  2160. }
  2161. static void bfq_end_wr(struct bfq_data *bfqd)
  2162. {
  2163. struct bfq_queue *bfqq;
  2164. spin_lock_irq(&bfqd->lock);
  2165. list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
  2166. bfq_bfqq_end_wr(bfqq);
  2167. list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
  2168. bfq_bfqq_end_wr(bfqq);
  2169. bfq_end_wr_async(bfqd);
  2170. spin_unlock_irq(&bfqd->lock);
  2171. }
  2172. static sector_t bfq_io_struct_pos(void *io_struct, bool request)
  2173. {
  2174. if (request)
  2175. return blk_rq_pos(io_struct);
  2176. else
  2177. return ((struct bio *)io_struct)->bi_iter.bi_sector;
  2178. }
  2179. static int bfq_rq_close_to_sector(void *io_struct, bool request,
  2180. sector_t sector)
  2181. {
  2182. return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
  2183. BFQQ_CLOSE_THR;
  2184. }
  2185. static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
  2186. struct bfq_queue *bfqq,
  2187. sector_t sector)
  2188. {
  2189. struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
  2190. struct rb_node *parent, *node;
  2191. struct bfq_queue *__bfqq;
  2192. if (RB_EMPTY_ROOT(root))
  2193. return NULL;
  2194. /*
  2195. * First, if we find a request starting at the end of the last
  2196. * request, choose it.
  2197. */
  2198. __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
  2199. if (__bfqq)
  2200. return __bfqq;
  2201. /*
  2202. * If the exact sector wasn't found, the parent of the NULL leaf
  2203. * will contain the closest sector (rq_pos_tree sorted by
  2204. * next_request position).
  2205. */
  2206. __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
  2207. if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
  2208. return __bfqq;
  2209. if (blk_rq_pos(__bfqq->next_rq) < sector)
  2210. node = rb_next(&__bfqq->pos_node);
  2211. else
  2212. node = rb_prev(&__bfqq->pos_node);
  2213. if (!node)
  2214. return NULL;
  2215. __bfqq = rb_entry(node, struct bfq_queue, pos_node);
  2216. if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
  2217. return __bfqq;
  2218. return NULL;
  2219. }
  2220. static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
  2221. struct bfq_queue *cur_bfqq,
  2222. sector_t sector)
  2223. {
  2224. struct bfq_queue *bfqq;
  2225. /*
  2226. * We shall notice if some of the queues are cooperating,
  2227. * e.g., working closely on the same area of the device. In
  2228. * that case, we can group them together and: 1) don't waste
  2229. * time idling, and 2) serve the union of their requests in
  2230. * the best possible order for throughput.
  2231. */
  2232. bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
  2233. if (!bfqq || bfqq == cur_bfqq)
  2234. return NULL;
  2235. return bfqq;
  2236. }
  2237. static struct bfq_queue *
  2238. bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
  2239. {
  2240. int process_refs, new_process_refs;
  2241. struct bfq_queue *__bfqq;
  2242. /*
  2243. * If there are no process references on the new_bfqq, then it is
  2244. * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
  2245. * may have dropped their last reference (not just their last process
  2246. * reference).
  2247. */
  2248. if (!bfqq_process_refs(new_bfqq))
  2249. return NULL;
  2250. /* Avoid a circular list and skip interim queue merges. */
  2251. while ((__bfqq = new_bfqq->new_bfqq)) {
  2252. if (__bfqq == bfqq)
  2253. return NULL;
  2254. new_bfqq = __bfqq;
  2255. }
  2256. process_refs = bfqq_process_refs(bfqq);
  2257. new_process_refs = bfqq_process_refs(new_bfqq);
  2258. /*
  2259. * If the process for the bfqq has gone away, there is no
  2260. * sense in merging the queues.
  2261. */
  2262. if (process_refs == 0 || new_process_refs == 0)
  2263. return NULL;
  2264. bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
  2265. new_bfqq->pid);
  2266. /*
  2267. * Merging is just a redirection: the requests of the process
  2268. * owning one of the two queues are redirected to the other queue.
  2269. * The latter queue, in its turn, is set as shared if this is the
  2270. * first time that the requests of some process are redirected to
  2271. * it.
  2272. *
  2273. * We redirect bfqq to new_bfqq and not the opposite, because
  2274. * we are in the context of the process owning bfqq, thus we
  2275. * have the io_cq of this process. So we can immediately
  2276. * configure this io_cq to redirect the requests of the
  2277. * process to new_bfqq. In contrast, the io_cq of new_bfqq is
  2278. * not available any more (new_bfqq->bic == NULL).
  2279. *
  2280. * Anyway, even in case new_bfqq coincides with the in-service
  2281. * queue, redirecting requests the in-service queue is the
  2282. * best option, as we feed the in-service queue with new
  2283. * requests close to the last request served and, by doing so,
  2284. * are likely to increase the throughput.
  2285. */
  2286. bfqq->new_bfqq = new_bfqq;
  2287. /*
  2288. * The above assignment schedules the following redirections:
  2289. * each time some I/O for bfqq arrives, the process that
  2290. * generated that I/O is disassociated from bfqq and
  2291. * associated with new_bfqq. Here we increases new_bfqq->ref
  2292. * in advance, adding the number of processes that are
  2293. * expected to be associated with new_bfqq as they happen to
  2294. * issue I/O.
  2295. */
  2296. new_bfqq->ref += process_refs;
  2297. return new_bfqq;
  2298. }
  2299. static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
  2300. struct bfq_queue *new_bfqq)
  2301. {
  2302. if (bfq_too_late_for_merging(new_bfqq))
  2303. return false;
  2304. if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
  2305. (bfqq->ioprio_class != new_bfqq->ioprio_class))
  2306. return false;
  2307. /*
  2308. * If either of the queues has already been detected as seeky,
  2309. * then merging it with the other queue is unlikely to lead to
  2310. * sequential I/O.
  2311. */
  2312. if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
  2313. return false;
  2314. /*
  2315. * Interleaved I/O is known to be done by (some) applications
  2316. * only for reads, so it does not make sense to merge async
  2317. * queues.
  2318. */
  2319. if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
  2320. return false;
  2321. return true;
  2322. }
  2323. /*
  2324. * Attempt to schedule a merge of bfqq with the currently in-service
  2325. * queue or with a close queue among the scheduled queues. Return
  2326. * NULL if no merge was scheduled, a pointer to the shared bfq_queue
  2327. * structure otherwise.
  2328. *
  2329. * The OOM queue is not allowed to participate to cooperation: in fact, since
  2330. * the requests temporarily redirected to the OOM queue could be redirected
  2331. * again to dedicated queues at any time, the state needed to correctly
  2332. * handle merging with the OOM queue would be quite complex and expensive
  2333. * to maintain. Besides, in such a critical condition as an out of memory,
  2334. * the benefits of queue merging may be little relevant, or even negligible.
  2335. *
  2336. * WARNING: queue merging may impair fairness among non-weight raised
  2337. * queues, for at least two reasons: 1) the original weight of a
  2338. * merged queue may change during the merged state, 2) even being the
  2339. * weight the same, a merged queue may be bloated with many more
  2340. * requests than the ones produced by its originally-associated
  2341. * process.
  2342. */
  2343. static struct bfq_queue *
  2344. bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  2345. void *io_struct, bool request)
  2346. {
  2347. struct bfq_queue *in_service_bfqq, *new_bfqq;
  2348. /* if a merge has already been setup, then proceed with that first */
  2349. if (bfqq->new_bfqq)
  2350. return bfqq->new_bfqq;
  2351. /*
  2352. * Do not perform queue merging if the device is non
  2353. * rotational and performs internal queueing. In fact, such a
  2354. * device reaches a high speed through internal parallelism
  2355. * and pipelining. This means that, to reach a high
  2356. * throughput, it must have many requests enqueued at the same
  2357. * time. But, in this configuration, the internal scheduling
  2358. * algorithm of the device does exactly the job of queue
  2359. * merging: it reorders requests so as to obtain as much as
  2360. * possible a sequential I/O pattern. As a consequence, with
  2361. * the workload generated by processes doing interleaved I/O,
  2362. * the throughput reached by the device is likely to be the
  2363. * same, with and without queue merging.
  2364. *
  2365. * Disabling merging also provides a remarkable benefit in
  2366. * terms of throughput. Merging tends to make many workloads
  2367. * artificially more uneven, because of shared queues
  2368. * remaining non empty for incomparably more time than
  2369. * non-merged queues. This may accentuate workload
  2370. * asymmetries. For example, if one of the queues in a set of
  2371. * merged queues has a higher weight than a normal queue, then
  2372. * the shared queue may inherit such a high weight and, by
  2373. * staying almost always active, may force BFQ to perform I/O
  2374. * plugging most of the time. This evidently makes it harder
  2375. * for BFQ to let the device reach a high throughput.
  2376. *
  2377. * Finally, the likely() macro below is not used because one
  2378. * of the two branches is more likely than the other, but to
  2379. * have the code path after the following if() executed as
  2380. * fast as possible for the case of a non rotational device
  2381. * with queueing. We want it because this is the fastest kind
  2382. * of device. On the opposite end, the likely() may lengthen
  2383. * the execution time of BFQ for the case of slower devices
  2384. * (rotational or at least without queueing). But in this case
  2385. * the execution time of BFQ matters very little, if not at
  2386. * all.
  2387. */
  2388. if (likely(bfqd->nonrot_with_queueing))
  2389. return NULL;
  2390. /*
  2391. * Prevent bfqq from being merged if it has been created too
  2392. * long ago. The idea is that true cooperating processes, and
  2393. * thus their associated bfq_queues, are supposed to be
  2394. * created shortly after each other. This is the case, e.g.,
  2395. * for KVM/QEMU and dump I/O threads. Basing on this
  2396. * assumption, the following filtering greatly reduces the
  2397. * probability that two non-cooperating processes, which just
  2398. * happen to do close I/O for some short time interval, have
  2399. * their queues merged by mistake.
  2400. */
  2401. if (bfq_too_late_for_merging(bfqq))
  2402. return NULL;
  2403. if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
  2404. return NULL;
  2405. /* If there is only one backlogged queue, don't search. */
  2406. if (bfq_tot_busy_queues(bfqd) == 1)
  2407. return NULL;
  2408. in_service_bfqq = bfqd->in_service_queue;
  2409. if (in_service_bfqq && in_service_bfqq != bfqq &&
  2410. likely(in_service_bfqq != &bfqd->oom_bfqq) &&
  2411. bfq_rq_close_to_sector(io_struct, request,
  2412. bfqd->in_serv_last_pos) &&
  2413. bfqq->entity.parent == in_service_bfqq->entity.parent &&
  2414. bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
  2415. new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
  2416. if (new_bfqq)
  2417. return new_bfqq;
  2418. }
  2419. /*
  2420. * Check whether there is a cooperator among currently scheduled
  2421. * queues. The only thing we need is that the bio/request is not
  2422. * NULL, as we need it to establish whether a cooperator exists.
  2423. */
  2424. new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
  2425. bfq_io_struct_pos(io_struct, request));
  2426. if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
  2427. bfq_may_be_close_cooperator(bfqq, new_bfqq))
  2428. return bfq_setup_merge(bfqq, new_bfqq);
  2429. return NULL;
  2430. }
  2431. static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
  2432. {
  2433. struct bfq_io_cq *bic = bfqq->bic;
  2434. /*
  2435. * If !bfqq->bic, the queue is already shared or its requests
  2436. * have already been redirected to a shared queue; both idle window
  2437. * and weight raising state have already been saved. Do nothing.
  2438. */
  2439. if (!bic)
  2440. return;
  2441. bic->saved_weight = bfqq->entity.orig_weight;
  2442. bic->saved_ttime = bfqq->ttime;
  2443. bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
  2444. bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
  2445. bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
  2446. bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
  2447. if (unlikely(bfq_bfqq_just_created(bfqq) &&
  2448. !bfq_bfqq_in_large_burst(bfqq) &&
  2449. bfqq->bfqd->low_latency)) {
  2450. /*
  2451. * bfqq being merged right after being created: bfqq
  2452. * would have deserved interactive weight raising, but
  2453. * did not make it to be set in a weight-raised state,
  2454. * because of this early merge. Store directly the
  2455. * weight-raising state that would have been assigned
  2456. * to bfqq, so that to avoid that bfqq unjustly fails
  2457. * to enjoy weight raising if split soon.
  2458. */
  2459. bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
  2460. bic->saved_wr_start_at_switch_to_srt = bfq_smallest_from_now();
  2461. bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
  2462. bic->saved_last_wr_start_finish = jiffies;
  2463. } else {
  2464. bic->saved_wr_coeff = bfqq->wr_coeff;
  2465. bic->saved_wr_start_at_switch_to_srt =
  2466. bfqq->wr_start_at_switch_to_srt;
  2467. bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
  2468. bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
  2469. }
  2470. }
  2471. void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  2472. {
  2473. /*
  2474. * To prevent bfqq's service guarantees from being violated,
  2475. * bfqq may be left busy, i.e., queued for service, even if
  2476. * empty (see comments in __bfq_bfqq_expire() for
  2477. * details). But, if no process will send requests to bfqq any
  2478. * longer, then there is no point in keeping bfqq queued for
  2479. * service. In addition, keeping bfqq queued for service, but
  2480. * with no process ref any longer, may have caused bfqq to be
  2481. * freed when dequeued from service. But this is assumed to
  2482. * never happen.
  2483. */
  2484. if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
  2485. bfqq != bfqd->in_service_queue)
  2486. bfq_del_bfqq_busy(bfqd, bfqq, false);
  2487. bfq_put_queue(bfqq);
  2488. }
  2489. static void
  2490. bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
  2491. struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
  2492. {
  2493. bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
  2494. (unsigned long)new_bfqq->pid);
  2495. /* Save weight raising and idle window of the merged queues */
  2496. bfq_bfqq_save_state(bfqq);
  2497. bfq_bfqq_save_state(new_bfqq);
  2498. if (bfq_bfqq_IO_bound(bfqq))
  2499. bfq_mark_bfqq_IO_bound(new_bfqq);
  2500. bfq_clear_bfqq_IO_bound(bfqq);
  2501. /*
  2502. * If bfqq is weight-raised, then let new_bfqq inherit
  2503. * weight-raising. To reduce false positives, neglect the case
  2504. * where bfqq has just been created, but has not yet made it
  2505. * to be weight-raised (which may happen because EQM may merge
  2506. * bfqq even before bfq_add_request is executed for the first
  2507. * time for bfqq). Handling this case would however be very
  2508. * easy, thanks to the flag just_created.
  2509. */
  2510. if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
  2511. new_bfqq->wr_coeff = bfqq->wr_coeff;
  2512. new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
  2513. new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
  2514. new_bfqq->wr_start_at_switch_to_srt =
  2515. bfqq->wr_start_at_switch_to_srt;
  2516. if (bfq_bfqq_busy(new_bfqq))
  2517. bfqd->wr_busy_queues++;
  2518. new_bfqq->entity.prio_changed = 1;
  2519. }
  2520. if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
  2521. bfqq->wr_coeff = 1;
  2522. bfqq->entity.prio_changed = 1;
  2523. if (bfq_bfqq_busy(bfqq))
  2524. bfqd->wr_busy_queues--;
  2525. }
  2526. bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
  2527. bfqd->wr_busy_queues);
  2528. /*
  2529. * Merge queues (that is, let bic redirect its requests to new_bfqq)
  2530. */
  2531. bic_set_bfqq(bic, new_bfqq, 1);
  2532. bfq_mark_bfqq_coop(new_bfqq);
  2533. /*
  2534. * new_bfqq now belongs to at least two bics (it is a shared queue):
  2535. * set new_bfqq->bic to NULL. bfqq either:
  2536. * - does not belong to any bic any more, and hence bfqq->bic must
  2537. * be set to NULL, or
  2538. * - is a queue whose owning bics have already been redirected to a
  2539. * different queue, hence the queue is destined to not belong to
  2540. * any bic soon and bfqq->bic is already NULL (therefore the next
  2541. * assignment causes no harm).
  2542. */
  2543. new_bfqq->bic = NULL;
  2544. /*
  2545. * If the queue is shared, the pid is the pid of one of the associated
  2546. * processes. Which pid depends on the exact sequence of merge events
  2547. * the queue underwent. So printing such a pid is useless and confusing
  2548. * because it reports a random pid between those of the associated
  2549. * processes.
  2550. * We mark such a queue with a pid -1, and then print SHARED instead of
  2551. * a pid in logging messages.
  2552. */
  2553. new_bfqq->pid = -1;
  2554. bfqq->bic = NULL;
  2555. bfq_release_process_ref(bfqd, bfqq);
  2556. }
  2557. static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
  2558. struct bio *bio)
  2559. {
  2560. struct bfq_data *bfqd = q->elevator->elevator_data;
  2561. bool is_sync = op_is_sync(bio->bi_opf);
  2562. struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
  2563. /*
  2564. * Disallow merge of a sync bio into an async request.
  2565. */
  2566. if (is_sync && !rq_is_sync(rq))
  2567. return false;
  2568. /*
  2569. * Lookup the bfqq that this bio will be queued with. Allow
  2570. * merge only if rq is queued there.
  2571. */
  2572. if (!bfqq)
  2573. return false;
  2574. /*
  2575. * We take advantage of this function to perform an early merge
  2576. * of the queues of possible cooperating processes.
  2577. */
  2578. new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
  2579. if (new_bfqq) {
  2580. /*
  2581. * bic still points to bfqq, then it has not yet been
  2582. * redirected to some other bfq_queue, and a queue
  2583. * merge between bfqq and new_bfqq can be safely
  2584. * fulfilled, i.e., bic can be redirected to new_bfqq
  2585. * and bfqq can be put.
  2586. */
  2587. bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
  2588. new_bfqq);
  2589. /*
  2590. * If we get here, bio will be queued into new_queue,
  2591. * so use new_bfqq to decide whether bio and rq can be
  2592. * merged.
  2593. */
  2594. bfqq = new_bfqq;
  2595. /*
  2596. * Change also bqfd->bio_bfqq, as
  2597. * bfqd->bio_bic now points to new_bfqq, and
  2598. * this function may be invoked again (and then may
  2599. * use again bqfd->bio_bfqq).
  2600. */
  2601. bfqd->bio_bfqq = bfqq;
  2602. }
  2603. return bfqq == RQ_BFQQ(rq);
  2604. }
  2605. /*
  2606. * Set the maximum time for the in-service queue to consume its
  2607. * budget. This prevents seeky processes from lowering the throughput.
  2608. * In practice, a time-slice service scheme is used with seeky
  2609. * processes.
  2610. */
  2611. static void bfq_set_budget_timeout(struct bfq_data *bfqd,
  2612. struct bfq_queue *bfqq)
  2613. {
  2614. unsigned int timeout_coeff;
  2615. if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
  2616. timeout_coeff = 1;
  2617. else
  2618. timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
  2619. bfqd->last_budget_start = ktime_get();
  2620. bfqq->budget_timeout = jiffies +
  2621. bfqd->bfq_timeout * timeout_coeff;
  2622. }
  2623. static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
  2624. struct bfq_queue *bfqq)
  2625. {
  2626. if (bfqq) {
  2627. bfq_clear_bfqq_fifo_expire(bfqq);
  2628. bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
  2629. if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
  2630. bfqq->wr_coeff > 1 &&
  2631. bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
  2632. time_is_before_jiffies(bfqq->budget_timeout)) {
  2633. /*
  2634. * For soft real-time queues, move the start
  2635. * of the weight-raising period forward by the
  2636. * time the queue has not received any
  2637. * service. Otherwise, a relatively long
  2638. * service delay is likely to cause the
  2639. * weight-raising period of the queue to end,
  2640. * because of the short duration of the
  2641. * weight-raising period of a soft real-time
  2642. * queue. It is worth noting that this move
  2643. * is not so dangerous for the other queues,
  2644. * because soft real-time queues are not
  2645. * greedy.
  2646. *
  2647. * To not add a further variable, we use the
  2648. * overloaded field budget_timeout to
  2649. * determine for how long the queue has not
  2650. * received service, i.e., how much time has
  2651. * elapsed since the queue expired. However,
  2652. * this is a little imprecise, because
  2653. * budget_timeout is set to jiffies if bfqq
  2654. * not only expires, but also remains with no
  2655. * request.
  2656. */
  2657. if (time_after(bfqq->budget_timeout,
  2658. bfqq->last_wr_start_finish))
  2659. bfqq->last_wr_start_finish +=
  2660. jiffies - bfqq->budget_timeout;
  2661. else
  2662. bfqq->last_wr_start_finish = jiffies;
  2663. }
  2664. bfq_set_budget_timeout(bfqd, bfqq);
  2665. bfq_log_bfqq(bfqd, bfqq,
  2666. "set_in_service_queue, cur-budget = %d",
  2667. bfqq->entity.budget);
  2668. }
  2669. bfqd->in_service_queue = bfqq;
  2670. bfqd->in_serv_last_pos = 0;
  2671. }
  2672. /*
  2673. * Get and set a new queue for service.
  2674. */
  2675. static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
  2676. {
  2677. struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
  2678. __bfq_set_in_service_queue(bfqd, bfqq);
  2679. return bfqq;
  2680. }
  2681. static void bfq_arm_slice_timer(struct bfq_data *bfqd)
  2682. {
  2683. struct bfq_queue *bfqq = bfqd->in_service_queue;
  2684. u32 sl;
  2685. bfq_mark_bfqq_wait_request(bfqq);
  2686. /*
  2687. * We don't want to idle for seeks, but we do want to allow
  2688. * fair distribution of slice time for a process doing back-to-back
  2689. * seeks. So allow a little bit of time for him to submit a new rq.
  2690. */
  2691. sl = bfqd->bfq_slice_idle;
  2692. /*
  2693. * Unless the queue is being weight-raised or the scenario is
  2694. * asymmetric, grant only minimum idle time if the queue
  2695. * is seeky. A long idling is preserved for a weight-raised
  2696. * queue, or, more in general, in an asymmetric scenario,
  2697. * because a long idling is needed for guaranteeing to a queue
  2698. * its reserved share of the throughput (in particular, it is
  2699. * needed if the queue has a higher weight than some other
  2700. * queue).
  2701. */
  2702. if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
  2703. !bfq_asymmetric_scenario(bfqd, bfqq))
  2704. sl = min_t(u64, sl, BFQ_MIN_TT);
  2705. else if (bfqq->wr_coeff > 1)
  2706. sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
  2707. bfqd->last_idling_start = ktime_get();
  2708. bfqd->last_idling_start_jiffies = jiffies;
  2709. hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
  2710. HRTIMER_MODE_REL);
  2711. bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
  2712. }
  2713. /*
  2714. * In autotuning mode, max_budget is dynamically recomputed as the
  2715. * amount of sectors transferred in timeout at the estimated peak
  2716. * rate. This enables BFQ to utilize a full timeslice with a full
  2717. * budget, even if the in-service queue is served at peak rate. And
  2718. * this maximises throughput with sequential workloads.
  2719. */
  2720. static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
  2721. {
  2722. return (u64)bfqd->peak_rate * USEC_PER_MSEC *
  2723. jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
  2724. }
  2725. /*
  2726. * Update parameters related to throughput and responsiveness, as a
  2727. * function of the estimated peak rate. See comments on
  2728. * bfq_calc_max_budget(), and on the ref_wr_duration array.
  2729. */
  2730. static void update_thr_responsiveness_params(struct bfq_data *bfqd)
  2731. {
  2732. if (bfqd->bfq_user_max_budget == 0) {
  2733. bfqd->bfq_max_budget =
  2734. bfq_calc_max_budget(bfqd);
  2735. bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget);
  2736. }
  2737. }
  2738. static void bfq_reset_rate_computation(struct bfq_data *bfqd,
  2739. struct request *rq)
  2740. {
  2741. if (rq != NULL) { /* new rq dispatch now, reset accordingly */
  2742. bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
  2743. bfqd->peak_rate_samples = 1;
  2744. bfqd->sequential_samples = 0;
  2745. bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
  2746. blk_rq_sectors(rq);
  2747. } else /* no new rq dispatched, just reset the number of samples */
  2748. bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
  2749. bfq_log(bfqd,
  2750. "reset_rate_computation at end, sample %u/%u tot_sects %llu",
  2751. bfqd->peak_rate_samples, bfqd->sequential_samples,
  2752. bfqd->tot_sectors_dispatched);
  2753. }
  2754. static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
  2755. {
  2756. u32 rate, weight, divisor;
  2757. /*
  2758. * For the convergence property to hold (see comments on
  2759. * bfq_update_peak_rate()) and for the assessment to be
  2760. * reliable, a minimum number of samples must be present, and
  2761. * a minimum amount of time must have elapsed. If not so, do
  2762. * not compute new rate. Just reset parameters, to get ready
  2763. * for a new evaluation attempt.
  2764. */
  2765. if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
  2766. bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
  2767. goto reset_computation;
  2768. /*
  2769. * If a new request completion has occurred after last
  2770. * dispatch, then, to approximate the rate at which requests
  2771. * have been served by the device, it is more precise to
  2772. * extend the observation interval to the last completion.
  2773. */
  2774. bfqd->delta_from_first =
  2775. max_t(u64, bfqd->delta_from_first,
  2776. bfqd->last_completion - bfqd->first_dispatch);
  2777. /*
  2778. * Rate computed in sects/usec, and not sects/nsec, for
  2779. * precision issues.
  2780. */
  2781. rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
  2782. div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
  2783. /*
  2784. * Peak rate not updated if:
  2785. * - the percentage of sequential dispatches is below 3/4 of the
  2786. * total, and rate is below the current estimated peak rate
  2787. * - rate is unreasonably high (> 20M sectors/sec)
  2788. */
  2789. if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
  2790. rate <= bfqd->peak_rate) ||
  2791. rate > 20<<BFQ_RATE_SHIFT)
  2792. goto reset_computation;
  2793. /*
  2794. * We have to update the peak rate, at last! To this purpose,
  2795. * we use a low-pass filter. We compute the smoothing constant
  2796. * of the filter as a function of the 'weight' of the new
  2797. * measured rate.
  2798. *
  2799. * As can be seen in next formulas, we define this weight as a
  2800. * quantity proportional to how sequential the workload is,
  2801. * and to how long the observation time interval is.
  2802. *
  2803. * The weight runs from 0 to 8. The maximum value of the
  2804. * weight, 8, yields the minimum value for the smoothing
  2805. * constant. At this minimum value for the smoothing constant,
  2806. * the measured rate contributes for half of the next value of
  2807. * the estimated peak rate.
  2808. *
  2809. * So, the first step is to compute the weight as a function
  2810. * of how sequential the workload is. Note that the weight
  2811. * cannot reach 9, because bfqd->sequential_samples cannot
  2812. * become equal to bfqd->peak_rate_samples, which, in its
  2813. * turn, holds true because bfqd->sequential_samples is not
  2814. * incremented for the first sample.
  2815. */
  2816. weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
  2817. /*
  2818. * Second step: further refine the weight as a function of the
  2819. * duration of the observation interval.
  2820. */
  2821. weight = min_t(u32, 8,
  2822. div_u64(weight * bfqd->delta_from_first,
  2823. BFQ_RATE_REF_INTERVAL));
  2824. /*
  2825. * Divisor ranging from 10, for minimum weight, to 2, for
  2826. * maximum weight.
  2827. */
  2828. divisor = 10 - weight;
  2829. /*
  2830. * Finally, update peak rate:
  2831. *
  2832. * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
  2833. */
  2834. bfqd->peak_rate *= divisor-1;
  2835. bfqd->peak_rate /= divisor;
  2836. rate /= divisor; /* smoothing constant alpha = 1/divisor */
  2837. bfqd->peak_rate += rate;
  2838. /*
  2839. * For a very slow device, bfqd->peak_rate can reach 0 (see
  2840. * the minimum representable values reported in the comments
  2841. * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
  2842. * divisions by zero where bfqd->peak_rate is used as a
  2843. * divisor.
  2844. */
  2845. bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
  2846. update_thr_responsiveness_params(bfqd);
  2847. reset_computation:
  2848. bfq_reset_rate_computation(bfqd, rq);
  2849. }
  2850. /*
  2851. * Update the read/write peak rate (the main quantity used for
  2852. * auto-tuning, see update_thr_responsiveness_params()).
  2853. *
  2854. * It is not trivial to estimate the peak rate (correctly): because of
  2855. * the presence of sw and hw queues between the scheduler and the
  2856. * device components that finally serve I/O requests, it is hard to
  2857. * say exactly when a given dispatched request is served inside the
  2858. * device, and for how long. As a consequence, it is hard to know
  2859. * precisely at what rate a given set of requests is actually served
  2860. * by the device.
  2861. *
  2862. * On the opposite end, the dispatch time of any request is trivially
  2863. * available, and, from this piece of information, the "dispatch rate"
  2864. * of requests can be immediately computed. So, the idea in the next
  2865. * function is to use what is known, namely request dispatch times
  2866. * (plus, when useful, request completion times), to estimate what is
  2867. * unknown, namely in-device request service rate.
  2868. *
  2869. * The main issue is that, because of the above facts, the rate at
  2870. * which a certain set of requests is dispatched over a certain time
  2871. * interval can vary greatly with respect to the rate at which the
  2872. * same requests are then served. But, since the size of any
  2873. * intermediate queue is limited, and the service scheme is lossless
  2874. * (no request is silently dropped), the following obvious convergence
  2875. * property holds: the number of requests dispatched MUST become
  2876. * closer and closer to the number of requests completed as the
  2877. * observation interval grows. This is the key property used in
  2878. * the next function to estimate the peak service rate as a function
  2879. * of the observed dispatch rate. The function assumes to be invoked
  2880. * on every request dispatch.
  2881. */
  2882. static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
  2883. {
  2884. u64 now_ns = ktime_get_ns();
  2885. if (bfqd->peak_rate_samples == 0) { /* first dispatch */
  2886. bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
  2887. bfqd->peak_rate_samples);
  2888. bfq_reset_rate_computation(bfqd, rq);
  2889. goto update_last_values; /* will add one sample */
  2890. }
  2891. /*
  2892. * Device idle for very long: the observation interval lasting
  2893. * up to this dispatch cannot be a valid observation interval
  2894. * for computing a new peak rate (similarly to the late-
  2895. * completion event in bfq_completed_request()). Go to
  2896. * update_rate_and_reset to have the following three steps
  2897. * taken:
  2898. * - close the observation interval at the last (previous)
  2899. * request dispatch or completion
  2900. * - compute rate, if possible, for that observation interval
  2901. * - start a new observation interval with this dispatch
  2902. */
  2903. if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
  2904. bfqd->rq_in_driver == 0)
  2905. goto update_rate_and_reset;
  2906. /* Update sampling information */
  2907. bfqd->peak_rate_samples++;
  2908. if ((bfqd->rq_in_driver > 0 ||
  2909. now_ns - bfqd->last_completion < BFQ_MIN_TT)
  2910. && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
  2911. bfqd->sequential_samples++;
  2912. bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
  2913. /* Reset max observed rq size every 32 dispatches */
  2914. if (likely(bfqd->peak_rate_samples % 32))
  2915. bfqd->last_rq_max_size =
  2916. max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
  2917. else
  2918. bfqd->last_rq_max_size = blk_rq_sectors(rq);
  2919. bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
  2920. /* Target observation interval not yet reached, go on sampling */
  2921. if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
  2922. goto update_last_values;
  2923. update_rate_and_reset:
  2924. bfq_update_rate_reset(bfqd, rq);
  2925. update_last_values:
  2926. bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
  2927. if (RQ_BFQQ(rq) == bfqd->in_service_queue)
  2928. bfqd->in_serv_last_pos = bfqd->last_position;
  2929. bfqd->last_dispatch = now_ns;
  2930. }
  2931. /*
  2932. * Remove request from internal lists.
  2933. */
  2934. static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
  2935. {
  2936. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  2937. /*
  2938. * For consistency, the next instruction should have been
  2939. * executed after removing the request from the queue and
  2940. * dispatching it. We execute instead this instruction before
  2941. * bfq_remove_request() (and hence introduce a temporary
  2942. * inconsistency), for efficiency. In fact, should this
  2943. * dispatch occur for a non in-service bfqq, this anticipated
  2944. * increment prevents two counters related to bfqq->dispatched
  2945. * from risking to be, first, uselessly decremented, and then
  2946. * incremented again when the (new) value of bfqq->dispatched
  2947. * happens to be taken into account.
  2948. */
  2949. bfqq->dispatched++;
  2950. bfq_update_peak_rate(q->elevator->elevator_data, rq);
  2951. bfq_remove_request(q, rq);
  2952. }
  2953. /*
  2954. * There is a case where idling does not have to be performed for
  2955. * throughput concerns, but to preserve the throughput share of
  2956. * the process associated with bfqq.
  2957. *
  2958. * To introduce this case, we can note that allowing the drive
  2959. * to enqueue more than one request at a time, and hence
  2960. * delegating de facto final scheduling decisions to the
  2961. * drive's internal scheduler, entails loss of control on the
  2962. * actual request service order. In particular, the critical
  2963. * situation is when requests from different processes happen
  2964. * to be present, at the same time, in the internal queue(s)
  2965. * of the drive. In such a situation, the drive, by deciding
  2966. * the service order of the internally-queued requests, does
  2967. * determine also the actual throughput distribution among
  2968. * these processes. But the drive typically has no notion or
  2969. * concern about per-process throughput distribution, and
  2970. * makes its decisions only on a per-request basis. Therefore,
  2971. * the service distribution enforced by the drive's internal
  2972. * scheduler is likely to coincide with the desired throughput
  2973. * distribution only in a completely symmetric, or favorably
  2974. * skewed scenario where:
  2975. * (i-a) each of these processes must get the same throughput as
  2976. * the others,
  2977. * (i-b) in case (i-a) does not hold, it holds that the process
  2978. * associated with bfqq must receive a lower or equal
  2979. * throughput than any of the other processes;
  2980. * (ii) the I/O of each process has the same properties, in
  2981. * terms of locality (sequential or random), direction
  2982. * (reads or writes), request sizes, greediness
  2983. * (from I/O-bound to sporadic), and so on;
  2984. * In fact, in such a scenario, the drive tends to treat the requests
  2985. * of each process in about the same way as the requests of the
  2986. * others, and thus to provide each of these processes with about the
  2987. * same throughput. This is exactly the desired throughput
  2988. * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
  2989. * even more convenient distribution for (the process associated with)
  2990. * bfqq.
  2991. *
  2992. * In contrast, in any asymmetric or unfavorable scenario, device
  2993. * idling (I/O-dispatch plugging) is certainly needed to guarantee
  2994. * that bfqq receives its assigned fraction of the device throughput
  2995. * (see [1] for details).
  2996. *
  2997. * The problem is that idling may significantly reduce throughput with
  2998. * certain combinations of types of I/O and devices. An important
  2999. * example is sync random I/O on flash storage with command
  3000. * queueing. So, unless bfqq falls in cases where idling also boosts
  3001. * throughput, it is important to check conditions (i-a), i(-b) and
  3002. * (ii) accurately, so as to avoid idling when not strictly needed for
  3003. * service guarantees.
  3004. *
  3005. * Unfortunately, it is extremely difficult to thoroughly check
  3006. * condition (ii). And, in case there are active groups, it becomes
  3007. * very difficult to check conditions (i-a) and (i-b) too. In fact,
  3008. * if there are active groups, then, for conditions (i-a) or (i-b) to
  3009. * become false 'indirectly', it is enough that an active group
  3010. * contains more active processes or sub-groups than some other active
  3011. * group. More precisely, for conditions (i-a) or (i-b) to become
  3012. * false because of such a group, it is not even necessary that the
  3013. * group is (still) active: it is sufficient that, even if the group
  3014. * has become inactive, some of its descendant processes still have
  3015. * some request already dispatched but still waiting for
  3016. * completion. In fact, requests have still to be guaranteed their
  3017. * share of the throughput even after being dispatched. In this
  3018. * respect, it is easy to show that, if a group frequently becomes
  3019. * inactive while still having in-flight requests, and if, when this
  3020. * happens, the group is not considered in the calculation of whether
  3021. * the scenario is asymmetric, then the group may fail to be
  3022. * guaranteed its fair share of the throughput (basically because
  3023. * idling may not be performed for the descendant processes of the
  3024. * group, but it had to be). We address this issue with the following
  3025. * bi-modal behavior, implemented in the function
  3026. * bfq_asymmetric_scenario().
  3027. *
  3028. * If there are groups with requests waiting for completion
  3029. * (as commented above, some of these groups may even be
  3030. * already inactive), then the scenario is tagged as
  3031. * asymmetric, conservatively, without checking any of the
  3032. * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
  3033. * This behavior matches also the fact that groups are created
  3034. * exactly if controlling I/O is a primary concern (to
  3035. * preserve bandwidth and latency guarantees).
  3036. *
  3037. * On the opposite end, if there are no groups with requests waiting
  3038. * for completion, then only conditions (i-a) and (i-b) are actually
  3039. * controlled, i.e., provided that conditions (i-a) or (i-b) holds,
  3040. * idling is not performed, regardless of whether condition (ii)
  3041. * holds. In other words, only if conditions (i-a) and (i-b) do not
  3042. * hold, then idling is allowed, and the device tends to be prevented
  3043. * from queueing many requests, possibly of several processes. Since
  3044. * there are no groups with requests waiting for completion, then, to
  3045. * control conditions (i-a) and (i-b) it is enough to check just
  3046. * whether all the queues with requests waiting for completion also
  3047. * have the same weight.
  3048. *
  3049. * Not checking condition (ii) evidently exposes bfqq to the
  3050. * risk of getting less throughput than its fair share.
  3051. * However, for queues with the same weight, a further
  3052. * mechanism, preemption, mitigates or even eliminates this
  3053. * problem. And it does so without consequences on overall
  3054. * throughput. This mechanism and its benefits are explained
  3055. * in the next three paragraphs.
  3056. *
  3057. * Even if a queue, say Q, is expired when it remains idle, Q
  3058. * can still preempt the new in-service queue if the next
  3059. * request of Q arrives soon (see the comments on
  3060. * bfq_bfqq_update_budg_for_activation). If all queues and
  3061. * groups have the same weight, this form of preemption,
  3062. * combined with the hole-recovery heuristic described in the
  3063. * comments on function bfq_bfqq_update_budg_for_activation,
  3064. * are enough to preserve a correct bandwidth distribution in
  3065. * the mid term, even without idling. In fact, even if not
  3066. * idling allows the internal queues of the device to contain
  3067. * many requests, and thus to reorder requests, we can rather
  3068. * safely assume that the internal scheduler still preserves a
  3069. * minimum of mid-term fairness.
  3070. *
  3071. * More precisely, this preemption-based, idleless approach
  3072. * provides fairness in terms of IOPS, and not sectors per
  3073. * second. This can be seen with a simple example. Suppose
  3074. * that there are two queues with the same weight, but that
  3075. * the first queue receives requests of 8 sectors, while the
  3076. * second queue receives requests of 1024 sectors. In
  3077. * addition, suppose that each of the two queues contains at
  3078. * most one request at a time, which implies that each queue
  3079. * always remains idle after it is served. Finally, after
  3080. * remaining idle, each queue receives very quickly a new
  3081. * request. It follows that the two queues are served
  3082. * alternatively, preempting each other if needed. This
  3083. * implies that, although both queues have the same weight,
  3084. * the queue with large requests receives a service that is
  3085. * 1024/8 times as high as the service received by the other
  3086. * queue.
  3087. *
  3088. * The motivation for using preemption instead of idling (for
  3089. * queues with the same weight) is that, by not idling,
  3090. * service guarantees are preserved (completely or at least in
  3091. * part) without minimally sacrificing throughput. And, if
  3092. * there is no active group, then the primary expectation for
  3093. * this device is probably a high throughput.
  3094. *
  3095. * We are now left only with explaining the two sub-conditions in the
  3096. * additional compound condition that is checked below for deciding
  3097. * whether the scenario is asymmetric. To explain the first
  3098. * sub-condition, we need to add that the function
  3099. * bfq_asymmetric_scenario checks the weights of only
  3100. * non-weight-raised queues, for efficiency reasons (see comments on
  3101. * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
  3102. * is checked explicitly here. More precisely, the compound condition
  3103. * below takes into account also the fact that, even if bfqq is being
  3104. * weight-raised, the scenario is still symmetric if all queues with
  3105. * requests waiting for completion happen to be
  3106. * weight-raised. Actually, we should be even more precise here, and
  3107. * differentiate between interactive weight raising and soft real-time
  3108. * weight raising.
  3109. *
  3110. * The second sub-condition checked in the compound condition is
  3111. * whether there is a fair amount of already in-flight I/O not
  3112. * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
  3113. * following reason. The drive may decide to serve in-flight
  3114. * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
  3115. * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
  3116. * I/O-dispatching is not plugged, then, while bfqq remains empty, a
  3117. * basically uncontrolled amount of I/O from other queues may be
  3118. * dispatched too, possibly causing the service of bfqq's I/O to be
  3119. * delayed even longer in the drive. This problem gets more and more
  3120. * serious as the speed and the queue depth of the drive grow,
  3121. * because, as these two quantities grow, the probability to find no
  3122. * queue busy but many requests in flight grows too. By contrast,
  3123. * plugging I/O dispatching minimizes the delay induced by already
  3124. * in-flight I/O, and enables bfqq to recover the bandwidth it may
  3125. * lose because of this delay.
  3126. *
  3127. * As a side note, it is worth considering that the above
  3128. * device-idling countermeasures may however fail in the following
  3129. * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
  3130. * in a time period during which all symmetry sub-conditions hold, and
  3131. * therefore the device is allowed to enqueue many requests, but at
  3132. * some later point in time some sub-condition stops to hold, then it
  3133. * may become impossible to make requests be served in the desired
  3134. * order until all the requests already queued in the device have been
  3135. * served. The last sub-condition commented above somewhat mitigates
  3136. * this problem for weight-raised queues.
  3137. */
  3138. static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
  3139. struct bfq_queue *bfqq)
  3140. {
  3141. /* No point in idling for bfqq if it won't get requests any longer */
  3142. if (unlikely(!bfqq_process_refs(bfqq)))
  3143. return false;
  3144. return (bfqq->wr_coeff > 1 &&
  3145. (bfqd->wr_busy_queues <
  3146. bfq_tot_busy_queues(bfqd) ||
  3147. bfqd->rq_in_driver >=
  3148. bfqq->dispatched + 4)) ||
  3149. bfq_asymmetric_scenario(bfqd, bfqq);
  3150. }
  3151. static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  3152. enum bfqq_expiration reason)
  3153. {
  3154. /*
  3155. * If this bfqq is shared between multiple processes, check
  3156. * to make sure that those processes are still issuing I/Os
  3157. * within the mean seek distance. If not, it may be time to
  3158. * break the queues apart again.
  3159. */
  3160. if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
  3161. bfq_mark_bfqq_split_coop(bfqq);
  3162. /*
  3163. * Consider queues with a higher finish virtual time than
  3164. * bfqq. If idling_needed_for_service_guarantees(bfqq) returns
  3165. * true, then bfqq's bandwidth would be violated if an
  3166. * uncontrolled amount of I/O from these queues were
  3167. * dispatched while bfqq is waiting for its new I/O to
  3168. * arrive. This is exactly what may happen if this is a forced
  3169. * expiration caused by a preemption attempt, and if bfqq is
  3170. * not re-scheduled. To prevent this from happening, re-queue
  3171. * bfqq if it needs I/O-dispatch plugging, even if it is
  3172. * empty. By doing so, bfqq is granted to be served before the
  3173. * above queues (provided that bfqq is of course eligible).
  3174. */
  3175. if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
  3176. !(reason == BFQQE_PREEMPTED &&
  3177. idling_needed_for_service_guarantees(bfqd, bfqq))) {
  3178. if (bfqq->dispatched == 0)
  3179. /*
  3180. * Overloading budget_timeout field to store
  3181. * the time at which the queue remains with no
  3182. * backlog and no outstanding request; used by
  3183. * the weight-raising mechanism.
  3184. */
  3185. bfqq->budget_timeout = jiffies;
  3186. bfq_del_bfqq_busy(bfqd, bfqq, true);
  3187. } else {
  3188. bfq_requeue_bfqq(bfqd, bfqq, true);
  3189. /*
  3190. * Resort priority tree of potential close cooperators.
  3191. * See comments on bfq_pos_tree_add_move() for the unlikely().
  3192. */
  3193. if (unlikely(!bfqd->nonrot_with_queueing &&
  3194. !RB_EMPTY_ROOT(&bfqq->sort_list)))
  3195. bfq_pos_tree_add_move(bfqd, bfqq);
  3196. }
  3197. /*
  3198. * All in-service entities must have been properly deactivated
  3199. * or requeued before executing the next function, which
  3200. * resets all in-service entities as no more in service. This
  3201. * may cause bfqq to be freed. If this happens, the next
  3202. * function returns true.
  3203. */
  3204. return __bfq_bfqd_reset_in_service(bfqd);
  3205. }
  3206. /**
  3207. * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
  3208. * @bfqd: device data.
  3209. * @bfqq: queue to update.
  3210. * @reason: reason for expiration.
  3211. *
  3212. * Handle the feedback on @bfqq budget at queue expiration.
  3213. * See the body for detailed comments.
  3214. */
  3215. static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
  3216. struct bfq_queue *bfqq,
  3217. enum bfqq_expiration reason)
  3218. {
  3219. struct request *next_rq;
  3220. int budget, min_budget;
  3221. min_budget = bfq_min_budget(bfqd);
  3222. if (bfqq->wr_coeff == 1)
  3223. budget = bfqq->max_budget;
  3224. else /*
  3225. * Use a constant, low budget for weight-raised queues,
  3226. * to help achieve a low latency. Keep it slightly higher
  3227. * than the minimum possible budget, to cause a little
  3228. * bit fewer expirations.
  3229. */
  3230. budget = 2 * min_budget;
  3231. bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
  3232. bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
  3233. bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
  3234. budget, bfq_min_budget(bfqd));
  3235. bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
  3236. bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
  3237. if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
  3238. switch (reason) {
  3239. /*
  3240. * Caveat: in all the following cases we trade latency
  3241. * for throughput.
  3242. */
  3243. case BFQQE_TOO_IDLE:
  3244. /*
  3245. * This is the only case where we may reduce
  3246. * the budget: if there is no request of the
  3247. * process still waiting for completion, then
  3248. * we assume (tentatively) that the timer has
  3249. * expired because the batch of requests of
  3250. * the process could have been served with a
  3251. * smaller budget. Hence, betting that
  3252. * process will behave in the same way when it
  3253. * becomes backlogged again, we reduce its
  3254. * next budget. As long as we guess right,
  3255. * this budget cut reduces the latency
  3256. * experienced by the process.
  3257. *
  3258. * However, if there are still outstanding
  3259. * requests, then the process may have not yet
  3260. * issued its next request just because it is
  3261. * still waiting for the completion of some of
  3262. * the still outstanding ones. So in this
  3263. * subcase we do not reduce its budget, on the
  3264. * contrary we increase it to possibly boost
  3265. * the throughput, as discussed in the
  3266. * comments to the BUDGET_TIMEOUT case.
  3267. */
  3268. if (bfqq->dispatched > 0) /* still outstanding reqs */
  3269. budget = min(budget * 2, bfqd->bfq_max_budget);
  3270. else {
  3271. if (budget > 5 * min_budget)
  3272. budget -= 4 * min_budget;
  3273. else
  3274. budget = min_budget;
  3275. }
  3276. break;
  3277. case BFQQE_BUDGET_TIMEOUT:
  3278. /*
  3279. * We double the budget here because it gives
  3280. * the chance to boost the throughput if this
  3281. * is not a seeky process (and has bumped into
  3282. * this timeout because of, e.g., ZBR).
  3283. */
  3284. budget = min(budget * 2, bfqd->bfq_max_budget);
  3285. break;
  3286. case BFQQE_BUDGET_EXHAUSTED:
  3287. /*
  3288. * The process still has backlog, and did not
  3289. * let either the budget timeout or the disk
  3290. * idling timeout expire. Hence it is not
  3291. * seeky, has a short thinktime and may be
  3292. * happy with a higher budget too. So
  3293. * definitely increase the budget of this good
  3294. * candidate to boost the disk throughput.
  3295. */
  3296. budget = min(budget * 4, bfqd->bfq_max_budget);
  3297. break;
  3298. case BFQQE_NO_MORE_REQUESTS:
  3299. /*
  3300. * For queues that expire for this reason, it
  3301. * is particularly important to keep the
  3302. * budget close to the actual service they
  3303. * need. Doing so reduces the timestamp
  3304. * misalignment problem described in the
  3305. * comments in the body of
  3306. * __bfq_activate_entity. In fact, suppose
  3307. * that a queue systematically expires for
  3308. * BFQQE_NO_MORE_REQUESTS and presents a
  3309. * new request in time to enjoy timestamp
  3310. * back-shifting. The larger the budget of the
  3311. * queue is with respect to the service the
  3312. * queue actually requests in each service
  3313. * slot, the more times the queue can be
  3314. * reactivated with the same virtual finish
  3315. * time. It follows that, even if this finish
  3316. * time is pushed to the system virtual time
  3317. * to reduce the consequent timestamp
  3318. * misalignment, the queue unjustly enjoys for
  3319. * many re-activations a lower finish time
  3320. * than all newly activated queues.
  3321. *
  3322. * The service needed by bfqq is measured
  3323. * quite precisely by bfqq->entity.service.
  3324. * Since bfqq does not enjoy device idling,
  3325. * bfqq->entity.service is equal to the number
  3326. * of sectors that the process associated with
  3327. * bfqq requested to read/write before waiting
  3328. * for request completions, or blocking for
  3329. * other reasons.
  3330. */
  3331. budget = max_t(int, bfqq->entity.service, min_budget);
  3332. break;
  3333. default:
  3334. return;
  3335. }
  3336. } else if (!bfq_bfqq_sync(bfqq)) {
  3337. /*
  3338. * Async queues get always the maximum possible
  3339. * budget, as for them we do not care about latency
  3340. * (in addition, their ability to dispatch is limited
  3341. * by the charging factor).
  3342. */
  3343. budget = bfqd->bfq_max_budget;
  3344. }
  3345. bfqq->max_budget = budget;
  3346. if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
  3347. !bfqd->bfq_user_max_budget)
  3348. bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
  3349. /*
  3350. * If there is still backlog, then assign a new budget, making
  3351. * sure that it is large enough for the next request. Since
  3352. * the finish time of bfqq must be kept in sync with the
  3353. * budget, be sure to call __bfq_bfqq_expire() *after* this
  3354. * update.
  3355. *
  3356. * If there is no backlog, then no need to update the budget;
  3357. * it will be updated on the arrival of a new request.
  3358. */
  3359. next_rq = bfqq->next_rq;
  3360. if (next_rq)
  3361. bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
  3362. bfq_serv_to_charge(next_rq, bfqq));
  3363. bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
  3364. next_rq ? blk_rq_sectors(next_rq) : 0,
  3365. bfqq->entity.budget);
  3366. }
  3367. /*
  3368. * Return true if the process associated with bfqq is "slow". The slow
  3369. * flag is used, in addition to the budget timeout, to reduce the
  3370. * amount of service provided to seeky processes, and thus reduce
  3371. * their chances to lower the throughput. More details in the comments
  3372. * on the function bfq_bfqq_expire().
  3373. *
  3374. * An important observation is in order: as discussed in the comments
  3375. * on the function bfq_update_peak_rate(), with devices with internal
  3376. * queues, it is hard if ever possible to know when and for how long
  3377. * an I/O request is processed by the device (apart from the trivial
  3378. * I/O pattern where a new request is dispatched only after the
  3379. * previous one has been completed). This makes it hard to evaluate
  3380. * the real rate at which the I/O requests of each bfq_queue are
  3381. * served. In fact, for an I/O scheduler like BFQ, serving a
  3382. * bfq_queue means just dispatching its requests during its service
  3383. * slot (i.e., until the budget of the queue is exhausted, or the
  3384. * queue remains idle, or, finally, a timeout fires). But, during the
  3385. * service slot of a bfq_queue, around 100 ms at most, the device may
  3386. * be even still processing requests of bfq_queues served in previous
  3387. * service slots. On the opposite end, the requests of the in-service
  3388. * bfq_queue may be completed after the service slot of the queue
  3389. * finishes.
  3390. *
  3391. * Anyway, unless more sophisticated solutions are used
  3392. * (where possible), the sum of the sizes of the requests dispatched
  3393. * during the service slot of a bfq_queue is probably the only
  3394. * approximation available for the service received by the bfq_queue
  3395. * during its service slot. And this sum is the quantity used in this
  3396. * function to evaluate the I/O speed of a process.
  3397. */
  3398. static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  3399. bool compensate, enum bfqq_expiration reason,
  3400. unsigned long *delta_ms)
  3401. {
  3402. ktime_t delta_ktime;
  3403. u32 delta_usecs;
  3404. bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
  3405. if (!bfq_bfqq_sync(bfqq))
  3406. return false;
  3407. if (compensate)
  3408. delta_ktime = bfqd->last_idling_start;
  3409. else
  3410. delta_ktime = ktime_get();
  3411. delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
  3412. delta_usecs = ktime_to_us(delta_ktime);
  3413. /* don't use too short time intervals */
  3414. if (delta_usecs < 1000) {
  3415. if (blk_queue_nonrot(bfqd->queue))
  3416. /*
  3417. * give same worst-case guarantees as idling
  3418. * for seeky
  3419. */
  3420. *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
  3421. else /* charge at least one seek */
  3422. *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
  3423. return slow;
  3424. }
  3425. *delta_ms = delta_usecs / USEC_PER_MSEC;
  3426. /*
  3427. * Use only long (> 20ms) intervals to filter out excessive
  3428. * spikes in service rate estimation.
  3429. */
  3430. if (delta_usecs > 20000) {
  3431. /*
  3432. * Caveat for rotational devices: processes doing I/O
  3433. * in the slower disk zones tend to be slow(er) even
  3434. * if not seeky. In this respect, the estimated peak
  3435. * rate is likely to be an average over the disk
  3436. * surface. Accordingly, to not be too harsh with
  3437. * unlucky processes, a process is deemed slow only if
  3438. * its rate has been lower than half of the estimated
  3439. * peak rate.
  3440. */
  3441. slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
  3442. }
  3443. bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
  3444. return slow;
  3445. }
  3446. /*
  3447. * To be deemed as soft real-time, an application must meet two
  3448. * requirements. First, the application must not require an average
  3449. * bandwidth higher than the approximate bandwidth required to playback or
  3450. * record a compressed high-definition video.
  3451. * The next function is invoked on the completion of the last request of a
  3452. * batch, to compute the next-start time instant, soft_rt_next_start, such
  3453. * that, if the next request of the application does not arrive before
  3454. * soft_rt_next_start, then the above requirement on the bandwidth is met.
  3455. *
  3456. * The second requirement is that the request pattern of the application is
  3457. * isochronous, i.e., that, after issuing a request or a batch of requests,
  3458. * the application stops issuing new requests until all its pending requests
  3459. * have been completed. After that, the application may issue a new batch,
  3460. * and so on.
  3461. * For this reason the next function is invoked to compute
  3462. * soft_rt_next_start only for applications that meet this requirement,
  3463. * whereas soft_rt_next_start is set to infinity for applications that do
  3464. * not.
  3465. *
  3466. * Unfortunately, even a greedy (i.e., I/O-bound) application may
  3467. * happen to meet, occasionally or systematically, both the above
  3468. * bandwidth and isochrony requirements. This may happen at least in
  3469. * the following circumstances. First, if the CPU load is high. The
  3470. * application may stop issuing requests while the CPUs are busy
  3471. * serving other processes, then restart, then stop again for a while,
  3472. * and so on. The other circumstances are related to the storage
  3473. * device: the storage device is highly loaded or reaches a low-enough
  3474. * throughput with the I/O of the application (e.g., because the I/O
  3475. * is random and/or the device is slow). In all these cases, the
  3476. * I/O of the application may be simply slowed down enough to meet
  3477. * the bandwidth and isochrony requirements. To reduce the probability
  3478. * that greedy applications are deemed as soft real-time in these
  3479. * corner cases, a further rule is used in the computation of
  3480. * soft_rt_next_start: the return value of this function is forced to
  3481. * be higher than the maximum between the following two quantities.
  3482. *
  3483. * (a) Current time plus: (1) the maximum time for which the arrival
  3484. * of a request is waited for when a sync queue becomes idle,
  3485. * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
  3486. * postpone for a moment the reason for adding a few extra
  3487. * jiffies; we get back to it after next item (b). Lower-bounding
  3488. * the return value of this function with the current time plus
  3489. * bfqd->bfq_slice_idle tends to filter out greedy applications,
  3490. * because the latter issue their next request as soon as possible
  3491. * after the last one has been completed. In contrast, a soft
  3492. * real-time application spends some time processing data, after a
  3493. * batch of its requests has been completed.
  3494. *
  3495. * (b) Current value of bfqq->soft_rt_next_start. As pointed out
  3496. * above, greedy applications may happen to meet both the
  3497. * bandwidth and isochrony requirements under heavy CPU or
  3498. * storage-device load. In more detail, in these scenarios, these
  3499. * applications happen, only for limited time periods, to do I/O
  3500. * slowly enough to meet all the requirements described so far,
  3501. * including the filtering in above item (a). These slow-speed
  3502. * time intervals are usually interspersed between other time
  3503. * intervals during which these applications do I/O at a very high
  3504. * speed. Fortunately, exactly because of the high speed of the
  3505. * I/O in the high-speed intervals, the values returned by this
  3506. * function happen to be so high, near the end of any such
  3507. * high-speed interval, to be likely to fall *after* the end of
  3508. * the low-speed time interval that follows. These high values are
  3509. * stored in bfqq->soft_rt_next_start after each invocation of
  3510. * this function. As a consequence, if the last value of
  3511. * bfqq->soft_rt_next_start is constantly used to lower-bound the
  3512. * next value that this function may return, then, from the very
  3513. * beginning of a low-speed interval, bfqq->soft_rt_next_start is
  3514. * likely to be constantly kept so high that any I/O request
  3515. * issued during the low-speed interval is considered as arriving
  3516. * to soon for the application to be deemed as soft
  3517. * real-time. Then, in the high-speed interval that follows, the
  3518. * application will not be deemed as soft real-time, just because
  3519. * it will do I/O at a high speed. And so on.
  3520. *
  3521. * Getting back to the filtering in item (a), in the following two
  3522. * cases this filtering might be easily passed by a greedy
  3523. * application, if the reference quantity was just
  3524. * bfqd->bfq_slice_idle:
  3525. * 1) HZ is so low that the duration of a jiffy is comparable to or
  3526. * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
  3527. * devices with HZ=100. The time granularity may be so coarse
  3528. * that the approximation, in jiffies, of bfqd->bfq_slice_idle
  3529. * is rather lower than the exact value.
  3530. * 2) jiffies, instead of increasing at a constant rate, may stop increasing
  3531. * for a while, then suddenly 'jump' by several units to recover the lost
  3532. * increments. This seems to happen, e.g., inside virtual machines.
  3533. * To address this issue, in the filtering in (a) we do not use as a
  3534. * reference time interval just bfqd->bfq_slice_idle, but
  3535. * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
  3536. * minimum number of jiffies for which the filter seems to be quite
  3537. * precise also in embedded systems and KVM/QEMU virtual machines.
  3538. */
  3539. static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
  3540. struct bfq_queue *bfqq)
  3541. {
  3542. return max3(bfqq->soft_rt_next_start,
  3543. bfqq->last_idle_bklogged +
  3544. HZ * bfqq->service_from_backlogged /
  3545. bfqd->bfq_wr_max_softrt_rate,
  3546. jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
  3547. }
  3548. /**
  3549. * bfq_bfqq_expire - expire a queue.
  3550. * @bfqd: device owning the queue.
  3551. * @bfqq: the queue to expire.
  3552. * @compensate: if true, compensate for the time spent idling.
  3553. * @reason: the reason causing the expiration.
  3554. *
  3555. * If the process associated with bfqq does slow I/O (e.g., because it
  3556. * issues random requests), we charge bfqq with the time it has been
  3557. * in service instead of the service it has received (see
  3558. * bfq_bfqq_charge_time for details on how this goal is achieved). As
  3559. * a consequence, bfqq will typically get higher timestamps upon
  3560. * reactivation, and hence it will be rescheduled as if it had
  3561. * received more service than what it has actually received. In the
  3562. * end, bfqq receives less service in proportion to how slowly its
  3563. * associated process consumes its budgets (and hence how seriously it
  3564. * tends to lower the throughput). In addition, this time-charging
  3565. * strategy guarantees time fairness among slow processes. In
  3566. * contrast, if the process associated with bfqq is not slow, we
  3567. * charge bfqq exactly with the service it has received.
  3568. *
  3569. * Charging time to the first type of queues and the exact service to
  3570. * the other has the effect of using the WF2Q+ policy to schedule the
  3571. * former on a timeslice basis, without violating service domain
  3572. * guarantees among the latter.
  3573. */
  3574. void bfq_bfqq_expire(struct bfq_data *bfqd,
  3575. struct bfq_queue *bfqq,
  3576. bool compensate,
  3577. enum bfqq_expiration reason)
  3578. {
  3579. bool slow;
  3580. unsigned long delta = 0;
  3581. struct bfq_entity *entity = &bfqq->entity;
  3582. /*
  3583. * Check whether the process is slow (see bfq_bfqq_is_slow).
  3584. */
  3585. slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
  3586. /*
  3587. * As above explained, charge slow (typically seeky) and
  3588. * timed-out queues with the time and not the service
  3589. * received, to favor sequential workloads.
  3590. *
  3591. * Processes doing I/O in the slower disk zones will tend to
  3592. * be slow(er) even if not seeky. Therefore, since the
  3593. * estimated peak rate is actually an average over the disk
  3594. * surface, these processes may timeout just for bad luck. To
  3595. * avoid punishing them, do not charge time to processes that
  3596. * succeeded in consuming at least 2/3 of their budget. This
  3597. * allows BFQ to preserve enough elasticity to still perform
  3598. * bandwidth, and not time, distribution with little unlucky
  3599. * or quasi-sequential processes.
  3600. */
  3601. if (bfqq->wr_coeff == 1 &&
  3602. (slow ||
  3603. (reason == BFQQE_BUDGET_TIMEOUT &&
  3604. bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
  3605. bfq_bfqq_charge_time(bfqd, bfqq, delta);
  3606. if (reason == BFQQE_TOO_IDLE &&
  3607. entity->service <= 2 * entity->budget / 10)
  3608. bfq_clear_bfqq_IO_bound(bfqq);
  3609. if (bfqd->low_latency && bfqq->wr_coeff == 1)
  3610. bfqq->last_wr_start_finish = jiffies;
  3611. if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
  3612. RB_EMPTY_ROOT(&bfqq->sort_list)) {
  3613. /*
  3614. * If we get here, and there are no outstanding
  3615. * requests, then the request pattern is isochronous
  3616. * (see the comments on the function
  3617. * bfq_bfqq_softrt_next_start()). Thus we can compute
  3618. * soft_rt_next_start. And we do it, unless bfqq is in
  3619. * interactive weight raising. We do not do it in the
  3620. * latter subcase, for the following reason. bfqq may
  3621. * be conveying the I/O needed to load a soft
  3622. * real-time application. Such an application will
  3623. * actually exhibit a soft real-time I/O pattern after
  3624. * it finally starts doing its job. But, if
  3625. * soft_rt_next_start is computed here for an
  3626. * interactive bfqq, and bfqq had received a lot of
  3627. * service before remaining with no outstanding
  3628. * request (likely to happen on a fast device), then
  3629. * soft_rt_next_start would be assigned such a high
  3630. * value that, for a very long time, bfqq would be
  3631. * prevented from being possibly considered as soft
  3632. * real time.
  3633. *
  3634. * If, instead, the queue still has outstanding
  3635. * requests, then we have to wait for the completion
  3636. * of all the outstanding requests to discover whether
  3637. * the request pattern is actually isochronous.
  3638. */
  3639. if (bfqq->dispatched == 0 &&
  3640. bfqq->wr_coeff != bfqd->bfq_wr_coeff)
  3641. bfqq->soft_rt_next_start =
  3642. bfq_bfqq_softrt_next_start(bfqd, bfqq);
  3643. else if (bfqq->dispatched > 0) {
  3644. /*
  3645. * Schedule an update of soft_rt_next_start to when
  3646. * the task may be discovered to be isochronous.
  3647. */
  3648. bfq_mark_bfqq_softrt_update(bfqq);
  3649. }
  3650. }
  3651. bfq_log_bfqq(bfqd, bfqq,
  3652. "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
  3653. slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
  3654. /*
  3655. * bfqq expired, so no total service time needs to be computed
  3656. * any longer: reset state machine for measuring total service
  3657. * times.
  3658. */
  3659. bfqd->rqs_injected = bfqd->wait_dispatch = false;
  3660. bfqd->waited_rq = NULL;
  3661. /*
  3662. * Increase, decrease or leave budget unchanged according to
  3663. * reason.
  3664. */
  3665. __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
  3666. if (__bfq_bfqq_expire(bfqd, bfqq, reason))
  3667. /* bfqq is gone, no more actions on it */
  3668. return;
  3669. /* mark bfqq as waiting a request only if a bic still points to it */
  3670. if (!bfq_bfqq_busy(bfqq) &&
  3671. reason != BFQQE_BUDGET_TIMEOUT &&
  3672. reason != BFQQE_BUDGET_EXHAUSTED) {
  3673. bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
  3674. /*
  3675. * Not setting service to 0, because, if the next rq
  3676. * arrives in time, the queue will go on receiving
  3677. * service with this same budget (as if it never expired)
  3678. */
  3679. } else
  3680. entity->service = 0;
  3681. /*
  3682. * Reset the received-service counter for every parent entity.
  3683. * Differently from what happens with bfqq->entity.service,
  3684. * the resetting of this counter never needs to be postponed
  3685. * for parent entities. In fact, in case bfqq may have a
  3686. * chance to go on being served using the last, partially
  3687. * consumed budget, bfqq->entity.service needs to be kept,
  3688. * because if bfqq then actually goes on being served using
  3689. * the same budget, the last value of bfqq->entity.service is
  3690. * needed to properly decrement bfqq->entity.budget by the
  3691. * portion already consumed. In contrast, it is not necessary
  3692. * to keep entity->service for parent entities too, because
  3693. * the bubble up of the new value of bfqq->entity.budget will
  3694. * make sure that the budgets of parent entities are correct,
  3695. * even in case bfqq and thus parent entities go on receiving
  3696. * service with the same budget.
  3697. */
  3698. entity = entity->parent;
  3699. for_each_entity(entity)
  3700. entity->service = 0;
  3701. }
  3702. /*
  3703. * Budget timeout is not implemented through a dedicated timer, but
  3704. * just checked on request arrivals and completions, as well as on
  3705. * idle timer expirations.
  3706. */
  3707. static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
  3708. {
  3709. return time_is_before_eq_jiffies(bfqq->budget_timeout);
  3710. }
  3711. /*
  3712. * If we expire a queue that is actively waiting (i.e., with the
  3713. * device idled) for the arrival of a new request, then we may incur
  3714. * the timestamp misalignment problem described in the body of the
  3715. * function __bfq_activate_entity. Hence we return true only if this
  3716. * condition does not hold, or if the queue is slow enough to deserve
  3717. * only to be kicked off for preserving a high throughput.
  3718. */
  3719. static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
  3720. {
  3721. bfq_log_bfqq(bfqq->bfqd, bfqq,
  3722. "may_budget_timeout: wait_request %d left %d timeout %d",
  3723. bfq_bfqq_wait_request(bfqq),
  3724. bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
  3725. bfq_bfqq_budget_timeout(bfqq));
  3726. return (!bfq_bfqq_wait_request(bfqq) ||
  3727. bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
  3728. &&
  3729. bfq_bfqq_budget_timeout(bfqq);
  3730. }
  3731. static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
  3732. struct bfq_queue *bfqq)
  3733. {
  3734. bool rot_without_queueing =
  3735. !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
  3736. bfqq_sequential_and_IO_bound,
  3737. idling_boosts_thr;
  3738. /* No point in idling for bfqq if it won't get requests any longer */
  3739. if (unlikely(!bfqq_process_refs(bfqq)))
  3740. return false;
  3741. bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
  3742. bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
  3743. /*
  3744. * The next variable takes into account the cases where idling
  3745. * boosts the throughput.
  3746. *
  3747. * The value of the variable is computed considering, first, that
  3748. * idling is virtually always beneficial for the throughput if:
  3749. * (a) the device is not NCQ-capable and rotational, or
  3750. * (b) regardless of the presence of NCQ, the device is rotational and
  3751. * the request pattern for bfqq is I/O-bound and sequential, or
  3752. * (c) regardless of whether it is rotational, the device is
  3753. * not NCQ-capable and the request pattern for bfqq is
  3754. * I/O-bound and sequential.
  3755. *
  3756. * Secondly, and in contrast to the above item (b), idling an
  3757. * NCQ-capable flash-based device would not boost the
  3758. * throughput even with sequential I/O; rather it would lower
  3759. * the throughput in proportion to how fast the device
  3760. * is. Accordingly, the next variable is true if any of the
  3761. * above conditions (a), (b) or (c) is true, and, in
  3762. * particular, happens to be false if bfqd is an NCQ-capable
  3763. * flash-based device.
  3764. */
  3765. idling_boosts_thr = rot_without_queueing ||
  3766. ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
  3767. bfqq_sequential_and_IO_bound);
  3768. /*
  3769. * The return value of this function is equal to that of
  3770. * idling_boosts_thr, unless a special case holds. In this
  3771. * special case, described below, idling may cause problems to
  3772. * weight-raised queues.
  3773. *
  3774. * When the request pool is saturated (e.g., in the presence
  3775. * of write hogs), if the processes associated with
  3776. * non-weight-raised queues ask for requests at a lower rate,
  3777. * then processes associated with weight-raised queues have a
  3778. * higher probability to get a request from the pool
  3779. * immediately (or at least soon) when they need one. Thus
  3780. * they have a higher probability to actually get a fraction
  3781. * of the device throughput proportional to their high
  3782. * weight. This is especially true with NCQ-capable drives,
  3783. * which enqueue several requests in advance, and further
  3784. * reorder internally-queued requests.
  3785. *
  3786. * For this reason, we force to false the return value if
  3787. * there are weight-raised busy queues. In this case, and if
  3788. * bfqq is not weight-raised, this guarantees that the device
  3789. * is not idled for bfqq (if, instead, bfqq is weight-raised,
  3790. * then idling will be guaranteed by another variable, see
  3791. * below). Combined with the timestamping rules of BFQ (see
  3792. * [1] for details), this behavior causes bfqq, and hence any
  3793. * sync non-weight-raised queue, to get a lower number of
  3794. * requests served, and thus to ask for a lower number of
  3795. * requests from the request pool, before the busy
  3796. * weight-raised queues get served again. This often mitigates
  3797. * starvation problems in the presence of heavy write
  3798. * workloads and NCQ, thereby guaranteeing a higher
  3799. * application and system responsiveness in these hostile
  3800. * scenarios.
  3801. */
  3802. return idling_boosts_thr &&
  3803. bfqd->wr_busy_queues == 0;
  3804. }
  3805. /*
  3806. * For a queue that becomes empty, device idling is allowed only if
  3807. * this function returns true for that queue. As a consequence, since
  3808. * device idling plays a critical role for both throughput boosting
  3809. * and service guarantees, the return value of this function plays a
  3810. * critical role as well.
  3811. *
  3812. * In a nutshell, this function returns true only if idling is
  3813. * beneficial for throughput or, even if detrimental for throughput,
  3814. * idling is however necessary to preserve service guarantees (low
  3815. * latency, desired throughput distribution, ...). In particular, on
  3816. * NCQ-capable devices, this function tries to return false, so as to
  3817. * help keep the drives' internal queues full, whenever this helps the
  3818. * device boost the throughput without causing any service-guarantee
  3819. * issue.
  3820. *
  3821. * Most of the issues taken into account to get the return value of
  3822. * this function are not trivial. We discuss these issues in the two
  3823. * functions providing the main pieces of information needed by this
  3824. * function.
  3825. */
  3826. static bool bfq_better_to_idle(struct bfq_queue *bfqq)
  3827. {
  3828. struct bfq_data *bfqd = bfqq->bfqd;
  3829. bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
  3830. /* No point in idling for bfqq if it won't get requests any longer */
  3831. if (unlikely(!bfqq_process_refs(bfqq)))
  3832. return false;
  3833. if (unlikely(bfqd->strict_guarantees))
  3834. return true;
  3835. /*
  3836. * Idling is performed only if slice_idle > 0. In addition, we
  3837. * do not idle if
  3838. * (a) bfqq is async
  3839. * (b) bfqq is in the idle io prio class: in this case we do
  3840. * not idle because we want to minimize the bandwidth that
  3841. * queues in this class can steal to higher-priority queues
  3842. */
  3843. if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
  3844. bfq_class_idle(bfqq))
  3845. return false;
  3846. idling_boosts_thr_with_no_issue =
  3847. idling_boosts_thr_without_issues(bfqd, bfqq);
  3848. idling_needed_for_service_guar =
  3849. idling_needed_for_service_guarantees(bfqd, bfqq);
  3850. /*
  3851. * We have now the two components we need to compute the
  3852. * return value of the function, which is true only if idling
  3853. * either boosts the throughput (without issues), or is
  3854. * necessary to preserve service guarantees.
  3855. */
  3856. return idling_boosts_thr_with_no_issue ||
  3857. idling_needed_for_service_guar;
  3858. }
  3859. /*
  3860. * If the in-service queue is empty but the function bfq_better_to_idle
  3861. * returns true, then:
  3862. * 1) the queue must remain in service and cannot be expired, and
  3863. * 2) the device must be idled to wait for the possible arrival of a new
  3864. * request for the queue.
  3865. * See the comments on the function bfq_better_to_idle for the reasons
  3866. * why performing device idling is the best choice to boost the throughput
  3867. * and preserve service guarantees when bfq_better_to_idle itself
  3868. * returns true.
  3869. */
  3870. static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
  3871. {
  3872. return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
  3873. }
  3874. /*
  3875. * This function chooses the queue from which to pick the next extra
  3876. * I/O request to inject, if it finds a compatible queue. See the
  3877. * comments on bfq_update_inject_limit() for details on the injection
  3878. * mechanism, and for the definitions of the quantities mentioned
  3879. * below.
  3880. */
  3881. static struct bfq_queue *
  3882. bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
  3883. {
  3884. struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
  3885. unsigned int limit = in_serv_bfqq->inject_limit;
  3886. /*
  3887. * If
  3888. * - bfqq is not weight-raised and therefore does not carry
  3889. * time-critical I/O,
  3890. * or
  3891. * - regardless of whether bfqq is weight-raised, bfqq has
  3892. * however a long think time, during which it can absorb the
  3893. * effect of an appropriate number of extra I/O requests
  3894. * from other queues (see bfq_update_inject_limit for
  3895. * details on the computation of this number);
  3896. * then injection can be performed without restrictions.
  3897. */
  3898. bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
  3899. !bfq_bfqq_has_short_ttime(in_serv_bfqq);
  3900. /*
  3901. * If
  3902. * - the baseline total service time could not be sampled yet,
  3903. * so the inject limit happens to be still 0, and
  3904. * - a lot of time has elapsed since the plugging of I/O
  3905. * dispatching started, so drive speed is being wasted
  3906. * significantly;
  3907. * then temporarily raise inject limit to one request.
  3908. */
  3909. if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
  3910. bfq_bfqq_wait_request(in_serv_bfqq) &&
  3911. time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
  3912. bfqd->bfq_slice_idle)
  3913. )
  3914. limit = 1;
  3915. if (bfqd->rq_in_driver >= limit)
  3916. return NULL;
  3917. /*
  3918. * Linear search of the source queue for injection; but, with
  3919. * a high probability, very few steps are needed to find a
  3920. * candidate queue, i.e., a queue with enough budget left for
  3921. * its next request. In fact:
  3922. * - BFQ dynamically updates the budget of every queue so as
  3923. * to accommodate the expected backlog of the queue;
  3924. * - if a queue gets all its requests dispatched as injected
  3925. * service, then the queue is removed from the active list
  3926. * (and re-added only if it gets new requests, but then it
  3927. * is assigned again enough budget for its new backlog).
  3928. */
  3929. list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
  3930. if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
  3931. (in_serv_always_inject || bfqq->wr_coeff > 1) &&
  3932. bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
  3933. bfq_bfqq_budget_left(bfqq)) {
  3934. /*
  3935. * Allow for only one large in-flight request
  3936. * on non-rotational devices, for the
  3937. * following reason. On non-rotationl drives,
  3938. * large requests take much longer than
  3939. * smaller requests to be served. In addition,
  3940. * the drive prefers to serve large requests
  3941. * w.r.t. to small ones, if it can choose. So,
  3942. * having more than one large requests queued
  3943. * in the drive may easily make the next first
  3944. * request of the in-service queue wait for so
  3945. * long to break bfqq's service guarantees. On
  3946. * the bright side, large requests let the
  3947. * drive reach a very high throughput, even if
  3948. * there is only one in-flight large request
  3949. * at a time.
  3950. */
  3951. if (blk_queue_nonrot(bfqd->queue) &&
  3952. blk_rq_sectors(bfqq->next_rq) >=
  3953. BFQQ_SECT_THR_NONROT)
  3954. limit = min_t(unsigned int, 1, limit);
  3955. else
  3956. limit = in_serv_bfqq->inject_limit;
  3957. if (bfqd->rq_in_driver < limit) {
  3958. bfqd->rqs_injected = true;
  3959. return bfqq;
  3960. }
  3961. }
  3962. return NULL;
  3963. }
  3964. /*
  3965. * Select a queue for service. If we have a current queue in service,
  3966. * check whether to continue servicing it, or retrieve and set a new one.
  3967. */
  3968. static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
  3969. {
  3970. struct bfq_queue *bfqq;
  3971. struct request *next_rq;
  3972. enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
  3973. bfqq = bfqd->in_service_queue;
  3974. if (!bfqq)
  3975. goto new_queue;
  3976. bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
  3977. /*
  3978. * Do not expire bfqq for budget timeout if bfqq may be about
  3979. * to enjoy device idling. The reason why, in this case, we
  3980. * prevent bfqq from expiring is the same as in the comments
  3981. * on the case where bfq_bfqq_must_idle() returns true, in
  3982. * bfq_completed_request().
  3983. */
  3984. if (bfq_may_expire_for_budg_timeout(bfqq) &&
  3985. !bfq_bfqq_must_idle(bfqq))
  3986. goto expire;
  3987. check_queue:
  3988. /*
  3989. * This loop is rarely executed more than once. Even when it
  3990. * happens, it is much more convenient to re-execute this loop
  3991. * than to return NULL and trigger a new dispatch to get a
  3992. * request served.
  3993. */
  3994. next_rq = bfqq->next_rq;
  3995. /*
  3996. * If bfqq has requests queued and it has enough budget left to
  3997. * serve them, keep the queue, otherwise expire it.
  3998. */
  3999. if (next_rq) {
  4000. if (bfq_serv_to_charge(next_rq, bfqq) >
  4001. bfq_bfqq_budget_left(bfqq)) {
  4002. /*
  4003. * Expire the queue for budget exhaustion,
  4004. * which makes sure that the next budget is
  4005. * enough to serve the next request, even if
  4006. * it comes from the fifo expired path.
  4007. */
  4008. reason = BFQQE_BUDGET_EXHAUSTED;
  4009. goto expire;
  4010. } else {
  4011. /*
  4012. * The idle timer may be pending because we may
  4013. * not disable disk idling even when a new request
  4014. * arrives.
  4015. */
  4016. if (bfq_bfqq_wait_request(bfqq)) {
  4017. /*
  4018. * If we get here: 1) at least a new request
  4019. * has arrived but we have not disabled the
  4020. * timer because the request was too small,
  4021. * 2) then the block layer has unplugged
  4022. * the device, causing the dispatch to be
  4023. * invoked.
  4024. *
  4025. * Since the device is unplugged, now the
  4026. * requests are probably large enough to
  4027. * provide a reasonable throughput.
  4028. * So we disable idling.
  4029. */
  4030. bfq_clear_bfqq_wait_request(bfqq);
  4031. hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
  4032. }
  4033. goto keep_queue;
  4034. }
  4035. }
  4036. /*
  4037. * No requests pending. However, if the in-service queue is idling
  4038. * for a new request, or has requests waiting for a completion and
  4039. * may idle after their completion, then keep it anyway.
  4040. *
  4041. * Yet, inject service from other queues if it boosts
  4042. * throughput and is possible.
  4043. */
  4044. if (bfq_bfqq_wait_request(bfqq) ||
  4045. (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
  4046. struct bfq_queue *async_bfqq =
  4047. bfqq->bic && bfqq->bic->bfqq[0] &&
  4048. bfq_bfqq_busy(bfqq->bic->bfqq[0]) &&
  4049. bfqq->bic->bfqq[0]->next_rq ?
  4050. bfqq->bic->bfqq[0] : NULL;
  4051. /*
  4052. * The next three mutually-exclusive ifs decide
  4053. * whether to try injection, and choose the queue to
  4054. * pick an I/O request from.
  4055. *
  4056. * The first if checks whether the process associated
  4057. * with bfqq has also async I/O pending. If so, it
  4058. * injects such I/O unconditionally. Injecting async
  4059. * I/O from the same process can cause no harm to the
  4060. * process. On the contrary, it can only increase
  4061. * bandwidth and reduce latency for the process.
  4062. *
  4063. * The second if checks whether there happens to be a
  4064. * non-empty waker queue for bfqq, i.e., a queue whose
  4065. * I/O needs to be completed for bfqq to receive new
  4066. * I/O. This happens, e.g., if bfqq is associated with
  4067. * a process that does some sync. A sync generates
  4068. * extra blocking I/O, which must be completed before
  4069. * the process associated with bfqq can go on with its
  4070. * I/O. If the I/O of the waker queue is not served,
  4071. * then bfqq remains empty, and no I/O is dispatched,
  4072. * until the idle timeout fires for bfqq. This is
  4073. * likely to result in lower bandwidth and higher
  4074. * latencies for bfqq, and in a severe loss of total
  4075. * throughput. The best action to take is therefore to
  4076. * serve the waker queue as soon as possible. So do it
  4077. * (without relying on the third alternative below for
  4078. * eventually serving waker_bfqq's I/O; see the last
  4079. * paragraph for further details). This systematic
  4080. * injection of I/O from the waker queue does not
  4081. * cause any delay to bfqq's I/O. On the contrary,
  4082. * next bfqq's I/O is brought forward dramatically,
  4083. * for it is not blocked for milliseconds.
  4084. *
  4085. * The third if checks whether bfqq is a queue for
  4086. * which it is better to avoid injection. It is so if
  4087. * bfqq delivers more throughput when served without
  4088. * any further I/O from other queues in the middle, or
  4089. * if the service times of bfqq's I/O requests both
  4090. * count more than overall throughput, and may be
  4091. * easily increased by injection (this happens if bfqq
  4092. * has a short think time). If none of these
  4093. * conditions holds, then a candidate queue for
  4094. * injection is looked for through
  4095. * bfq_choose_bfqq_for_injection(). Note that the
  4096. * latter may return NULL (for example if the inject
  4097. * limit for bfqq is currently 0).
  4098. *
  4099. * NOTE: motivation for the second alternative
  4100. *
  4101. * Thanks to the way the inject limit is updated in
  4102. * bfq_update_has_short_ttime(), it is rather likely
  4103. * that, if I/O is being plugged for bfqq and the
  4104. * waker queue has pending I/O requests that are
  4105. * blocking bfqq's I/O, then the third alternative
  4106. * above lets the waker queue get served before the
  4107. * I/O-plugging timeout fires. So one may deem the
  4108. * second alternative superfluous. It is not, because
  4109. * the third alternative may be way less effective in
  4110. * case of a synchronization. For two main
  4111. * reasons. First, throughput may be low because the
  4112. * inject limit may be too low to guarantee the same
  4113. * amount of injected I/O, from the waker queue or
  4114. * other queues, that the second alternative
  4115. * guarantees (the second alternative unconditionally
  4116. * injects a pending I/O request of the waker queue
  4117. * for each bfq_dispatch_request()). Second, with the
  4118. * third alternative, the duration of the plugging,
  4119. * i.e., the time before bfqq finally receives new I/O,
  4120. * may not be minimized, because the waker queue may
  4121. * happen to be served only after other queues.
  4122. */
  4123. if (async_bfqq &&
  4124. icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
  4125. bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
  4126. bfq_bfqq_budget_left(async_bfqq))
  4127. bfqq = bfqq->bic->bfqq[0];
  4128. else if (bfq_bfqq_has_waker(bfqq) &&
  4129. bfq_bfqq_busy(bfqq->waker_bfqq) &&
  4130. bfqq->waker_bfqq->next_rq &&
  4131. bfq_serv_to_charge(bfqq->waker_bfqq->next_rq,
  4132. bfqq->waker_bfqq) <=
  4133. bfq_bfqq_budget_left(bfqq->waker_bfqq)
  4134. )
  4135. bfqq = bfqq->waker_bfqq;
  4136. else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
  4137. (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
  4138. !bfq_bfqq_has_short_ttime(bfqq)))
  4139. bfqq = bfq_choose_bfqq_for_injection(bfqd);
  4140. else
  4141. bfqq = NULL;
  4142. goto keep_queue;
  4143. }
  4144. reason = BFQQE_NO_MORE_REQUESTS;
  4145. expire:
  4146. bfq_bfqq_expire(bfqd, bfqq, false, reason);
  4147. new_queue:
  4148. bfqq = bfq_set_in_service_queue(bfqd);
  4149. if (bfqq) {
  4150. bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
  4151. goto check_queue;
  4152. }
  4153. keep_queue:
  4154. if (bfqq)
  4155. bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
  4156. else
  4157. bfq_log(bfqd, "select_queue: no queue returned");
  4158. return bfqq;
  4159. }
  4160. static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  4161. {
  4162. struct bfq_entity *entity = &bfqq->entity;
  4163. if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
  4164. bfq_log_bfqq(bfqd, bfqq,
  4165. "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
  4166. jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
  4167. jiffies_to_msecs(bfqq->wr_cur_max_time),
  4168. bfqq->wr_coeff,
  4169. bfqq->entity.weight, bfqq->entity.orig_weight);
  4170. if (entity->prio_changed)
  4171. bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
  4172. /*
  4173. * If the queue was activated in a burst, or too much
  4174. * time has elapsed from the beginning of this
  4175. * weight-raising period, then end weight raising.
  4176. */
  4177. if (bfq_bfqq_in_large_burst(bfqq))
  4178. bfq_bfqq_end_wr(bfqq);
  4179. else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
  4180. bfqq->wr_cur_max_time)) {
  4181. if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
  4182. time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
  4183. bfq_wr_duration(bfqd)))
  4184. bfq_bfqq_end_wr(bfqq);
  4185. else {
  4186. switch_back_to_interactive_wr(bfqq, bfqd);
  4187. bfqq->entity.prio_changed = 1;
  4188. }
  4189. }
  4190. if (bfqq->wr_coeff > 1 &&
  4191. bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
  4192. bfqq->service_from_wr > max_service_from_wr) {
  4193. /* see comments on max_service_from_wr */
  4194. bfq_bfqq_end_wr(bfqq);
  4195. }
  4196. }
  4197. /*
  4198. * To improve latency (for this or other queues), immediately
  4199. * update weight both if it must be raised and if it must be
  4200. * lowered. Since, entity may be on some active tree here, and
  4201. * might have a pending change of its ioprio class, invoke
  4202. * next function with the last parameter unset (see the
  4203. * comments on the function).
  4204. */
  4205. if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
  4206. __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
  4207. entity, false);
  4208. }
  4209. /*
  4210. * Dispatch next request from bfqq.
  4211. */
  4212. static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
  4213. struct bfq_queue *bfqq)
  4214. {
  4215. struct request *rq = bfqq->next_rq;
  4216. unsigned long service_to_charge;
  4217. service_to_charge = bfq_serv_to_charge(rq, bfqq);
  4218. bfq_bfqq_served(bfqq, service_to_charge);
  4219. if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
  4220. bfqd->wait_dispatch = false;
  4221. bfqd->waited_rq = rq;
  4222. }
  4223. bfq_dispatch_remove(bfqd->queue, rq);
  4224. if (bfqq != bfqd->in_service_queue)
  4225. goto return_rq;
  4226. /*
  4227. * If weight raising has to terminate for bfqq, then next
  4228. * function causes an immediate update of bfqq's weight,
  4229. * without waiting for next activation. As a consequence, on
  4230. * expiration, bfqq will be timestamped as if has never been
  4231. * weight-raised during this service slot, even if it has
  4232. * received part or even most of the service as a
  4233. * weight-raised queue. This inflates bfqq's timestamps, which
  4234. * is beneficial, as bfqq is then more willing to leave the
  4235. * device immediately to possible other weight-raised queues.
  4236. */
  4237. bfq_update_wr_data(bfqd, bfqq);
  4238. /*
  4239. * Expire bfqq, pretending that its budget expired, if bfqq
  4240. * belongs to CLASS_IDLE and other queues are waiting for
  4241. * service.
  4242. */
  4243. if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
  4244. goto return_rq;
  4245. bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
  4246. return_rq:
  4247. return rq;
  4248. }
  4249. static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
  4250. {
  4251. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  4252. /*
  4253. * Avoiding lock: a race on bfqd->busy_queues should cause at
  4254. * most a call to dispatch for nothing
  4255. */
  4256. return !list_empty_careful(&bfqd->dispatch) ||
  4257. bfq_tot_busy_queues(bfqd) > 0;
  4258. }
  4259. static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
  4260. {
  4261. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  4262. struct request *rq = NULL;
  4263. struct bfq_queue *bfqq = NULL;
  4264. if (!list_empty(&bfqd->dispatch)) {
  4265. rq = list_first_entry(&bfqd->dispatch, struct request,
  4266. queuelist);
  4267. list_del_init(&rq->queuelist);
  4268. bfqq = RQ_BFQQ(rq);
  4269. if (bfqq) {
  4270. /*
  4271. * Increment counters here, because this
  4272. * dispatch does not follow the standard
  4273. * dispatch flow (where counters are
  4274. * incremented)
  4275. */
  4276. bfqq->dispatched++;
  4277. goto inc_in_driver_start_rq;
  4278. }
  4279. /*
  4280. * We exploit the bfq_finish_requeue_request hook to
  4281. * decrement rq_in_driver, but
  4282. * bfq_finish_requeue_request will not be invoked on
  4283. * this request. So, to avoid unbalance, just start
  4284. * this request, without incrementing rq_in_driver. As
  4285. * a negative consequence, rq_in_driver is deceptively
  4286. * lower than it should be while this request is in
  4287. * service. This may cause bfq_schedule_dispatch to be
  4288. * invoked uselessly.
  4289. *
  4290. * As for implementing an exact solution, the
  4291. * bfq_finish_requeue_request hook, if defined, is
  4292. * probably invoked also on this request. So, by
  4293. * exploiting this hook, we could 1) increment
  4294. * rq_in_driver here, and 2) decrement it in
  4295. * bfq_finish_requeue_request. Such a solution would
  4296. * let the value of the counter be always accurate,
  4297. * but it would entail using an extra interface
  4298. * function. This cost seems higher than the benefit,
  4299. * being the frequency of non-elevator-private
  4300. * requests very low.
  4301. */
  4302. goto start_rq;
  4303. }
  4304. bfq_log(bfqd, "dispatch requests: %d busy queues",
  4305. bfq_tot_busy_queues(bfqd));
  4306. if (bfq_tot_busy_queues(bfqd) == 0)
  4307. goto exit;
  4308. /*
  4309. * Force device to serve one request at a time if
  4310. * strict_guarantees is true. Forcing this service scheme is
  4311. * currently the ONLY way to guarantee that the request
  4312. * service order enforced by the scheduler is respected by a
  4313. * queueing device. Otherwise the device is free even to make
  4314. * some unlucky request wait for as long as the device
  4315. * wishes.
  4316. *
  4317. * Of course, serving one request at a time may cause loss of
  4318. * throughput.
  4319. */
  4320. if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
  4321. goto exit;
  4322. bfqq = bfq_select_queue(bfqd);
  4323. if (!bfqq)
  4324. goto exit;
  4325. rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
  4326. if (rq) {
  4327. inc_in_driver_start_rq:
  4328. bfqd->rq_in_driver++;
  4329. start_rq:
  4330. rq->rq_flags |= RQF_STARTED;
  4331. }
  4332. exit:
  4333. return rq;
  4334. }
  4335. #ifdef CONFIG_BFQ_CGROUP_DEBUG
  4336. static void bfq_update_dispatch_stats(struct request_queue *q,
  4337. struct request *rq,
  4338. struct bfq_queue *in_serv_queue,
  4339. bool idle_timer_disabled)
  4340. {
  4341. struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
  4342. if (!idle_timer_disabled && !bfqq)
  4343. return;
  4344. /*
  4345. * rq and bfqq are guaranteed to exist until this function
  4346. * ends, for the following reasons. First, rq can be
  4347. * dispatched to the device, and then can be completed and
  4348. * freed, only after this function ends. Second, rq cannot be
  4349. * merged (and thus freed because of a merge) any longer,
  4350. * because it has already started. Thus rq cannot be freed
  4351. * before this function ends, and, since rq has a reference to
  4352. * bfqq, the same guarantee holds for bfqq too.
  4353. *
  4354. * In addition, the following queue lock guarantees that
  4355. * bfqq_group(bfqq) exists as well.
  4356. */
  4357. spin_lock_irq(&q->queue_lock);
  4358. if (idle_timer_disabled)
  4359. /*
  4360. * Since the idle timer has been disabled,
  4361. * in_serv_queue contained some request when
  4362. * __bfq_dispatch_request was invoked above, which
  4363. * implies that rq was picked exactly from
  4364. * in_serv_queue. Thus in_serv_queue == bfqq, and is
  4365. * therefore guaranteed to exist because of the above
  4366. * arguments.
  4367. */
  4368. bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
  4369. if (bfqq) {
  4370. struct bfq_group *bfqg = bfqq_group(bfqq);
  4371. bfqg_stats_update_avg_queue_size(bfqg);
  4372. bfqg_stats_set_start_empty_time(bfqg);
  4373. bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
  4374. }
  4375. spin_unlock_irq(&q->queue_lock);
  4376. }
  4377. #else
  4378. static inline void bfq_update_dispatch_stats(struct request_queue *q,
  4379. struct request *rq,
  4380. struct bfq_queue *in_serv_queue,
  4381. bool idle_timer_disabled) {}
  4382. #endif /* CONFIG_BFQ_CGROUP_DEBUG */
  4383. static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
  4384. {
  4385. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  4386. struct request *rq;
  4387. struct bfq_queue *in_serv_queue;
  4388. bool waiting_rq, idle_timer_disabled = false;
  4389. spin_lock_irq(&bfqd->lock);
  4390. in_serv_queue = bfqd->in_service_queue;
  4391. waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
  4392. rq = __bfq_dispatch_request(hctx);
  4393. if (in_serv_queue == bfqd->in_service_queue) {
  4394. idle_timer_disabled =
  4395. waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
  4396. }
  4397. spin_unlock_irq(&bfqd->lock);
  4398. bfq_update_dispatch_stats(hctx->queue, rq,
  4399. idle_timer_disabled ? in_serv_queue : NULL,
  4400. idle_timer_disabled);
  4401. return rq;
  4402. }
  4403. /*
  4404. * Task holds one reference to the queue, dropped when task exits. Each rq
  4405. * in-flight on this queue also holds a reference, dropped when rq is freed.
  4406. *
  4407. * Scheduler lock must be held here. Recall not to use bfqq after calling
  4408. * this function on it.
  4409. */
  4410. void bfq_put_queue(struct bfq_queue *bfqq)
  4411. {
  4412. struct bfq_queue *item;
  4413. struct hlist_node *n;
  4414. struct bfq_group *bfqg = bfqq_group(bfqq);
  4415. if (bfqq->bfqd)
  4416. bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
  4417. bfqq, bfqq->ref);
  4418. bfqq->ref--;
  4419. if (bfqq->ref)
  4420. return;
  4421. if (!hlist_unhashed(&bfqq->burst_list_node)) {
  4422. hlist_del_init(&bfqq->burst_list_node);
  4423. /*
  4424. * Decrement also burst size after the removal, if the
  4425. * process associated with bfqq is exiting, and thus
  4426. * does not contribute to the burst any longer. This
  4427. * decrement helps filter out false positives of large
  4428. * bursts, when some short-lived process (often due to
  4429. * the execution of commands by some service) happens
  4430. * to start and exit while a complex application is
  4431. * starting, and thus spawning several processes that
  4432. * do I/O (and that *must not* be treated as a large
  4433. * burst, see comments on bfq_handle_burst).
  4434. *
  4435. * In particular, the decrement is performed only if:
  4436. * 1) bfqq is not a merged queue, because, if it is,
  4437. * then this free of bfqq is not triggered by the exit
  4438. * of the process bfqq is associated with, but exactly
  4439. * by the fact that bfqq has just been merged.
  4440. * 2) burst_size is greater than 0, to handle
  4441. * unbalanced decrements. Unbalanced decrements may
  4442. * happen in te following case: bfqq is inserted into
  4443. * the current burst list--without incrementing
  4444. * bust_size--because of a split, but the current
  4445. * burst list is not the burst list bfqq belonged to
  4446. * (see comments on the case of a split in
  4447. * bfq_set_request).
  4448. */
  4449. if (bfqq->bic && bfqq->bfqd->burst_size > 0)
  4450. bfqq->bfqd->burst_size--;
  4451. }
  4452. /*
  4453. * bfqq does not exist any longer, so it cannot be woken by
  4454. * any other queue, and cannot wake any other queue. Then bfqq
  4455. * must be removed from the woken list of its possible waker
  4456. * queue, and all queues in the woken list of bfqq must stop
  4457. * having a waker queue. Strictly speaking, these updates
  4458. * should be performed when bfqq remains with no I/O source
  4459. * attached to it, which happens before bfqq gets freed. In
  4460. * particular, this happens when the last process associated
  4461. * with bfqq exits or gets associated with a different
  4462. * queue. However, both events lead to bfqq being freed soon,
  4463. * and dangling references would come out only after bfqq gets
  4464. * freed. So these updates are done here, as a simple and safe
  4465. * way to handle all cases.
  4466. */
  4467. /* remove bfqq from woken list */
  4468. if (!hlist_unhashed(&bfqq->woken_list_node))
  4469. hlist_del_init(&bfqq->woken_list_node);
  4470. /* reset waker for all queues in woken list */
  4471. hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
  4472. woken_list_node) {
  4473. item->waker_bfqq = NULL;
  4474. bfq_clear_bfqq_has_waker(item);
  4475. hlist_del_init(&item->woken_list_node);
  4476. }
  4477. if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq)
  4478. bfqq->bfqd->last_completed_rq_bfqq = NULL;
  4479. kmem_cache_free(bfq_pool, bfqq);
  4480. bfqg_and_blkg_put(bfqg);
  4481. }
  4482. static void bfq_put_cooperator(struct bfq_queue *bfqq)
  4483. {
  4484. struct bfq_queue *__bfqq, *next;
  4485. /*
  4486. * If this queue was scheduled to merge with another queue, be
  4487. * sure to drop the reference taken on that queue (and others in
  4488. * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
  4489. */
  4490. __bfqq = bfqq->new_bfqq;
  4491. while (__bfqq) {
  4492. if (__bfqq == bfqq)
  4493. break;
  4494. next = __bfqq->new_bfqq;
  4495. bfq_put_queue(__bfqq);
  4496. __bfqq = next;
  4497. }
  4498. }
  4499. static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  4500. {
  4501. if (bfqq == bfqd->in_service_queue) {
  4502. __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
  4503. bfq_schedule_dispatch(bfqd);
  4504. }
  4505. bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
  4506. bfq_put_cooperator(bfqq);
  4507. bfq_release_process_ref(bfqd, bfqq);
  4508. }
  4509. static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
  4510. {
  4511. struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
  4512. struct bfq_data *bfqd;
  4513. if (bfqq)
  4514. bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
  4515. if (bfqq && bfqd) {
  4516. unsigned long flags;
  4517. spin_lock_irqsave(&bfqd->lock, flags);
  4518. bfqq->bic = NULL;
  4519. bfq_exit_bfqq(bfqd, bfqq);
  4520. bic_set_bfqq(bic, NULL, is_sync);
  4521. spin_unlock_irqrestore(&bfqd->lock, flags);
  4522. }
  4523. }
  4524. static void bfq_exit_icq(struct io_cq *icq)
  4525. {
  4526. struct bfq_io_cq *bic = icq_to_bic(icq);
  4527. bfq_exit_icq_bfqq(bic, true);
  4528. bfq_exit_icq_bfqq(bic, false);
  4529. }
  4530. /*
  4531. * Update the entity prio values; note that the new values will not
  4532. * be used until the next (re)activation.
  4533. */
  4534. static void
  4535. bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
  4536. {
  4537. struct task_struct *tsk = current;
  4538. int ioprio_class;
  4539. struct bfq_data *bfqd = bfqq->bfqd;
  4540. if (!bfqd)
  4541. return;
  4542. ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
  4543. switch (ioprio_class) {
  4544. default:
  4545. pr_err("bdi %s: bfq: bad prio class %d\n",
  4546. bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
  4547. ioprio_class);
  4548. fallthrough;
  4549. case IOPRIO_CLASS_NONE:
  4550. /*
  4551. * No prio set, inherit CPU scheduling settings.
  4552. */
  4553. bfqq->new_ioprio = task_nice_ioprio(tsk);
  4554. bfqq->new_ioprio_class = task_nice_ioclass(tsk);
  4555. break;
  4556. case IOPRIO_CLASS_RT:
  4557. bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
  4558. bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
  4559. break;
  4560. case IOPRIO_CLASS_BE:
  4561. bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
  4562. bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
  4563. break;
  4564. case IOPRIO_CLASS_IDLE:
  4565. bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
  4566. bfqq->new_ioprio = 7;
  4567. break;
  4568. }
  4569. if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
  4570. pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
  4571. bfqq->new_ioprio);
  4572. bfqq->new_ioprio = IOPRIO_BE_NR - 1;
  4573. }
  4574. bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
  4575. bfqq->entity.prio_changed = 1;
  4576. }
  4577. static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
  4578. struct bio *bio, bool is_sync,
  4579. struct bfq_io_cq *bic);
  4580. static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
  4581. {
  4582. struct bfq_data *bfqd = bic_to_bfqd(bic);
  4583. struct bfq_queue *bfqq;
  4584. int ioprio = bic->icq.ioc->ioprio;
  4585. /*
  4586. * This condition may trigger on a newly created bic, be sure to
  4587. * drop the lock before returning.
  4588. */
  4589. if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
  4590. return;
  4591. bic->ioprio = ioprio;
  4592. bfqq = bic_to_bfqq(bic, false);
  4593. if (bfqq) {
  4594. bfq_release_process_ref(bfqd, bfqq);
  4595. bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
  4596. bic_set_bfqq(bic, bfqq, false);
  4597. }
  4598. bfqq = bic_to_bfqq(bic, true);
  4599. if (bfqq)
  4600. bfq_set_next_ioprio_data(bfqq, bic);
  4601. }
  4602. static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  4603. struct bfq_io_cq *bic, pid_t pid, int is_sync)
  4604. {
  4605. RB_CLEAR_NODE(&bfqq->entity.rb_node);
  4606. INIT_LIST_HEAD(&bfqq->fifo);
  4607. INIT_HLIST_NODE(&bfqq->burst_list_node);
  4608. INIT_HLIST_NODE(&bfqq->woken_list_node);
  4609. INIT_HLIST_HEAD(&bfqq->woken_list);
  4610. bfqq->ref = 0;
  4611. bfqq->bfqd = bfqd;
  4612. if (bic)
  4613. bfq_set_next_ioprio_data(bfqq, bic);
  4614. if (is_sync) {
  4615. /*
  4616. * No need to mark as has_short_ttime if in
  4617. * idle_class, because no device idling is performed
  4618. * for queues in idle class
  4619. */
  4620. if (!bfq_class_idle(bfqq))
  4621. /* tentatively mark as has_short_ttime */
  4622. bfq_mark_bfqq_has_short_ttime(bfqq);
  4623. bfq_mark_bfqq_sync(bfqq);
  4624. bfq_mark_bfqq_just_created(bfqq);
  4625. } else
  4626. bfq_clear_bfqq_sync(bfqq);
  4627. /* set end request to minus infinity from now */
  4628. bfqq->ttime.last_end_request = ktime_get_ns() + 1;
  4629. bfq_mark_bfqq_IO_bound(bfqq);
  4630. bfqq->pid = pid;
  4631. /* Tentative initial value to trade off between thr and lat */
  4632. bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
  4633. bfqq->budget_timeout = bfq_smallest_from_now();
  4634. bfqq->wr_coeff = 1;
  4635. bfqq->last_wr_start_finish = jiffies;
  4636. bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
  4637. bfqq->split_time = bfq_smallest_from_now();
  4638. /*
  4639. * To not forget the possibly high bandwidth consumed by a
  4640. * process/queue in the recent past,
  4641. * bfq_bfqq_softrt_next_start() returns a value at least equal
  4642. * to the current value of bfqq->soft_rt_next_start (see
  4643. * comments on bfq_bfqq_softrt_next_start). Set
  4644. * soft_rt_next_start to now, to mean that bfqq has consumed
  4645. * no bandwidth so far.
  4646. */
  4647. bfqq->soft_rt_next_start = jiffies;
  4648. /* first request is almost certainly seeky */
  4649. bfqq->seek_history = 1;
  4650. }
  4651. static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
  4652. struct bfq_group *bfqg,
  4653. int ioprio_class, int ioprio)
  4654. {
  4655. switch (ioprio_class) {
  4656. case IOPRIO_CLASS_RT:
  4657. return &bfqg->async_bfqq[0][ioprio];
  4658. case IOPRIO_CLASS_NONE:
  4659. ioprio = IOPRIO_NORM;
  4660. fallthrough;
  4661. case IOPRIO_CLASS_BE:
  4662. return &bfqg->async_bfqq[1][ioprio];
  4663. case IOPRIO_CLASS_IDLE:
  4664. return &bfqg->async_idle_bfqq;
  4665. default:
  4666. return NULL;
  4667. }
  4668. }
  4669. static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
  4670. struct bio *bio, bool is_sync,
  4671. struct bfq_io_cq *bic)
  4672. {
  4673. const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
  4674. const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
  4675. struct bfq_queue **async_bfqq = NULL;
  4676. struct bfq_queue *bfqq;
  4677. struct bfq_group *bfqg;
  4678. rcu_read_lock();
  4679. bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
  4680. if (!bfqg) {
  4681. bfqq = &bfqd->oom_bfqq;
  4682. goto out;
  4683. }
  4684. if (!is_sync) {
  4685. async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
  4686. ioprio);
  4687. bfqq = *async_bfqq;
  4688. if (bfqq)
  4689. goto out;
  4690. }
  4691. bfqq = kmem_cache_alloc_node(bfq_pool,
  4692. GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
  4693. bfqd->queue->node);
  4694. if (bfqq) {
  4695. bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
  4696. is_sync);
  4697. bfq_init_entity(&bfqq->entity, bfqg);
  4698. bfq_log_bfqq(bfqd, bfqq, "allocated");
  4699. } else {
  4700. bfqq = &bfqd->oom_bfqq;
  4701. bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
  4702. goto out;
  4703. }
  4704. /*
  4705. * Pin the queue now that it's allocated, scheduler exit will
  4706. * prune it.
  4707. */
  4708. if (async_bfqq) {
  4709. bfqq->ref++; /*
  4710. * Extra group reference, w.r.t. sync
  4711. * queue. This extra reference is removed
  4712. * only if bfqq->bfqg disappears, to
  4713. * guarantee that this queue is not freed
  4714. * until its group goes away.
  4715. */
  4716. bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
  4717. bfqq, bfqq->ref);
  4718. *async_bfqq = bfqq;
  4719. }
  4720. out:
  4721. bfqq->ref++; /* get a process reference to this queue */
  4722. bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
  4723. rcu_read_unlock();
  4724. return bfqq;
  4725. }
  4726. static void bfq_update_io_thinktime(struct bfq_data *bfqd,
  4727. struct bfq_queue *bfqq)
  4728. {
  4729. struct bfq_ttime *ttime = &bfqq->ttime;
  4730. u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
  4731. elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
  4732. ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
  4733. ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
  4734. ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
  4735. ttime->ttime_samples);
  4736. }
  4737. static void
  4738. bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  4739. struct request *rq)
  4740. {
  4741. bfqq->seek_history <<= 1;
  4742. bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
  4743. if (bfqq->wr_coeff > 1 &&
  4744. bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
  4745. BFQQ_TOTALLY_SEEKY(bfqq))
  4746. bfq_bfqq_end_wr(bfqq);
  4747. }
  4748. static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
  4749. struct bfq_queue *bfqq,
  4750. struct bfq_io_cq *bic)
  4751. {
  4752. bool has_short_ttime = true, state_changed;
  4753. /*
  4754. * No need to update has_short_ttime if bfqq is async or in
  4755. * idle io prio class, or if bfq_slice_idle is zero, because
  4756. * no device idling is performed for bfqq in this case.
  4757. */
  4758. if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
  4759. bfqd->bfq_slice_idle == 0)
  4760. return;
  4761. /* Idle window just restored, statistics are meaningless. */
  4762. if (time_is_after_eq_jiffies(bfqq->split_time +
  4763. bfqd->bfq_wr_min_idle_time))
  4764. return;
  4765. /* Think time is infinite if no process is linked to
  4766. * bfqq. Otherwise check average think time to
  4767. * decide whether to mark as has_short_ttime
  4768. */
  4769. if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
  4770. (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
  4771. bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
  4772. has_short_ttime = false;
  4773. state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq);
  4774. if (has_short_ttime)
  4775. bfq_mark_bfqq_has_short_ttime(bfqq);
  4776. else
  4777. bfq_clear_bfqq_has_short_ttime(bfqq);
  4778. /*
  4779. * Until the base value for the total service time gets
  4780. * finally computed for bfqq, the inject limit does depend on
  4781. * the think-time state (short|long). In particular, the limit
  4782. * is 0 or 1 if the think time is deemed, respectively, as
  4783. * short or long (details in the comments in
  4784. * bfq_update_inject_limit()). Accordingly, the next
  4785. * instructions reset the inject limit if the think-time state
  4786. * has changed and the above base value is still to be
  4787. * computed.
  4788. *
  4789. * However, the reset is performed only if more than 100 ms
  4790. * have elapsed since the last update of the inject limit, or
  4791. * (inclusive) if the change is from short to long think
  4792. * time. The reason for this waiting is as follows.
  4793. *
  4794. * bfqq may have a long think time because of a
  4795. * synchronization with some other queue, i.e., because the
  4796. * I/O of some other queue may need to be completed for bfqq
  4797. * to receive new I/O. Details in the comments on the choice
  4798. * of the queue for injection in bfq_select_queue().
  4799. *
  4800. * As stressed in those comments, if such a synchronization is
  4801. * actually in place, then, without injection on bfqq, the
  4802. * blocking I/O cannot happen to served while bfqq is in
  4803. * service. As a consequence, if bfqq is granted
  4804. * I/O-dispatch-plugging, then bfqq remains empty, and no I/O
  4805. * is dispatched, until the idle timeout fires. This is likely
  4806. * to result in lower bandwidth and higher latencies for bfqq,
  4807. * and in a severe loss of total throughput.
  4808. *
  4809. * On the opposite end, a non-zero inject limit may allow the
  4810. * I/O that blocks bfqq to be executed soon, and therefore
  4811. * bfqq to receive new I/O soon.
  4812. *
  4813. * But, if the blocking gets actually eliminated, then the
  4814. * next think-time sample for bfqq may be very low. This in
  4815. * turn may cause bfqq's think time to be deemed
  4816. * short. Without the 100 ms barrier, this new state change
  4817. * would cause the body of the next if to be executed
  4818. * immediately. But this would set to 0 the inject
  4819. * limit. Without injection, the blocking I/O would cause the
  4820. * think time of bfqq to become long again, and therefore the
  4821. * inject limit to be raised again, and so on. The only effect
  4822. * of such a steady oscillation between the two think-time
  4823. * states would be to prevent effective injection on bfqq.
  4824. *
  4825. * In contrast, if the inject limit is not reset during such a
  4826. * long time interval as 100 ms, then the number of short
  4827. * think time samples can grow significantly before the reset
  4828. * is performed. As a consequence, the think time state can
  4829. * become stable before the reset. Therefore there will be no
  4830. * state change when the 100 ms elapse, and no reset of the
  4831. * inject limit. The inject limit remains steadily equal to 1
  4832. * both during and after the 100 ms. So injection can be
  4833. * performed at all times, and throughput gets boosted.
  4834. *
  4835. * An inject limit equal to 1 is however in conflict, in
  4836. * general, with the fact that the think time of bfqq is
  4837. * short, because injection may be likely to delay bfqq's I/O
  4838. * (as explained in the comments in
  4839. * bfq_update_inject_limit()). But this does not happen in
  4840. * this special case, because bfqq's low think time is due to
  4841. * an effective handling of a synchronization, through
  4842. * injection. In this special case, bfqq's I/O does not get
  4843. * delayed by injection; on the contrary, bfqq's I/O is
  4844. * brought forward, because it is not blocked for
  4845. * milliseconds.
  4846. *
  4847. * In addition, serving the blocking I/O much sooner, and much
  4848. * more frequently than once per I/O-plugging timeout, makes
  4849. * it much quicker to detect a waker queue (the concept of
  4850. * waker queue is defined in the comments in
  4851. * bfq_add_request()). This makes it possible to start sooner
  4852. * to boost throughput more effectively, by injecting the I/O
  4853. * of the waker queue unconditionally on every
  4854. * bfq_dispatch_request().
  4855. *
  4856. * One last, important benefit of not resetting the inject
  4857. * limit before 100 ms is that, during this time interval, the
  4858. * base value for the total service time is likely to get
  4859. * finally computed for bfqq, freeing the inject limit from
  4860. * its relation with the think time.
  4861. */
  4862. if (state_changed && bfqq->last_serv_time_ns == 0 &&
  4863. (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
  4864. msecs_to_jiffies(100)) ||
  4865. !has_short_ttime))
  4866. bfq_reset_inject_limit(bfqd, bfqq);
  4867. }
  4868. /*
  4869. * Called when a new fs request (rq) is added to bfqq. Check if there's
  4870. * something we should do about it.
  4871. */
  4872. static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  4873. struct request *rq)
  4874. {
  4875. if (rq->cmd_flags & REQ_META)
  4876. bfqq->meta_pending++;
  4877. bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
  4878. if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
  4879. bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
  4880. blk_rq_sectors(rq) < 32;
  4881. bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
  4882. /*
  4883. * There is just this request queued: if
  4884. * - the request is small, and
  4885. * - we are idling to boost throughput, and
  4886. * - the queue is not to be expired,
  4887. * then just exit.
  4888. *
  4889. * In this way, if the device is being idled to wait
  4890. * for a new request from the in-service queue, we
  4891. * avoid unplugging the device and committing the
  4892. * device to serve just a small request. In contrast
  4893. * we wait for the block layer to decide when to
  4894. * unplug the device: hopefully, new requests will be
  4895. * merged to this one quickly, then the device will be
  4896. * unplugged and larger requests will be dispatched.
  4897. */
  4898. if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
  4899. !budget_timeout)
  4900. return;
  4901. /*
  4902. * A large enough request arrived, or idling is being
  4903. * performed to preserve service guarantees, or
  4904. * finally the queue is to be expired: in all these
  4905. * cases disk idling is to be stopped, so clear
  4906. * wait_request flag and reset timer.
  4907. */
  4908. bfq_clear_bfqq_wait_request(bfqq);
  4909. hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
  4910. /*
  4911. * The queue is not empty, because a new request just
  4912. * arrived. Hence we can safely expire the queue, in
  4913. * case of budget timeout, without risking that the
  4914. * timestamps of the queue are not updated correctly.
  4915. * See [1] for more details.
  4916. */
  4917. if (budget_timeout)
  4918. bfq_bfqq_expire(bfqd, bfqq, false,
  4919. BFQQE_BUDGET_TIMEOUT);
  4920. }
  4921. }
  4922. /* returns true if it causes the idle timer to be disabled */
  4923. static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
  4924. {
  4925. struct bfq_queue *bfqq = RQ_BFQQ(rq),
  4926. *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
  4927. bool waiting, idle_timer_disabled = false;
  4928. if (new_bfqq) {
  4929. /*
  4930. * Release the request's reference to the old bfqq
  4931. * and make sure one is taken to the shared queue.
  4932. */
  4933. new_bfqq->allocated++;
  4934. bfqq->allocated--;
  4935. new_bfqq->ref++;
  4936. /*
  4937. * If the bic associated with the process
  4938. * issuing this request still points to bfqq
  4939. * (and thus has not been already redirected
  4940. * to new_bfqq or even some other bfq_queue),
  4941. * then complete the merge and redirect it to
  4942. * new_bfqq.
  4943. */
  4944. if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
  4945. bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
  4946. bfqq, new_bfqq);
  4947. bfq_clear_bfqq_just_created(bfqq);
  4948. /*
  4949. * rq is about to be enqueued into new_bfqq,
  4950. * release rq reference on bfqq
  4951. */
  4952. bfq_put_queue(bfqq);
  4953. rq->elv.priv[1] = new_bfqq;
  4954. bfqq = new_bfqq;
  4955. }
  4956. bfq_update_io_thinktime(bfqd, bfqq);
  4957. bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
  4958. bfq_update_io_seektime(bfqd, bfqq, rq);
  4959. waiting = bfqq && bfq_bfqq_wait_request(bfqq);
  4960. bfq_add_request(rq);
  4961. idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
  4962. rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
  4963. list_add_tail(&rq->queuelist, &bfqq->fifo);
  4964. bfq_rq_enqueued(bfqd, bfqq, rq);
  4965. return idle_timer_disabled;
  4966. }
  4967. #ifdef CONFIG_BFQ_CGROUP_DEBUG
  4968. static void bfq_update_insert_stats(struct request_queue *q,
  4969. struct bfq_queue *bfqq,
  4970. bool idle_timer_disabled,
  4971. unsigned int cmd_flags)
  4972. {
  4973. if (!bfqq)
  4974. return;
  4975. /*
  4976. * bfqq still exists, because it can disappear only after
  4977. * either it is merged with another queue, or the process it
  4978. * is associated with exits. But both actions must be taken by
  4979. * the same process currently executing this flow of
  4980. * instructions.
  4981. *
  4982. * In addition, the following queue lock guarantees that
  4983. * bfqq_group(bfqq) exists as well.
  4984. */
  4985. spin_lock_irq(&q->queue_lock);
  4986. bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
  4987. if (idle_timer_disabled)
  4988. bfqg_stats_update_idle_time(bfqq_group(bfqq));
  4989. spin_unlock_irq(&q->queue_lock);
  4990. }
  4991. #else
  4992. static inline void bfq_update_insert_stats(struct request_queue *q,
  4993. struct bfq_queue *bfqq,
  4994. bool idle_timer_disabled,
  4995. unsigned int cmd_flags) {}
  4996. #endif /* CONFIG_BFQ_CGROUP_DEBUG */
  4997. static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  4998. bool at_head)
  4999. {
  5000. struct request_queue *q = hctx->queue;
  5001. struct bfq_data *bfqd = q->elevator->elevator_data;
  5002. struct bfq_queue *bfqq;
  5003. bool idle_timer_disabled = false;
  5004. unsigned int cmd_flags;
  5005. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  5006. if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
  5007. bfqg_stats_update_legacy_io(q, rq);
  5008. #endif
  5009. spin_lock_irq(&bfqd->lock);
  5010. if (blk_mq_sched_try_insert_merge(q, rq)) {
  5011. spin_unlock_irq(&bfqd->lock);
  5012. return;
  5013. }
  5014. spin_unlock_irq(&bfqd->lock);
  5015. blk_mq_sched_request_inserted(rq);
  5016. spin_lock_irq(&bfqd->lock);
  5017. bfqq = bfq_init_rq(rq);
  5018. if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
  5019. if (at_head)
  5020. list_add(&rq->queuelist, &bfqd->dispatch);
  5021. else
  5022. list_add_tail(&rq->queuelist, &bfqd->dispatch);
  5023. } else {
  5024. idle_timer_disabled = __bfq_insert_request(bfqd, rq);
  5025. /*
  5026. * Update bfqq, because, if a queue merge has occurred
  5027. * in __bfq_insert_request, then rq has been
  5028. * redirected into a new queue.
  5029. */
  5030. bfqq = RQ_BFQQ(rq);
  5031. if (rq_mergeable(rq)) {
  5032. elv_rqhash_add(q, rq);
  5033. if (!q->last_merge)
  5034. q->last_merge = rq;
  5035. }
  5036. }
  5037. /*
  5038. * Cache cmd_flags before releasing scheduler lock, because rq
  5039. * may disappear afterwards (for example, because of a request
  5040. * merge).
  5041. */
  5042. cmd_flags = rq->cmd_flags;
  5043. spin_unlock_irq(&bfqd->lock);
  5044. bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
  5045. cmd_flags);
  5046. }
  5047. static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
  5048. struct list_head *list, bool at_head)
  5049. {
  5050. while (!list_empty(list)) {
  5051. struct request *rq;
  5052. rq = list_first_entry(list, struct request, queuelist);
  5053. list_del_init(&rq->queuelist);
  5054. bfq_insert_request(hctx, rq, at_head);
  5055. }
  5056. }
  5057. static void bfq_update_hw_tag(struct bfq_data *bfqd)
  5058. {
  5059. struct bfq_queue *bfqq = bfqd->in_service_queue;
  5060. bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
  5061. bfqd->rq_in_driver);
  5062. if (bfqd->hw_tag == 1)
  5063. return;
  5064. /*
  5065. * This sample is valid if the number of outstanding requests
  5066. * is large enough to allow a queueing behavior. Note that the
  5067. * sum is not exact, as it's not taking into account deactivated
  5068. * requests.
  5069. */
  5070. if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
  5071. return;
  5072. /*
  5073. * If active queue hasn't enough requests and can idle, bfq might not
  5074. * dispatch sufficient requests to hardware. Don't zero hw_tag in this
  5075. * case
  5076. */
  5077. if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
  5078. bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
  5079. BFQ_HW_QUEUE_THRESHOLD &&
  5080. bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
  5081. return;
  5082. if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
  5083. return;
  5084. bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
  5085. bfqd->max_rq_in_driver = 0;
  5086. bfqd->hw_tag_samples = 0;
  5087. bfqd->nonrot_with_queueing =
  5088. blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
  5089. }
  5090. static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
  5091. {
  5092. u64 now_ns;
  5093. u32 delta_us;
  5094. bfq_update_hw_tag(bfqd);
  5095. bfqd->rq_in_driver--;
  5096. bfqq->dispatched--;
  5097. if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
  5098. /*
  5099. * Set budget_timeout (which we overload to store the
  5100. * time at which the queue remains with no backlog and
  5101. * no outstanding request; used by the weight-raising
  5102. * mechanism).
  5103. */
  5104. bfqq->budget_timeout = jiffies;
  5105. bfq_weights_tree_remove(bfqd, bfqq);
  5106. }
  5107. now_ns = ktime_get_ns();
  5108. bfqq->ttime.last_end_request = now_ns;
  5109. /*
  5110. * Using us instead of ns, to get a reasonable precision in
  5111. * computing rate in next check.
  5112. */
  5113. delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
  5114. /*
  5115. * If the request took rather long to complete, and, according
  5116. * to the maximum request size recorded, this completion latency
  5117. * implies that the request was certainly served at a very low
  5118. * rate (less than 1M sectors/sec), then the whole observation
  5119. * interval that lasts up to this time instant cannot be a
  5120. * valid time interval for computing a new peak rate. Invoke
  5121. * bfq_update_rate_reset to have the following three steps
  5122. * taken:
  5123. * - close the observation interval at the last (previous)
  5124. * request dispatch or completion
  5125. * - compute rate, if possible, for that observation interval
  5126. * - reset to zero samples, which will trigger a proper
  5127. * re-initialization of the observation interval on next
  5128. * dispatch
  5129. */
  5130. if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
  5131. (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
  5132. 1UL<<(BFQ_RATE_SHIFT - 10))
  5133. bfq_update_rate_reset(bfqd, NULL);
  5134. bfqd->last_completion = now_ns;
  5135. bfqd->last_completed_rq_bfqq = bfqq;
  5136. /*
  5137. * If we are waiting to discover whether the request pattern
  5138. * of the task associated with the queue is actually
  5139. * isochronous, and both requisites for this condition to hold
  5140. * are now satisfied, then compute soft_rt_next_start (see the
  5141. * comments on the function bfq_bfqq_softrt_next_start()). We
  5142. * do not compute soft_rt_next_start if bfqq is in interactive
  5143. * weight raising (see the comments in bfq_bfqq_expire() for
  5144. * an explanation). We schedule this delayed update when bfqq
  5145. * expires, if it still has in-flight requests.
  5146. */
  5147. if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
  5148. RB_EMPTY_ROOT(&bfqq->sort_list) &&
  5149. bfqq->wr_coeff != bfqd->bfq_wr_coeff)
  5150. bfqq->soft_rt_next_start =
  5151. bfq_bfqq_softrt_next_start(bfqd, bfqq);
  5152. /*
  5153. * If this is the in-service queue, check if it needs to be expired,
  5154. * or if we want to idle in case it has no pending requests.
  5155. */
  5156. if (bfqd->in_service_queue == bfqq) {
  5157. if (bfq_bfqq_must_idle(bfqq)) {
  5158. if (bfqq->dispatched == 0)
  5159. bfq_arm_slice_timer(bfqd);
  5160. /*
  5161. * If we get here, we do not expire bfqq, even
  5162. * if bfqq was in budget timeout or had no
  5163. * more requests (as controlled in the next
  5164. * conditional instructions). The reason for
  5165. * not expiring bfqq is as follows.
  5166. *
  5167. * Here bfqq->dispatched > 0 holds, but
  5168. * bfq_bfqq_must_idle() returned true. This
  5169. * implies that, even if no request arrives
  5170. * for bfqq before bfqq->dispatched reaches 0,
  5171. * bfqq will, however, not be expired on the
  5172. * completion event that causes bfqq->dispatch
  5173. * to reach zero. In contrast, on this event,
  5174. * bfqq will start enjoying device idling
  5175. * (I/O-dispatch plugging).
  5176. *
  5177. * But, if we expired bfqq here, bfqq would
  5178. * not have the chance to enjoy device idling
  5179. * when bfqq->dispatched finally reaches
  5180. * zero. This would expose bfqq to violation
  5181. * of its reserved service guarantees.
  5182. */
  5183. return;
  5184. } else if (bfq_may_expire_for_budg_timeout(bfqq))
  5185. bfq_bfqq_expire(bfqd, bfqq, false,
  5186. BFQQE_BUDGET_TIMEOUT);
  5187. else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
  5188. (bfqq->dispatched == 0 ||
  5189. !bfq_better_to_idle(bfqq)))
  5190. bfq_bfqq_expire(bfqd, bfqq, false,
  5191. BFQQE_NO_MORE_REQUESTS);
  5192. }
  5193. if (!bfqd->rq_in_driver)
  5194. bfq_schedule_dispatch(bfqd);
  5195. }
  5196. static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
  5197. {
  5198. bfqq->allocated--;
  5199. bfq_put_queue(bfqq);
  5200. }
  5201. /*
  5202. * The processes associated with bfqq may happen to generate their
  5203. * cumulative I/O at a lower rate than the rate at which the device
  5204. * could serve the same I/O. This is rather probable, e.g., if only
  5205. * one process is associated with bfqq and the device is an SSD. It
  5206. * results in bfqq becoming often empty while in service. In this
  5207. * respect, if BFQ is allowed to switch to another queue when bfqq
  5208. * remains empty, then the device goes on being fed with I/O requests,
  5209. * and the throughput is not affected. In contrast, if BFQ is not
  5210. * allowed to switch to another queue---because bfqq is sync and
  5211. * I/O-dispatch needs to be plugged while bfqq is temporarily
  5212. * empty---then, during the service of bfqq, there will be frequent
  5213. * "service holes", i.e., time intervals during which bfqq gets empty
  5214. * and the device can only consume the I/O already queued in its
  5215. * hardware queues. During service holes, the device may even get to
  5216. * remaining idle. In the end, during the service of bfqq, the device
  5217. * is driven at a lower speed than the one it can reach with the kind
  5218. * of I/O flowing through bfqq.
  5219. *
  5220. * To counter this loss of throughput, BFQ implements a "request
  5221. * injection mechanism", which tries to fill the above service holes
  5222. * with I/O requests taken from other queues. The hard part in this
  5223. * mechanism is finding the right amount of I/O to inject, so as to
  5224. * both boost throughput and not break bfqq's bandwidth and latency
  5225. * guarantees. In this respect, the mechanism maintains a per-queue
  5226. * inject limit, computed as below. While bfqq is empty, the injection
  5227. * mechanism dispatches extra I/O requests only until the total number
  5228. * of I/O requests in flight---i.e., already dispatched but not yet
  5229. * completed---remains lower than this limit.
  5230. *
  5231. * A first definition comes in handy to introduce the algorithm by
  5232. * which the inject limit is computed. We define as first request for
  5233. * bfqq, an I/O request for bfqq that arrives while bfqq is in
  5234. * service, and causes bfqq to switch from empty to non-empty. The
  5235. * algorithm updates the limit as a function of the effect of
  5236. * injection on the service times of only the first requests of
  5237. * bfqq. The reason for this restriction is that these are the
  5238. * requests whose service time is affected most, because they are the
  5239. * first to arrive after injection possibly occurred.
  5240. *
  5241. * To evaluate the effect of injection, the algorithm measures the
  5242. * "total service time" of first requests. We define as total service
  5243. * time of an I/O request, the time that elapses since when the
  5244. * request is enqueued into bfqq, to when it is completed. This
  5245. * quantity allows the whole effect of injection to be measured. It is
  5246. * easy to see why. Suppose that some requests of other queues are
  5247. * actually injected while bfqq is empty, and that a new request R
  5248. * then arrives for bfqq. If the device does start to serve all or
  5249. * part of the injected requests during the service hole, then,
  5250. * because of this extra service, it may delay the next invocation of
  5251. * the dispatch hook of BFQ. Then, even after R gets eventually
  5252. * dispatched, the device may delay the actual service of R if it is
  5253. * still busy serving the extra requests, or if it decides to serve,
  5254. * before R, some extra request still present in its queues. As a
  5255. * conclusion, the cumulative extra delay caused by injection can be
  5256. * easily evaluated by just comparing the total service time of first
  5257. * requests with and without injection.
  5258. *
  5259. * The limit-update algorithm works as follows. On the arrival of a
  5260. * first request of bfqq, the algorithm measures the total time of the
  5261. * request only if one of the three cases below holds, and, for each
  5262. * case, it updates the limit as described below:
  5263. *
  5264. * (1) If there is no in-flight request. This gives a baseline for the
  5265. * total service time of the requests of bfqq. If the baseline has
  5266. * not been computed yet, then, after computing it, the limit is
  5267. * set to 1, to start boosting throughput, and to prepare the
  5268. * ground for the next case. If the baseline has already been
  5269. * computed, then it is updated, in case it results to be lower
  5270. * than the previous value.
  5271. *
  5272. * (2) If the limit is higher than 0 and there are in-flight
  5273. * requests. By comparing the total service time in this case with
  5274. * the above baseline, it is possible to know at which extent the
  5275. * current value of the limit is inflating the total service
  5276. * time. If the inflation is below a certain threshold, then bfqq
  5277. * is assumed to be suffering from no perceivable loss of its
  5278. * service guarantees, and the limit is even tentatively
  5279. * increased. If the inflation is above the threshold, then the
  5280. * limit is decreased. Due to the lack of any hysteresis, this
  5281. * logic makes the limit oscillate even in steady workload
  5282. * conditions. Yet we opted for it, because it is fast in reaching
  5283. * the best value for the limit, as a function of the current I/O
  5284. * workload. To reduce oscillations, this step is disabled for a
  5285. * short time interval after the limit happens to be decreased.
  5286. *
  5287. * (3) Periodically, after resetting the limit, to make sure that the
  5288. * limit eventually drops in case the workload changes. This is
  5289. * needed because, after the limit has gone safely up for a
  5290. * certain workload, it is impossible to guess whether the
  5291. * baseline total service time may have changed, without measuring
  5292. * it again without injection. A more effective version of this
  5293. * step might be to just sample the baseline, by interrupting
  5294. * injection only once, and then to reset/lower the limit only if
  5295. * the total service time with the current limit does happen to be
  5296. * too large.
  5297. *
  5298. * More details on each step are provided in the comments on the
  5299. * pieces of code that implement these steps: the branch handling the
  5300. * transition from empty to non empty in bfq_add_request(), the branch
  5301. * handling injection in bfq_select_queue(), and the function
  5302. * bfq_choose_bfqq_for_injection(). These comments also explain some
  5303. * exceptions, made by the injection mechanism in some special cases.
  5304. */
  5305. static void bfq_update_inject_limit(struct bfq_data *bfqd,
  5306. struct bfq_queue *bfqq)
  5307. {
  5308. u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
  5309. unsigned int old_limit = bfqq->inject_limit;
  5310. if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
  5311. u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
  5312. if (tot_time_ns >= threshold && old_limit > 0) {
  5313. bfqq->inject_limit--;
  5314. bfqq->decrease_time_jif = jiffies;
  5315. } else if (tot_time_ns < threshold &&
  5316. old_limit <= bfqd->max_rq_in_driver)
  5317. bfqq->inject_limit++;
  5318. }
  5319. /*
  5320. * Either we still have to compute the base value for the
  5321. * total service time, and there seem to be the right
  5322. * conditions to do it, or we can lower the last base value
  5323. * computed.
  5324. *
  5325. * NOTE: (bfqd->rq_in_driver == 1) means that there is no I/O
  5326. * request in flight, because this function is in the code
  5327. * path that handles the completion of a request of bfqq, and,
  5328. * in particular, this function is executed before
  5329. * bfqd->rq_in_driver is decremented in such a code path.
  5330. */
  5331. if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
  5332. tot_time_ns < bfqq->last_serv_time_ns) {
  5333. if (bfqq->last_serv_time_ns == 0) {
  5334. /*
  5335. * Now we certainly have a base value: make sure we
  5336. * start trying injection.
  5337. */
  5338. bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
  5339. }
  5340. bfqq->last_serv_time_ns = tot_time_ns;
  5341. } else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
  5342. /*
  5343. * No I/O injected and no request still in service in
  5344. * the drive: these are the exact conditions for
  5345. * computing the base value of the total service time
  5346. * for bfqq. So let's update this value, because it is
  5347. * rather variable. For example, it varies if the size
  5348. * or the spatial locality of the I/O requests in bfqq
  5349. * change.
  5350. */
  5351. bfqq->last_serv_time_ns = tot_time_ns;
  5352. /* update complete, not waiting for any request completion any longer */
  5353. bfqd->waited_rq = NULL;
  5354. bfqd->rqs_injected = false;
  5355. }
  5356. /*
  5357. * Handle either a requeue or a finish for rq. The things to do are
  5358. * the same in both cases: all references to rq are to be dropped. In
  5359. * particular, rq is considered completed from the point of view of
  5360. * the scheduler.
  5361. */
  5362. static void bfq_finish_requeue_request(struct request *rq)
  5363. {
  5364. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  5365. struct bfq_data *bfqd;
  5366. /*
  5367. * rq either is not associated with any icq, or is an already
  5368. * requeued request that has not (yet) been re-inserted into
  5369. * a bfq_queue.
  5370. */
  5371. if (!rq->elv.icq || !bfqq)
  5372. return;
  5373. bfqd = bfqq->bfqd;
  5374. if (rq->rq_flags & RQF_STARTED)
  5375. bfqg_stats_update_completion(bfqq_group(bfqq),
  5376. rq->start_time_ns,
  5377. rq->io_start_time_ns,
  5378. rq->cmd_flags);
  5379. if (likely(rq->rq_flags & RQF_STARTED)) {
  5380. unsigned long flags;
  5381. spin_lock_irqsave(&bfqd->lock, flags);
  5382. if (rq == bfqd->waited_rq)
  5383. bfq_update_inject_limit(bfqd, bfqq);
  5384. bfq_completed_request(bfqq, bfqd);
  5385. bfq_finish_requeue_request_body(bfqq);
  5386. spin_unlock_irqrestore(&bfqd->lock, flags);
  5387. } else {
  5388. /*
  5389. * Request rq may be still/already in the scheduler,
  5390. * in which case we need to remove it (this should
  5391. * never happen in case of requeue). And we cannot
  5392. * defer such a check and removal, to avoid
  5393. * inconsistencies in the time interval from the end
  5394. * of this function to the start of the deferred work.
  5395. * This situation seems to occur only in process
  5396. * context, as a consequence of a merge. In the
  5397. * current version of the code, this implies that the
  5398. * lock is held.
  5399. */
  5400. if (!RB_EMPTY_NODE(&rq->rb_node)) {
  5401. bfq_remove_request(rq->q, rq);
  5402. bfqg_stats_update_io_remove(bfqq_group(bfqq),
  5403. rq->cmd_flags);
  5404. }
  5405. bfq_finish_requeue_request_body(bfqq);
  5406. }
  5407. /*
  5408. * Reset private fields. In case of a requeue, this allows
  5409. * this function to correctly do nothing if it is spuriously
  5410. * invoked again on this same request (see the check at the
  5411. * beginning of the function). Probably, a better general
  5412. * design would be to prevent blk-mq from invoking the requeue
  5413. * or finish hooks of an elevator, for a request that is not
  5414. * referred by that elevator.
  5415. *
  5416. * Resetting the following fields would break the
  5417. * request-insertion logic if rq is re-inserted into a bfq
  5418. * internal queue, without a re-preparation. Here we assume
  5419. * that re-insertions of requeued requests, without
  5420. * re-preparation, can happen only for pass_through or at_head
  5421. * requests (which are not re-inserted into bfq internal
  5422. * queues).
  5423. */
  5424. rq->elv.priv[0] = NULL;
  5425. rq->elv.priv[1] = NULL;
  5426. }
  5427. /*
  5428. * Removes the association between the current task and bfqq, assuming
  5429. * that bic points to the bfq iocontext of the task.
  5430. * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
  5431. * was the last process referring to that bfqq.
  5432. */
  5433. static struct bfq_queue *
  5434. bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
  5435. {
  5436. bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
  5437. if (bfqq_process_refs(bfqq) == 1) {
  5438. bfqq->pid = current->pid;
  5439. bfq_clear_bfqq_coop(bfqq);
  5440. bfq_clear_bfqq_split_coop(bfqq);
  5441. return bfqq;
  5442. }
  5443. bic_set_bfqq(bic, NULL, 1);
  5444. bfq_put_cooperator(bfqq);
  5445. bfq_release_process_ref(bfqq->bfqd, bfqq);
  5446. return NULL;
  5447. }
  5448. static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
  5449. struct bfq_io_cq *bic,
  5450. struct bio *bio,
  5451. bool split, bool is_sync,
  5452. bool *new_queue)
  5453. {
  5454. struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
  5455. if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
  5456. return bfqq;
  5457. if (new_queue)
  5458. *new_queue = true;
  5459. if (bfqq)
  5460. bfq_put_queue(bfqq);
  5461. bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
  5462. bic_set_bfqq(bic, bfqq, is_sync);
  5463. if (split && is_sync) {
  5464. if ((bic->was_in_burst_list && bfqd->large_burst) ||
  5465. bic->saved_in_large_burst)
  5466. bfq_mark_bfqq_in_large_burst(bfqq);
  5467. else {
  5468. bfq_clear_bfqq_in_large_burst(bfqq);
  5469. if (bic->was_in_burst_list)
  5470. /*
  5471. * If bfqq was in the current
  5472. * burst list before being
  5473. * merged, then we have to add
  5474. * it back. And we do not need
  5475. * to increase burst_size, as
  5476. * we did not decrement
  5477. * burst_size when we removed
  5478. * bfqq from the burst list as
  5479. * a consequence of a merge
  5480. * (see comments in
  5481. * bfq_put_queue). In this
  5482. * respect, it would be rather
  5483. * costly to know whether the
  5484. * current burst list is still
  5485. * the same burst list from
  5486. * which bfqq was removed on
  5487. * the merge. To avoid this
  5488. * cost, if bfqq was in a
  5489. * burst list, then we add
  5490. * bfqq to the current burst
  5491. * list without any further
  5492. * check. This can cause
  5493. * inappropriate insertions,
  5494. * but rarely enough to not
  5495. * harm the detection of large
  5496. * bursts significantly.
  5497. */
  5498. hlist_add_head(&bfqq->burst_list_node,
  5499. &bfqd->burst_list);
  5500. }
  5501. bfqq->split_time = jiffies;
  5502. }
  5503. return bfqq;
  5504. }
  5505. /*
  5506. * Only reset private fields. The actual request preparation will be
  5507. * performed by bfq_init_rq, when rq is either inserted or merged. See
  5508. * comments on bfq_init_rq for the reason behind this delayed
  5509. * preparation.
  5510. */
  5511. static void bfq_prepare_request(struct request *rq)
  5512. {
  5513. /*
  5514. * Regardless of whether we have an icq attached, we have to
  5515. * clear the scheduler pointers, as they might point to
  5516. * previously allocated bic/bfqq structs.
  5517. */
  5518. rq->elv.priv[0] = rq->elv.priv[1] = NULL;
  5519. }
  5520. /*
  5521. * If needed, init rq, allocate bfq data structures associated with
  5522. * rq, and increment reference counters in the destination bfq_queue
  5523. * for rq. Return the destination bfq_queue for rq, or NULL is rq is
  5524. * not associated with any bfq_queue.
  5525. *
  5526. * This function is invoked by the functions that perform rq insertion
  5527. * or merging. One may have expected the above preparation operations
  5528. * to be performed in bfq_prepare_request, and not delayed to when rq
  5529. * is inserted or merged. The rationale behind this delayed
  5530. * preparation is that, after the prepare_request hook is invoked for
  5531. * rq, rq may still be transformed into a request with no icq, i.e., a
  5532. * request not associated with any queue. No bfq hook is invoked to
  5533. * signal this transformation. As a consequence, should these
  5534. * preparation operations be performed when the prepare_request hook
  5535. * is invoked, and should rq be transformed one moment later, bfq
  5536. * would end up in an inconsistent state, because it would have
  5537. * incremented some queue counters for an rq destined to
  5538. * transformation, without any chance to correctly lower these
  5539. * counters back. In contrast, no transformation can still happen for
  5540. * rq after rq has been inserted or merged. So, it is safe to execute
  5541. * these preparation operations when rq is finally inserted or merged.
  5542. */
  5543. static struct bfq_queue *bfq_init_rq(struct request *rq)
  5544. {
  5545. struct request_queue *q = rq->q;
  5546. struct bio *bio = rq->bio;
  5547. struct bfq_data *bfqd = q->elevator->elevator_data;
  5548. struct bfq_io_cq *bic;
  5549. const int is_sync = rq_is_sync(rq);
  5550. struct bfq_queue *bfqq;
  5551. bool new_queue = false;
  5552. bool bfqq_already_existing = false, split = false;
  5553. if (unlikely(!rq->elv.icq))
  5554. return NULL;
  5555. /*
  5556. * Assuming that elv.priv[1] is set only if everything is set
  5557. * for this rq. This holds true, because this function is
  5558. * invoked only for insertion or merging, and, after such
  5559. * events, a request cannot be manipulated any longer before
  5560. * being removed from bfq.
  5561. */
  5562. if (rq->elv.priv[1])
  5563. return rq->elv.priv[1];
  5564. bic = icq_to_bic(rq->elv.icq);
  5565. bfq_check_ioprio_change(bic, bio);
  5566. bfq_bic_update_cgroup(bic, bio);
  5567. bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
  5568. &new_queue);
  5569. if (likely(!new_queue)) {
  5570. /* If the queue was seeky for too long, break it apart. */
  5571. if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
  5572. bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
  5573. /* Update bic before losing reference to bfqq */
  5574. if (bfq_bfqq_in_large_burst(bfqq))
  5575. bic->saved_in_large_burst = true;
  5576. bfqq = bfq_split_bfqq(bic, bfqq);
  5577. split = true;
  5578. if (!bfqq)
  5579. bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
  5580. true, is_sync,
  5581. NULL);
  5582. else
  5583. bfqq_already_existing = true;
  5584. }
  5585. }
  5586. bfqq->allocated++;
  5587. bfqq->ref++;
  5588. bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
  5589. rq, bfqq, bfqq->ref);
  5590. rq->elv.priv[0] = bic;
  5591. rq->elv.priv[1] = bfqq;
  5592. /*
  5593. * If a bfq_queue has only one process reference, it is owned
  5594. * by only this bic: we can then set bfqq->bic = bic. in
  5595. * addition, if the queue has also just been split, we have to
  5596. * resume its state.
  5597. */
  5598. if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
  5599. bfqq->bic = bic;
  5600. if (split) {
  5601. /*
  5602. * The queue has just been split from a shared
  5603. * queue: restore the idle window and the
  5604. * possible weight raising period.
  5605. */
  5606. bfq_bfqq_resume_state(bfqq, bfqd, bic,
  5607. bfqq_already_existing);
  5608. }
  5609. }
  5610. /*
  5611. * Consider bfqq as possibly belonging to a burst of newly
  5612. * created queues only if:
  5613. * 1) A burst is actually happening (bfqd->burst_size > 0)
  5614. * or
  5615. * 2) There is no other active queue. In fact, if, in
  5616. * contrast, there are active queues not belonging to the
  5617. * possible burst bfqq may belong to, then there is no gain
  5618. * in considering bfqq as belonging to a burst, and
  5619. * therefore in not weight-raising bfqq. See comments on
  5620. * bfq_handle_burst().
  5621. *
  5622. * This filtering also helps eliminating false positives,
  5623. * occurring when bfqq does not belong to an actual large
  5624. * burst, but some background task (e.g., a service) happens
  5625. * to trigger the creation of new queues very close to when
  5626. * bfqq and its possible companion queues are created. See
  5627. * comments on bfq_handle_burst() for further details also on
  5628. * this issue.
  5629. */
  5630. if (unlikely(bfq_bfqq_just_created(bfqq) &&
  5631. (bfqd->burst_size > 0 ||
  5632. bfq_tot_busy_queues(bfqd) == 0)))
  5633. bfq_handle_burst(bfqd, bfqq);
  5634. return bfqq;
  5635. }
  5636. static void
  5637. bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  5638. {
  5639. enum bfqq_expiration reason;
  5640. unsigned long flags;
  5641. spin_lock_irqsave(&bfqd->lock, flags);
  5642. /*
  5643. * Considering that bfqq may be in race, we should firstly check
  5644. * whether bfqq is in service before doing something on it. If
  5645. * the bfqq in race is not in service, it has already been expired
  5646. * through __bfq_bfqq_expire func and its wait_request flags has
  5647. * been cleared in __bfq_bfqd_reset_in_service func.
  5648. */
  5649. if (bfqq != bfqd->in_service_queue) {
  5650. spin_unlock_irqrestore(&bfqd->lock, flags);
  5651. return;
  5652. }
  5653. bfq_clear_bfqq_wait_request(bfqq);
  5654. if (bfq_bfqq_budget_timeout(bfqq))
  5655. /*
  5656. * Also here the queue can be safely expired
  5657. * for budget timeout without wasting
  5658. * guarantees
  5659. */
  5660. reason = BFQQE_BUDGET_TIMEOUT;
  5661. else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
  5662. /*
  5663. * The queue may not be empty upon timer expiration,
  5664. * because we may not disable the timer when the
  5665. * first request of the in-service queue arrives
  5666. * during disk idling.
  5667. */
  5668. reason = BFQQE_TOO_IDLE;
  5669. else
  5670. goto schedule_dispatch;
  5671. bfq_bfqq_expire(bfqd, bfqq, true, reason);
  5672. schedule_dispatch:
  5673. spin_unlock_irqrestore(&bfqd->lock, flags);
  5674. bfq_schedule_dispatch(bfqd);
  5675. }
  5676. /*
  5677. * Handler of the expiration of the timer running if the in-service queue
  5678. * is idling inside its time slice.
  5679. */
  5680. static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
  5681. {
  5682. struct bfq_data *bfqd = container_of(timer, struct bfq_data,
  5683. idle_slice_timer);
  5684. struct bfq_queue *bfqq = bfqd->in_service_queue;
  5685. /*
  5686. * Theoretical race here: the in-service queue can be NULL or
  5687. * different from the queue that was idling if a new request
  5688. * arrives for the current queue and there is a full dispatch
  5689. * cycle that changes the in-service queue. This can hardly
  5690. * happen, but in the worst case we just expire a queue too
  5691. * early.
  5692. */
  5693. if (bfqq)
  5694. bfq_idle_slice_timer_body(bfqd, bfqq);
  5695. return HRTIMER_NORESTART;
  5696. }
  5697. static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
  5698. struct bfq_queue **bfqq_ptr)
  5699. {
  5700. struct bfq_queue *bfqq = *bfqq_ptr;
  5701. bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
  5702. if (bfqq) {
  5703. bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
  5704. bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
  5705. bfqq, bfqq->ref);
  5706. bfq_put_queue(bfqq);
  5707. *bfqq_ptr = NULL;
  5708. }
  5709. }
  5710. /*
  5711. * Release all the bfqg references to its async queues. If we are
  5712. * deallocating the group these queues may still contain requests, so
  5713. * we reparent them to the root cgroup (i.e., the only one that will
  5714. * exist for sure until all the requests on a device are gone).
  5715. */
  5716. void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
  5717. {
  5718. int i, j;
  5719. for (i = 0; i < 2; i++)
  5720. for (j = 0; j < IOPRIO_BE_NR; j++)
  5721. __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
  5722. __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
  5723. }
  5724. /*
  5725. * See the comments on bfq_limit_depth for the purpose of
  5726. * the depths set in the function. Return minimum shallow depth we'll use.
  5727. */
  5728. static unsigned int bfq_update_depths(struct bfq_data *bfqd,
  5729. struct sbitmap_queue *bt)
  5730. {
  5731. unsigned int i, j, min_shallow = UINT_MAX;
  5732. /*
  5733. * In-word depths if no bfq_queue is being weight-raised:
  5734. * leaving 25% of tags only for sync reads.
  5735. *
  5736. * In next formulas, right-shift the value
  5737. * (1U<<bt->sb.shift), instead of computing directly
  5738. * (1U<<(bt->sb.shift - something)), to be robust against
  5739. * any possible value of bt->sb.shift, without having to
  5740. * limit 'something'.
  5741. */
  5742. /* no more than 50% of tags for async I/O */
  5743. bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
  5744. /*
  5745. * no more than 75% of tags for sync writes (25% extra tags
  5746. * w.r.t. async I/O, to prevent async I/O from starving sync
  5747. * writes)
  5748. */
  5749. bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
  5750. /*
  5751. * In-word depths in case some bfq_queue is being weight-
  5752. * raised: leaving ~63% of tags for sync reads. This is the
  5753. * highest percentage for which, in our tests, application
  5754. * start-up times didn't suffer from any regression due to tag
  5755. * shortage.
  5756. */
  5757. /* no more than ~18% of tags for async I/O */
  5758. bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
  5759. /* no more than ~37% of tags for sync writes (~20% extra tags) */
  5760. bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
  5761. for (i = 0; i < 2; i++)
  5762. for (j = 0; j < 2; j++)
  5763. min_shallow = min(min_shallow, bfqd->word_depths[i][j]);
  5764. return min_shallow;
  5765. }
  5766. static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
  5767. {
  5768. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  5769. struct blk_mq_tags *tags = hctx->sched_tags;
  5770. unsigned int min_shallow;
  5771. min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags);
  5772. sbitmap_queue_min_shallow_depth(tags->bitmap_tags, min_shallow);
  5773. }
  5774. static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
  5775. {
  5776. bfq_depth_updated(hctx);
  5777. return 0;
  5778. }
  5779. static void bfq_exit_queue(struct elevator_queue *e)
  5780. {
  5781. struct bfq_data *bfqd = e->elevator_data;
  5782. struct bfq_queue *bfqq, *n;
  5783. hrtimer_cancel(&bfqd->idle_slice_timer);
  5784. spin_lock_irq(&bfqd->lock);
  5785. list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
  5786. bfq_deactivate_bfqq(bfqd, bfqq, false, false);
  5787. spin_unlock_irq(&bfqd->lock);
  5788. hrtimer_cancel(&bfqd->idle_slice_timer);
  5789. /* release oom-queue reference to root group */
  5790. bfqg_and_blkg_put(bfqd->root_group);
  5791. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  5792. blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
  5793. #else
  5794. spin_lock_irq(&bfqd->lock);
  5795. bfq_put_async_queues(bfqd, bfqd->root_group);
  5796. kfree(bfqd->root_group);
  5797. spin_unlock_irq(&bfqd->lock);
  5798. #endif
  5799. wbt_enable_default(bfqd->queue);
  5800. kfree(bfqd);
  5801. }
  5802. static void bfq_init_root_group(struct bfq_group *root_group,
  5803. struct bfq_data *bfqd)
  5804. {
  5805. int i;
  5806. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  5807. root_group->entity.parent = NULL;
  5808. root_group->my_entity = NULL;
  5809. root_group->bfqd = bfqd;
  5810. #endif
  5811. root_group->rq_pos_tree = RB_ROOT;
  5812. for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
  5813. root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
  5814. root_group->sched_data.bfq_class_idle_last_service = jiffies;
  5815. }
  5816. static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
  5817. {
  5818. struct bfq_data *bfqd;
  5819. struct elevator_queue *eq;
  5820. eq = elevator_alloc(q, e);
  5821. if (!eq)
  5822. return -ENOMEM;
  5823. bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
  5824. if (!bfqd) {
  5825. kobject_put(&eq->kobj);
  5826. return -ENOMEM;
  5827. }
  5828. eq->elevator_data = bfqd;
  5829. spin_lock_irq(&q->queue_lock);
  5830. q->elevator = eq;
  5831. spin_unlock_irq(&q->queue_lock);
  5832. /*
  5833. * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
  5834. * Grab a permanent reference to it, so that the normal code flow
  5835. * will not attempt to free it.
  5836. */
  5837. bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
  5838. bfqd->oom_bfqq.ref++;
  5839. bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
  5840. bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
  5841. bfqd->oom_bfqq.entity.new_weight =
  5842. bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
  5843. /* oom_bfqq does not participate to bursts */
  5844. bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
  5845. /*
  5846. * Trigger weight initialization, according to ioprio, at the
  5847. * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
  5848. * class won't be changed any more.
  5849. */
  5850. bfqd->oom_bfqq.entity.prio_changed = 1;
  5851. bfqd->queue = q;
  5852. INIT_LIST_HEAD(&bfqd->dispatch);
  5853. hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
  5854. HRTIMER_MODE_REL);
  5855. bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
  5856. bfqd->queue_weights_tree = RB_ROOT_CACHED;
  5857. bfqd->num_groups_with_pending_reqs = 0;
  5858. INIT_LIST_HEAD(&bfqd->active_list);
  5859. INIT_LIST_HEAD(&bfqd->idle_list);
  5860. INIT_HLIST_HEAD(&bfqd->burst_list);
  5861. bfqd->hw_tag = -1;
  5862. bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
  5863. bfqd->bfq_max_budget = bfq_default_max_budget;
  5864. bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
  5865. bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
  5866. bfqd->bfq_back_max = bfq_back_max;
  5867. bfqd->bfq_back_penalty = bfq_back_penalty;
  5868. bfqd->bfq_slice_idle = bfq_slice_idle;
  5869. bfqd->bfq_timeout = bfq_timeout;
  5870. bfqd->bfq_requests_within_timer = 120;
  5871. bfqd->bfq_large_burst_thresh = 8;
  5872. bfqd->bfq_burst_interval = msecs_to_jiffies(180);
  5873. bfqd->low_latency = true;
  5874. /*
  5875. * Trade-off between responsiveness and fairness.
  5876. */
  5877. bfqd->bfq_wr_coeff = 30;
  5878. bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
  5879. bfqd->bfq_wr_max_time = 0;
  5880. bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
  5881. bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
  5882. bfqd->bfq_wr_max_softrt_rate = 7000; /*
  5883. * Approximate rate required
  5884. * to playback or record a
  5885. * high-definition compressed
  5886. * video.
  5887. */
  5888. bfqd->wr_busy_queues = 0;
  5889. /*
  5890. * Begin by assuming, optimistically, that the device peak
  5891. * rate is equal to 2/3 of the highest reference rate.
  5892. */
  5893. bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
  5894. ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
  5895. bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
  5896. spin_lock_init(&bfqd->lock);
  5897. /*
  5898. * The invocation of the next bfq_create_group_hierarchy
  5899. * function is the head of a chain of function calls
  5900. * (bfq_create_group_hierarchy->blkcg_activate_policy->
  5901. * blk_mq_freeze_queue) that may lead to the invocation of the
  5902. * has_work hook function. For this reason,
  5903. * bfq_create_group_hierarchy is invoked only after all
  5904. * scheduler data has been initialized, apart from the fields
  5905. * that can be initialized only after invoking
  5906. * bfq_create_group_hierarchy. This, in particular, enables
  5907. * has_work to correctly return false. Of course, to avoid
  5908. * other inconsistencies, the blk-mq stack must then refrain
  5909. * from invoking further scheduler hooks before this init
  5910. * function is finished.
  5911. */
  5912. bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
  5913. if (!bfqd->root_group)
  5914. goto out_free;
  5915. bfq_init_root_group(bfqd->root_group, bfqd);
  5916. bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
  5917. wbt_disable_default(q);
  5918. return 0;
  5919. out_free:
  5920. kfree(bfqd);
  5921. kobject_put(&eq->kobj);
  5922. return -ENOMEM;
  5923. }
  5924. static void bfq_slab_kill(void)
  5925. {
  5926. kmem_cache_destroy(bfq_pool);
  5927. }
  5928. static int __init bfq_slab_setup(void)
  5929. {
  5930. bfq_pool = KMEM_CACHE(bfq_queue, 0);
  5931. if (!bfq_pool)
  5932. return -ENOMEM;
  5933. return 0;
  5934. }
  5935. static ssize_t bfq_var_show(unsigned int var, char *page)
  5936. {
  5937. return sprintf(page, "%u\n", var);
  5938. }
  5939. static int bfq_var_store(unsigned long *var, const char *page)
  5940. {
  5941. unsigned long new_val;
  5942. int ret = kstrtoul(page, 10, &new_val);
  5943. if (ret)
  5944. return ret;
  5945. *var = new_val;
  5946. return 0;
  5947. }
  5948. #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
  5949. static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  5950. { \
  5951. struct bfq_data *bfqd = e->elevator_data; \
  5952. u64 __data = __VAR; \
  5953. if (__CONV == 1) \
  5954. __data = jiffies_to_msecs(__data); \
  5955. else if (__CONV == 2) \
  5956. __data = div_u64(__data, NSEC_PER_MSEC); \
  5957. return bfq_var_show(__data, (page)); \
  5958. }
  5959. SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
  5960. SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
  5961. SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
  5962. SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
  5963. SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
  5964. SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
  5965. SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
  5966. SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
  5967. SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
  5968. #undef SHOW_FUNCTION
  5969. #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
  5970. static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  5971. { \
  5972. struct bfq_data *bfqd = e->elevator_data; \
  5973. u64 __data = __VAR; \
  5974. __data = div_u64(__data, NSEC_PER_USEC); \
  5975. return bfq_var_show(__data, (page)); \
  5976. }
  5977. USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
  5978. #undef USEC_SHOW_FUNCTION
  5979. #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
  5980. static ssize_t \
  5981. __FUNC(struct elevator_queue *e, const char *page, size_t count) \
  5982. { \
  5983. struct bfq_data *bfqd = e->elevator_data; \
  5984. unsigned long __data, __min = (MIN), __max = (MAX); \
  5985. int ret; \
  5986. \
  5987. ret = bfq_var_store(&__data, (page)); \
  5988. if (ret) \
  5989. return ret; \
  5990. if (__data < __min) \
  5991. __data = __min; \
  5992. else if (__data > __max) \
  5993. __data = __max; \
  5994. if (__CONV == 1) \
  5995. *(__PTR) = msecs_to_jiffies(__data); \
  5996. else if (__CONV == 2) \
  5997. *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
  5998. else \
  5999. *(__PTR) = __data; \
  6000. return count; \
  6001. }
  6002. STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
  6003. INT_MAX, 2);
  6004. STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
  6005. INT_MAX, 2);
  6006. STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
  6007. STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
  6008. INT_MAX, 0);
  6009. STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
  6010. #undef STORE_FUNCTION
  6011. #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
  6012. static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
  6013. { \
  6014. struct bfq_data *bfqd = e->elevator_data; \
  6015. unsigned long __data, __min = (MIN), __max = (MAX); \
  6016. int ret; \
  6017. \
  6018. ret = bfq_var_store(&__data, (page)); \
  6019. if (ret) \
  6020. return ret; \
  6021. if (__data < __min) \
  6022. __data = __min; \
  6023. else if (__data > __max) \
  6024. __data = __max; \
  6025. *(__PTR) = (u64)__data * NSEC_PER_USEC; \
  6026. return count; \
  6027. }
  6028. USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
  6029. UINT_MAX);
  6030. #undef USEC_STORE_FUNCTION
  6031. static ssize_t bfq_max_budget_store(struct elevator_queue *e,
  6032. const char *page, size_t count)
  6033. {
  6034. struct bfq_data *bfqd = e->elevator_data;
  6035. unsigned long __data;
  6036. int ret;
  6037. ret = bfq_var_store(&__data, (page));
  6038. if (ret)
  6039. return ret;
  6040. if (__data == 0)
  6041. bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
  6042. else {
  6043. if (__data > INT_MAX)
  6044. __data = INT_MAX;
  6045. bfqd->bfq_max_budget = __data;
  6046. }
  6047. bfqd->bfq_user_max_budget = __data;
  6048. return count;
  6049. }
  6050. /*
  6051. * Leaving this name to preserve name compatibility with cfq
  6052. * parameters, but this timeout is used for both sync and async.
  6053. */
  6054. static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
  6055. const char *page, size_t count)
  6056. {
  6057. struct bfq_data *bfqd = e->elevator_data;
  6058. unsigned long __data;
  6059. int ret;
  6060. ret = bfq_var_store(&__data, (page));
  6061. if (ret)
  6062. return ret;
  6063. if (__data < 1)
  6064. __data = 1;
  6065. else if (__data > INT_MAX)
  6066. __data = INT_MAX;
  6067. bfqd->bfq_timeout = msecs_to_jiffies(__data);
  6068. if (bfqd->bfq_user_max_budget == 0)
  6069. bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
  6070. return count;
  6071. }
  6072. static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
  6073. const char *page, size_t count)
  6074. {
  6075. struct bfq_data *bfqd = e->elevator_data;
  6076. unsigned long __data;
  6077. int ret;
  6078. ret = bfq_var_store(&__data, (page));
  6079. if (ret)
  6080. return ret;
  6081. if (__data > 1)
  6082. __data = 1;
  6083. if (!bfqd->strict_guarantees && __data == 1
  6084. && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
  6085. bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
  6086. bfqd->strict_guarantees = __data;
  6087. return count;
  6088. }
  6089. static ssize_t bfq_low_latency_store(struct elevator_queue *e,
  6090. const char *page, size_t count)
  6091. {
  6092. struct bfq_data *bfqd = e->elevator_data;
  6093. unsigned long __data;
  6094. int ret;
  6095. ret = bfq_var_store(&__data, (page));
  6096. if (ret)
  6097. return ret;
  6098. if (__data > 1)
  6099. __data = 1;
  6100. if (__data == 0 && bfqd->low_latency != 0)
  6101. bfq_end_wr(bfqd);
  6102. bfqd->low_latency = __data;
  6103. return count;
  6104. }
  6105. #define BFQ_ATTR(name) \
  6106. __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
  6107. static struct elv_fs_entry bfq_attrs[] = {
  6108. BFQ_ATTR(fifo_expire_sync),
  6109. BFQ_ATTR(fifo_expire_async),
  6110. BFQ_ATTR(back_seek_max),
  6111. BFQ_ATTR(back_seek_penalty),
  6112. BFQ_ATTR(slice_idle),
  6113. BFQ_ATTR(slice_idle_us),
  6114. BFQ_ATTR(max_budget),
  6115. BFQ_ATTR(timeout_sync),
  6116. BFQ_ATTR(strict_guarantees),
  6117. BFQ_ATTR(low_latency),
  6118. __ATTR_NULL
  6119. };
  6120. static struct elevator_type iosched_bfq_mq = {
  6121. .ops = {
  6122. .limit_depth = bfq_limit_depth,
  6123. .prepare_request = bfq_prepare_request,
  6124. .requeue_request = bfq_finish_requeue_request,
  6125. .finish_request = bfq_finish_requeue_request,
  6126. .exit_icq = bfq_exit_icq,
  6127. .insert_requests = bfq_insert_requests,
  6128. .dispatch_request = bfq_dispatch_request,
  6129. .next_request = elv_rb_latter_request,
  6130. .former_request = elv_rb_former_request,
  6131. .allow_merge = bfq_allow_bio_merge,
  6132. .bio_merge = bfq_bio_merge,
  6133. .request_merge = bfq_request_merge,
  6134. .requests_merged = bfq_requests_merged,
  6135. .request_merged = bfq_request_merged,
  6136. .has_work = bfq_has_work,
  6137. .depth_updated = bfq_depth_updated,
  6138. .init_hctx = bfq_init_hctx,
  6139. .init_sched = bfq_init_queue,
  6140. .exit_sched = bfq_exit_queue,
  6141. },
  6142. .icq_size = sizeof(struct bfq_io_cq),
  6143. .icq_align = __alignof__(struct bfq_io_cq),
  6144. .elevator_attrs = bfq_attrs,
  6145. .elevator_name = "bfq",
  6146. .elevator_owner = THIS_MODULE,
  6147. };
  6148. MODULE_ALIAS("bfq-iosched");
  6149. static int __init bfq_init(void)
  6150. {
  6151. int ret;
  6152. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6153. ret = blkcg_policy_register(&blkcg_policy_bfq);
  6154. if (ret)
  6155. return ret;
  6156. #endif
  6157. ret = -ENOMEM;
  6158. if (bfq_slab_setup())
  6159. goto err_pol_unreg;
  6160. /*
  6161. * Times to load large popular applications for the typical
  6162. * systems installed on the reference devices (see the
  6163. * comments before the definition of the next
  6164. * array). Actually, we use slightly lower values, as the
  6165. * estimated peak rate tends to be smaller than the actual
  6166. * peak rate. The reason for this last fact is that estimates
  6167. * are computed over much shorter time intervals than the long
  6168. * intervals typically used for benchmarking. Why? First, to
  6169. * adapt more quickly to variations. Second, because an I/O
  6170. * scheduler cannot rely on a peak-rate-evaluation workload to
  6171. * be run for a long time.
  6172. */
  6173. ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */
  6174. ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */
  6175. ret = elv_register(&iosched_bfq_mq);
  6176. if (ret)
  6177. goto slab_kill;
  6178. return 0;
  6179. slab_kill:
  6180. bfq_slab_kill();
  6181. err_pol_unreg:
  6182. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6183. blkcg_policy_unregister(&blkcg_policy_bfq);
  6184. #endif
  6185. return ret;
  6186. }
  6187. static void __exit bfq_exit(void)
  6188. {
  6189. elv_unregister(&iosched_bfq_mq);
  6190. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6191. blkcg_policy_unregister(&blkcg_policy_bfq);
  6192. #endif
  6193. bfq_slab_kill();
  6194. }
  6195. module_init(bfq_init);
  6196. module_exit(bfq_exit);
  6197. MODULE_AUTHOR("Paolo Valente");
  6198. MODULE_LICENSE("GPL");
  6199. MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");