fair.c 305 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762777277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209321032113212321332143215321632173218321932203221322232233224322532263227322832293230323132323233323432353236323732383239324032413242324332443245324632473248324932503251325232533254325532563257325832593260326132623263326432653266326732683269327032713272327332743275327632773278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256425742584259426042614262426342644265426642674268426942704271427242734274427542764277427842794280428142824283428442854286428742884289429042914292429342944295429642974298429943004301430243034304430543064307430843094310431143124313431443154316431743184319432043214322432343244325432643274328432943304331433243334334433543364337433843394340434143424343434443454346434743484349435043514352435343544355435643574358435943604361436243634364436543664367436843694370437143724373437443754376437743784379438043814382438343844385438643874388438943904391439243934394439543964397439843994400440144024403440444054406440744084409441044114412441344144415441644174418441944204421442244234424442544264427442844294430443144324433443444354436443744384439444044414442444344444445444644474448444944504451445244534454445544564457445844594460446144624463446444654466446744684469447044714472447344744475447644774478447944804481448244834484448544864487448844894490449144924493449444954496449744984499450045014502450345044505450645074508450945104511451245134514451545164517451845194520452145224523452445254526452745284529453045314532453345344535453645374538453945404541454245434544454545464547454845494550455145524553455445554556455745584559456045614562456345644565456645674568456945704571457245734574457545764577457845794580458145824583458445854586458745884589459045914592459345944595459645974598459946004601460246034604460546064607460846094610461146124613461446154616461746184619462046214622462346244625462646274628462946304631463246334634463546364637463846394640464146424643464446454646464746484649465046514652465346544655465646574658465946604661466246634664466546664667466846694670467146724673467446754676467746784679468046814682468346844685468646874688468946904691469246934694469546964697469846994700470147024703470447054706470747084709471047114712471347144715471647174718471947204721472247234724472547264727472847294730473147324733473447354736473747384739474047414742474347444745474647474748474947504751475247534754475547564757475847594760476147624763476447654766476747684769477047714772477347744775477647774778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527752785279528052815282528352845285528652875288528952905291529252935294529552965297529852995300530153025303530453055306530753085309531053115312531353145315531653175318531953205321532253235324532553265327532853295330533153325333533453355336533753385339534053415342534353445345534653475348534953505351535253535354535553565357535853595360536153625363536453655366536753685369537053715372537353745375537653775378537953805381538253835384538553865387538853895390539153925393539453955396539753985399540054015402540354045405540654075408540954105411541254135414541554165417541854195420542154225423542454255426542754285429543054315432543354345435543654375438543954405441544254435444544554465447544854495450545154525453545454555456545754585459546054615462546354645465546654675468546954705471547254735474547554765477547854795480548154825483548454855486548754885489549054915492549354945495549654975498549955005501550255035504550555065507550855095510551155125513551455155516551755185519552055215522552355245525552655275528552955305531553255335534553555365537553855395540554155425543554455455546554755485549555055515552555355545555555655575558555955605561556255635564556555665567556855695570557155725573557455755576557755785579558055815582558355845585558655875588558955905591559255935594559555965597559855995600560156025603560456055606560756085609561056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678567956805681568256835684568556865687568856895690569156925693569456955696569756985699570057015702570357045705570657075708570957105711571257135714571557165717571857195720572157225723572457255726572757285729573057315732573357345735573657375738573957405741574257435744574557465747574857495750575157525753575457555756575757585759576057615762576357645765576657675768576957705771577257735774577557765777577857795780578157825783578457855786578757885789579057915792579357945795579657975798579958005801580258035804580558065807580858095810581158125813581458155816581758185819582058215822582358245825582658275828582958305831583258335834583558365837583858395840584158425843584458455846584758485849585058515852585358545855585658575858585958605861586258635864586558665867586858695870587158725873587458755876587758785879588058815882588358845885588658875888588958905891589258935894589558965897589858995900590159025903590459055906590759085909591059115912591359145915591659175918591959205921592259235924592559265927592859295930593159325933593459355936593759385939594059415942594359445945594659475948594959505951595259535954595559565957595859595960596159625963596459655966596759685969597059715972597359745975597659775978597959805981598259835984598559865987598859895990599159925993599459955996599759985999600060016002600360046005600660076008600960106011601260136014601560166017601860196020602160226023602460256026602760286029603060316032603360346035603660376038603960406041604260436044604560466047604860496050605160526053605460556056605760586059606060616062606360646065606660676068606960706071607260736074607560766077607860796080608160826083608460856086608760886089609060916092609360946095609660976098609961006101610261036104610561066107610861096110611161126113611461156116611761186119612061216122612361246125612661276128612961306131613261336134613561366137613861396140614161426143614461456146614761486149615061516152615361546155615661576158615961606161616261636164616561666167616861696170617161726173617461756176617761786179618061816182618361846185618661876188618961906191619261936194619561966197619861996200620162026203620462056206620762086209621062116212621362146215621662176218621962206221622262236224622562266227622862296230623162326233623462356236623762386239624062416242624362446245624662476248624962506251625262536254625562566257625862596260626162626263626462656266626762686269627062716272627362746275627662776278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677767786779678067816782678367846785678667876788678967906791679267936794679567966797679867996800680168026803680468056806680768086809681068116812681368146815681668176818681968206821682268236824682568266827682868296830683168326833683468356836683768386839684068416842684368446845684668476848684968506851685268536854685568566857685868596860686168626863686468656866686768686869687068716872687368746875687668776878687968806881688268836884688568866887688868896890689168926893689468956896689768986899690069016902690369046905690669076908690969106911691269136914691569166917691869196920692169226923692469256926692769286929693069316932693369346935693669376938693969406941694269436944694569466947694869496950695169526953695469556956695769586959696069616962696369646965696669676968696969706971697269736974697569766977697869796980698169826983698469856986698769886989699069916992699369946995699669976998699970007001700270037004700570067007700870097010701170127013701470157016701770187019702070217022702370247025702670277028702970307031703270337034703570367037703870397040704170427043704470457046704770487049705070517052705370547055705670577058705970607061706270637064706570667067706870697070707170727073707470757076707770787079708070817082708370847085708670877088708970907091709270937094709570967097709870997100710171027103710471057106710771087109711071117112711371147115711671177118711971207121712271237124712571267127712871297130713171327133713471357136713771387139714071417142714371447145714671477148714971507151715271537154715571567157715871597160716171627163716471657166716771687169717071717172717371747175717671777178717971807181718271837184718571867187718871897190719171927193719471957196719771987199720072017202720372047205720672077208720972107211721272137214721572167217721872197220722172227223722472257226722772287229723072317232723372347235723672377238723972407241724272437244724572467247724872497250725172527253725472557256725772587259726072617262726372647265726672677268726972707271727272737274727572767277727872797280728172827283728472857286728772887289729072917292729372947295729672977298729973007301730273037304730573067307730873097310731173127313731473157316731773187319732073217322732373247325732673277328732973307331733273337334733573367337733873397340734173427343734473457346734773487349735073517352735373547355735673577358735973607361736273637364736573667367736873697370737173727373737473757376737773787379738073817382738373847385738673877388738973907391739273937394739573967397739873997400740174027403740474057406740774087409741074117412741374147415741674177418741974207421742274237424742574267427742874297430743174327433743474357436743774387439744074417442744374447445744674477448744974507451745274537454745574567457745874597460746174627463746474657466746774687469747074717472747374747475747674777478747974807481748274837484748574867487748874897490749174927493749474957496749774987499750075017502750375047505750675077508750975107511751275137514751575167517751875197520752175227523752475257526752775287529753075317532753375347535753675377538753975407541754275437544754575467547754875497550755175527553755475557556755775587559756075617562756375647565756675677568756975707571757275737574757575767577757875797580758175827583758475857586758775887589759075917592759375947595759675977598759976007601760276037604760576067607760876097610761176127613761476157616761776187619762076217622762376247625762676277628762976307631763276337634763576367637763876397640764176427643764476457646764776487649765076517652765376547655765676577658765976607661766276637664766576667667766876697670767176727673767476757676767776787679768076817682768376847685768676877688768976907691769276937694769576967697769876997700770177027703770477057706770777087709771077117712771377147715771677177718771977207721772277237724772577267727772877297730773177327733773477357736773777387739774077417742774377447745774677477748774977507751775277537754775577567757775877597760776177627763776477657766776777687769777077717772777377747775777677777778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827782788279828082818282828382848285828682878288828982908291829282938294829582968297829882998300830183028303830483058306830783088309831083118312831383148315831683178318831983208321832283238324832583268327832883298330833183328333833483358336833783388339834083418342834383448345834683478348834983508351835283538354835583568357835883598360836183628363836483658366836783688369837083718372837383748375837683778378837983808381838283838384838583868387838883898390839183928393839483958396839783988399840084018402840384048405840684078408840984108411841284138414841584168417841884198420842184228423842484258426842784288429843084318432843384348435843684378438843984408441844284438444844584468447844884498450845184528453845484558456845784588459846084618462846384648465846684678468846984708471847284738474847584768477847884798480848184828483848484858486848784888489849084918492849384948495849684978498849985008501850285038504850585068507850885098510851185128513851485158516851785188519852085218522852385248525852685278528852985308531853285338534853585368537853885398540854185428543854485458546854785488549855085518552855385548555855685578558855985608561856285638564856585668567856885698570857185728573857485758576857785788579858085818582858385848585858685878588858985908591859285938594859585968597859885998600860186028603860486058606860786088609861086118612861386148615861686178618861986208621862286238624862586268627862886298630863186328633863486358636863786388639864086418642864386448645864686478648864986508651865286538654865586568657865886598660866186628663866486658666866786688669867086718672867386748675867686778678867986808681868286838684868586868687868886898690869186928693869486958696869786988699870087018702870387048705870687078708870987108711871287138714871587168717871887198720872187228723872487258726872787288729873087318732873387348735873687378738873987408741874287438744874587468747874887498750875187528753875487558756875787588759876087618762876387648765876687678768876987708771877287738774877587768777877887798780878187828783878487858786878787888789879087918792879387948795879687978798879988008801880288038804880588068807880888098810881188128813881488158816881788188819882088218822882388248825882688278828882988308831883288338834883588368837883888398840884188428843884488458846884788488849885088518852885388548855885688578858885988608861886288638864886588668867886888698870887188728873887488758876887788788879888088818882888388848885888688878888888988908891889288938894889588968897889888998900890189028903890489058906890789088909891089118912891389148915891689178918891989208921892289238924892589268927892889298930893189328933893489358936893789388939894089418942894389448945894689478948894989508951895289538954895589568957895889598960896189628963896489658966896789688969897089718972897389748975897689778978897989808981898289838984898589868987898889898990899189928993899489958996899789988999900090019002900390049005900690079008900990109011901290139014901590169017901890199020902190229023902490259026902790289029903090319032903390349035903690379038903990409041904290439044904590469047904890499050905190529053905490559056905790589059906090619062906390649065906690679068906990709071907290739074907590769077907890799080908190829083908490859086908790889089909090919092909390949095909690979098909991009101910291039104910591069107910891099110911191129113911491159116911791189119912091219122912391249125912691279128912991309131913291339134913591369137913891399140914191429143914491459146914791489149915091519152915391549155915691579158915991609161916291639164916591669167916891699170917191729173917491759176917791789179918091819182918391849185918691879188918991909191919291939194919591969197919891999200920192029203920492059206920792089209921092119212921392149215921692179218921992209221922292239224922592269227922892299230923192329233923492359236923792389239924092419242924392449245924692479248924992509251925292539254925592569257925892599260926192629263926492659266926792689269927092719272927392749275927692779278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977797789779978097819782978397849785978697879788978997909791979297939794979597969797979897999800980198029803980498059806980798089809981098119812981398149815981698179818981998209821982298239824982598269827982898299830983198329833983498359836983798389839984098419842984398449845984698479848984998509851985298539854985598569857985898599860986198629863986498659866986798689869987098719872987398749875987698779878987998809881988298839884988598869887988898899890989198929893989498959896989798989899990099019902990399049905990699079908990999109911991299139914991599169917991899199920992199229923992499259926992799289929993099319932993399349935993699379938993999409941994299439944994599469947994899499950995199529953995499559956995799589959996099619962996399649965996699679968996999709971997299739974997599769977997899799980998199829983998499859986998799889989999099919992999399949995999699979998999910000100011000210003100041000510006100071000810009100101001110012100131001410015100161001710018100191002010021100221002310024100251002610027100281002910030100311003210033100341003510036100371003810039100401004110042100431004410045100461004710048100491005010051100521005310054100551005610057100581005910060100611006210063100641006510066100671006810069100701007110072100731007410075100761007710078100791008010081100821008310084100851008610087100881008910090100911009210093100941009510096100971009810099101001010110102101031010410105101061010710108101091011010111101121011310114101151011610117101181011910120101211012210123101241012510126101271012810129101301013110132101331013410135101361013710138101391014010141101421014310144101451014610147101481014910150101511015210153101541015510156101571015810159101601016110162101631016410165101661016710168101691017010171101721017310174101751017610177101781017910180101811018210183101841018510186101871018810189101901019110192101931019410195101961019710198101991020010201102021020310204102051020610207102081020910210102111021210213102141021510216102171021810219102201022110222102231022410225102261022710228102291023010231102321023310234102351023610237102381023910240102411024210243102441024510246102471024810249102501025110252102531025410255102561025710258102591026010261102621026310264102651026610267102681026910270102711027210273102741027510276102771027810279102801028110282102831028410285102861028710288102891029010291102921029310294102951029610297102981029910300103011030210303103041030510306103071030810309103101031110312103131031410315103161031710318103191032010321103221032310324103251032610327103281032910330103311033210333103341033510336103371033810339103401034110342103431034410345103461034710348103491035010351103521035310354103551035610357103581035910360103611036210363103641036510366103671036810369103701037110372103731037410375103761037710378103791038010381103821038310384103851038610387103881038910390103911039210393103941039510396103971039810399104001040110402104031040410405104061040710408104091041010411104121041310414104151041610417104181041910420104211042210423104241042510426104271042810429104301043110432104331043410435104361043710438104391044010441104421044310444104451044610447104481044910450104511045210453104541045510456104571045810459104601046110462104631046410465104661046710468104691047010471104721047310474104751047610477104781047910480104811048210483104841048510486104871048810489104901049110492104931049410495104961049710498104991050010501105021050310504105051050610507105081050910510105111051210513105141051510516105171051810519105201052110522105231052410525105261052710528105291053010531105321053310534105351053610537105381053910540105411054210543105441054510546105471054810549105501055110552105531055410555105561055710558105591056010561105621056310564105651056610567105681056910570105711057210573105741057510576105771057810579105801058110582105831058410585105861058710588105891059010591105921059310594105951059610597105981059910600106011060210603106041060510606106071060810609106101061110612106131061410615106161061710618106191062010621106221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211102211023110241102511026110271102811029110301103111032110331103411035110361103711038110391104011041110421104311044110451104611047110481104911050110511105211053110541105511056110571105811059110601106111062110631106411065110661106711068110691107011071110721107311074110751107611077110781107911080110811108211083110841108511086110871108811089110901109111092110931109411095110961109711098110991110011101111021110311104111051110611107111081110911110111111111211113111141111511116111171111811119111201112111122111231112411125111261112711128111291113011131111321113311134111351113611137111381113911140111411114211143111441114511146111471114811149111501115111152111531115411155111561115711158111591116011161111621116311164111651116611167111681116911170111711117211173111741117511176111771117811179111801118111182111831118411185111861118711188111891119011191111921119311194111951119611197111981119911200112011120211203112041120511206112071120811209112101121111212112131121411215112161121711218112191122011221112221122311224112251122611227112281122911230112311123211233112341123511236112371123811239112401124111242112431124411245112461124711248112491125011251112521125311254112551125611257112581125911260112611126211263112641126511266112671126811269112701127111272112731127411275112761127711278112791128011281112821128311284112851128611287112881128911290112911129211293112941129511296112971129811299113001130111302113031130411305113061130711308113091131011311113121131311314113151131611317113181131911320113211132211323113241132511326113271132811329113301133111332113331133411335113361133711338113391134011341113421134311344113451134611347113481134911350113511135211353113541135511356113571135811359113601136111362113631136411365113661136711368113691137011371113721137311374113751137611377113781137911380113811138211383113841138511386113871138811389113901139111392113931139411395113961139711398113991140011401114021140311404114051140611407114081140911410114111141211413114141141511416114171141811419114201142111422114231142411425114261142711428114291143011431114321143311434114351143611437114381143911440114411144211443114441144511446114471144811449114501145111452114531145411455114561145711458114591146011461114621146311464114651146611467114681146911470114711147211473114741147511476114771147811479114801148111482114831148411485114861148711488114891149011491114921149311494114951149611497114981149911500115011150211503115041150511506115071150811509115101151111512115131151411515115161151711518115191152011521115221152311524115251152611527115281152911530115311153211533115341153511536115371153811539115401154111542115431154411545115461154711548115491155011551115521155311554115551155611557115581155911560115611156211563115641156511566115671156811569115701157111572115731157411575115761157711578115791158011581115821158311584115851158611587115881158911590115911159211593115941159511596115971159811599116001160111602116031160411605116061160711608116091161011611
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
  4. *
  5. * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  6. *
  7. * Interactivity improvements by Mike Galbraith
  8. * (C) 2007 Mike Galbraith <efault@gmx.de>
  9. *
  10. * Various enhancements by Dmitry Adamushko.
  11. * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
  12. *
  13. * Group scheduling enhancements by Srivatsa Vaddagiri
  14. * Copyright IBM Corporation, 2007
  15. * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  16. *
  17. * Scaled math optimizations by Thomas Gleixner
  18. * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
  19. *
  20. * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  21. * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  22. */
  23. #include "sched.h"
  24. #include <trace/hooks/sched.h>
  25. EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_runtime);
  26. /*
  27. * Targeted preemption latency for CPU-bound tasks:
  28. *
  29. * NOTE: this latency value is not the same as the concept of
  30. * 'timeslice length' - timeslices in CFS are of variable length
  31. * and have no persistent notion like in traditional, time-slice
  32. * based scheduling concepts.
  33. *
  34. * (to see the precise effective timeslice length of your workload,
  35. * run vmstat and monitor the context-switches (cs) field)
  36. *
  37. * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  38. */
  39. unsigned int sysctl_sched_latency = 6000000ULL;
  40. EXPORT_SYMBOL_GPL(sysctl_sched_latency);
  41. static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
  42. /*
  43. * The initial- and re-scaling of tunables is configurable
  44. *
  45. * Options are:
  46. *
  47. * SCHED_TUNABLESCALING_NONE - unscaled, always *1
  48. * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
  49. * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
  50. *
  51. * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
  52. */
  53. enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
  54. /*
  55. * Minimal preemption granularity for CPU-bound tasks:
  56. *
  57. * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  58. */
  59. unsigned int sysctl_sched_min_granularity = 750000ULL;
  60. EXPORT_SYMBOL_GPL(sysctl_sched_min_granularity);
  61. static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
  62. /*
  63. * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
  64. */
  65. static unsigned int sched_nr_latency = 8;
  66. /*
  67. * After fork, child runs first. If set to 0 (default) then
  68. * parent will (try to) run first.
  69. */
  70. unsigned int sysctl_sched_child_runs_first __read_mostly;
  71. /*
  72. * SCHED_OTHER wake-up granularity.
  73. *
  74. * This option delays the preemption effects of decoupled workloads
  75. * and reduces their over-scheduling. Synchronous workloads will still
  76. * have immediate wakeup/sleep latencies.
  77. *
  78. * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  79. */
  80. unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
  81. static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
  82. const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  83. int sched_thermal_decay_shift;
  84. static int __init setup_sched_thermal_decay_shift(char *str)
  85. {
  86. int _shift = 0;
  87. if (kstrtoint(str, 0, &_shift))
  88. pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");
  89. sched_thermal_decay_shift = clamp(_shift, 0, 10);
  90. return 1;
  91. }
  92. __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
  93. #ifdef CONFIG_SMP
  94. /*
  95. * For asym packing, by default the lower numbered CPU has higher priority.
  96. */
  97. int __weak arch_asym_cpu_priority(int cpu)
  98. {
  99. return -cpu;
  100. }
  101. /*
  102. * The margin used when comparing utilization with CPU capacity.
  103. *
  104. * (default: ~20%)
  105. */
  106. #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
  107. #endif
  108. #ifdef CONFIG_CFS_BANDWIDTH
  109. /*
  110. * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
  111. * each time a cfs_rq requests quota.
  112. *
  113. * Note: in the case that the slice exceeds the runtime remaining (either due
  114. * to consumption or the quota being specified to be smaller than the slice)
  115. * we will always only issue the remaining available time.
  116. *
  117. * (default: 5 msec, units: microseconds)
  118. */
  119. unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
  120. #endif
  121. static inline void update_load_add(struct load_weight *lw, unsigned long inc)
  122. {
  123. lw->weight += inc;
  124. lw->inv_weight = 0;
  125. }
  126. static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  127. {
  128. lw->weight -= dec;
  129. lw->inv_weight = 0;
  130. }
  131. static inline void update_load_set(struct load_weight *lw, unsigned long w)
  132. {
  133. lw->weight = w;
  134. lw->inv_weight = 0;
  135. }
  136. /*
  137. * Increase the granularity value when there are more CPUs,
  138. * because with more CPUs the 'effective latency' as visible
  139. * to users decreases. But the relationship is not linear,
  140. * so pick a second-best guess by going with the log2 of the
  141. * number of CPUs.
  142. *
  143. * This idea comes from the SD scheduler of Con Kolivas:
  144. */
  145. static unsigned int get_update_sysctl_factor(void)
  146. {
  147. unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
  148. unsigned int factor;
  149. switch (sysctl_sched_tunable_scaling) {
  150. case SCHED_TUNABLESCALING_NONE:
  151. factor = 1;
  152. break;
  153. case SCHED_TUNABLESCALING_LINEAR:
  154. factor = cpus;
  155. break;
  156. case SCHED_TUNABLESCALING_LOG:
  157. default:
  158. factor = 1 + ilog2(cpus);
  159. break;
  160. }
  161. return factor;
  162. }
  163. static void update_sysctl(void)
  164. {
  165. unsigned int factor = get_update_sysctl_factor();
  166. #define SET_SYSCTL(name) \
  167. (sysctl_##name = (factor) * normalized_sysctl_##name)
  168. SET_SYSCTL(sched_min_granularity);
  169. SET_SYSCTL(sched_latency);
  170. SET_SYSCTL(sched_wakeup_granularity);
  171. #undef SET_SYSCTL
  172. }
  173. void __init sched_init_granularity(void)
  174. {
  175. update_sysctl();
  176. }
  177. #define WMULT_CONST (~0U)
  178. #define WMULT_SHIFT 32
  179. static void __update_inv_weight(struct load_weight *lw)
  180. {
  181. unsigned long w;
  182. if (likely(lw->inv_weight))
  183. return;
  184. w = scale_load_down(lw->weight);
  185. if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
  186. lw->inv_weight = 1;
  187. else if (unlikely(!w))
  188. lw->inv_weight = WMULT_CONST;
  189. else
  190. lw->inv_weight = WMULT_CONST / w;
  191. }
  192. /*
  193. * delta_exec * weight / lw.weight
  194. * OR
  195. * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
  196. *
  197. * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
  198. * we're guaranteed shift stays positive because inv_weight is guaranteed to
  199. * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
  200. *
  201. * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
  202. * weight/lw.weight <= 1, and therefore our shift will also be positive.
  203. */
  204. static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
  205. {
  206. u64 fact = scale_load_down(weight);
  207. int shift = WMULT_SHIFT;
  208. __update_inv_weight(lw);
  209. if (unlikely(fact >> 32)) {
  210. while (fact >> 32) {
  211. fact >>= 1;
  212. shift--;
  213. }
  214. }
  215. fact = mul_u32_u32(fact, lw->inv_weight);
  216. while (fact >> 32) {
  217. fact >>= 1;
  218. shift--;
  219. }
  220. return mul_u64_u32_shr(delta_exec, fact, shift);
  221. }
  222. const struct sched_class fair_sched_class;
  223. /**************************************************************
  224. * CFS operations on generic schedulable entities:
  225. */
  226. #ifdef CONFIG_FAIR_GROUP_SCHED
  227. static inline struct task_struct *task_of(struct sched_entity *se)
  228. {
  229. SCHED_WARN_ON(!entity_is_task(se));
  230. return container_of(se, struct task_struct, se);
  231. }
  232. /* Walk up scheduling entities hierarchy */
  233. #define for_each_sched_entity(se) \
  234. for (; se; se = se->parent)
  235. static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
  236. {
  237. return p->se.cfs_rq;
  238. }
  239. /* runqueue on which this entity is (to be) queued */
  240. static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
  241. {
  242. return se->cfs_rq;
  243. }
  244. /* runqueue "owned" by this group */
  245. static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
  246. {
  247. return grp->my_q;
  248. }
  249. static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
  250. {
  251. if (!path)
  252. return;
  253. if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
  254. autogroup_path(cfs_rq->tg, path, len);
  255. else if (cfs_rq && cfs_rq->tg->css.cgroup)
  256. cgroup_path(cfs_rq->tg->css.cgroup, path, len);
  257. else
  258. strlcpy(path, "(null)", len);
  259. }
  260. static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
  261. {
  262. struct rq *rq = rq_of(cfs_rq);
  263. int cpu = cpu_of(rq);
  264. if (cfs_rq->on_list)
  265. return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
  266. cfs_rq->on_list = 1;
  267. /*
  268. * Ensure we either appear before our parent (if already
  269. * enqueued) or force our parent to appear after us when it is
  270. * enqueued. The fact that we always enqueue bottom-up
  271. * reduces this to two cases and a special case for the root
  272. * cfs_rq. Furthermore, it also means that we will always reset
  273. * tmp_alone_branch either when the branch is connected
  274. * to a tree or when we reach the top of the tree
  275. */
  276. if (cfs_rq->tg->parent &&
  277. cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
  278. /*
  279. * If parent is already on the list, we add the child
  280. * just before. Thanks to circular linked property of
  281. * the list, this means to put the child at the tail
  282. * of the list that starts by parent.
  283. */
  284. list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
  285. &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
  286. /*
  287. * The branch is now connected to its tree so we can
  288. * reset tmp_alone_branch to the beginning of the
  289. * list.
  290. */
  291. rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
  292. return true;
  293. }
  294. if (!cfs_rq->tg->parent) {
  295. /*
  296. * cfs rq without parent should be put
  297. * at the tail of the list.
  298. */
  299. list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
  300. &rq->leaf_cfs_rq_list);
  301. /*
  302. * We have reach the top of a tree so we can reset
  303. * tmp_alone_branch to the beginning of the list.
  304. */
  305. rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
  306. return true;
  307. }
  308. /*
  309. * The parent has not already been added so we want to
  310. * make sure that it will be put after us.
  311. * tmp_alone_branch points to the begin of the branch
  312. * where we will add parent.
  313. */
  314. list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
  315. /*
  316. * update tmp_alone_branch to points to the new begin
  317. * of the branch
  318. */
  319. rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
  320. return false;
  321. }
  322. static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
  323. {
  324. if (cfs_rq->on_list) {
  325. struct rq *rq = rq_of(cfs_rq);
  326. /*
  327. * With cfs_rq being unthrottled/throttled during an enqueue,
  328. * it can happen the tmp_alone_branch points the a leaf that
  329. * we finally want to del. In this case, tmp_alone_branch moves
  330. * to the prev element but it will point to rq->leaf_cfs_rq_list
  331. * at the end of the enqueue.
  332. */
  333. if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
  334. rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
  335. list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
  336. cfs_rq->on_list = 0;
  337. }
  338. }
  339. static inline void assert_list_leaf_cfs_rq(struct rq *rq)
  340. {
  341. SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
  342. }
  343. /* Iterate thr' all leaf cfs_rq's on a runqueue */
  344. #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
  345. list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
  346. leaf_cfs_rq_list)
  347. /* Do the two (enqueued) entities belong to the same group ? */
  348. static inline struct cfs_rq *
  349. is_same_group(struct sched_entity *se, struct sched_entity *pse)
  350. {
  351. if (se->cfs_rq == pse->cfs_rq)
  352. return se->cfs_rq;
  353. return NULL;
  354. }
  355. static inline struct sched_entity *parent_entity(struct sched_entity *se)
  356. {
  357. return se->parent;
  358. }
  359. static void
  360. find_matching_se(struct sched_entity **se, struct sched_entity **pse)
  361. {
  362. int se_depth, pse_depth;
  363. /*
  364. * preemption test can be made between sibling entities who are in the
  365. * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
  366. * both tasks until we find their ancestors who are siblings of common
  367. * parent.
  368. */
  369. /* First walk up until both entities are at same depth */
  370. se_depth = (*se)->depth;
  371. pse_depth = (*pse)->depth;
  372. while (se_depth > pse_depth) {
  373. se_depth--;
  374. *se = parent_entity(*se);
  375. }
  376. while (pse_depth > se_depth) {
  377. pse_depth--;
  378. *pse = parent_entity(*pse);
  379. }
  380. while (!is_same_group(*se, *pse)) {
  381. *se = parent_entity(*se);
  382. *pse = parent_entity(*pse);
  383. }
  384. }
  385. #else /* !CONFIG_FAIR_GROUP_SCHED */
  386. static inline struct task_struct *task_of(struct sched_entity *se)
  387. {
  388. return container_of(se, struct task_struct, se);
  389. }
  390. #define for_each_sched_entity(se) \
  391. for (; se; se = NULL)
  392. static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
  393. {
  394. return &task_rq(p)->cfs;
  395. }
  396. static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
  397. {
  398. struct task_struct *p = task_of(se);
  399. struct rq *rq = task_rq(p);
  400. return &rq->cfs;
  401. }
  402. /* runqueue "owned" by this group */
  403. static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
  404. {
  405. return NULL;
  406. }
  407. static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
  408. {
  409. if (path)
  410. strlcpy(path, "(null)", len);
  411. }
  412. static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
  413. {
  414. return true;
  415. }
  416. static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
  417. {
  418. }
  419. static inline void assert_list_leaf_cfs_rq(struct rq *rq)
  420. {
  421. }
  422. #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
  423. for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
  424. static inline struct sched_entity *parent_entity(struct sched_entity *se)
  425. {
  426. return NULL;
  427. }
  428. static inline void
  429. find_matching_se(struct sched_entity **se, struct sched_entity **pse)
  430. {
  431. }
  432. #endif /* CONFIG_FAIR_GROUP_SCHED */
  433. static __always_inline
  434. void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
  435. /**************************************************************
  436. * Scheduling class tree data structure manipulation methods:
  437. */
  438. static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
  439. {
  440. s64 delta = (s64)(vruntime - max_vruntime);
  441. if (delta > 0)
  442. max_vruntime = vruntime;
  443. return max_vruntime;
  444. }
  445. static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
  446. {
  447. s64 delta = (s64)(vruntime - min_vruntime);
  448. if (delta < 0)
  449. min_vruntime = vruntime;
  450. return min_vruntime;
  451. }
  452. static inline int entity_before(struct sched_entity *a,
  453. struct sched_entity *b)
  454. {
  455. return (s64)(a->vruntime - b->vruntime) < 0;
  456. }
  457. static void update_min_vruntime(struct cfs_rq *cfs_rq)
  458. {
  459. struct sched_entity *curr = cfs_rq->curr;
  460. struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
  461. u64 vruntime = cfs_rq->min_vruntime;
  462. if (curr) {
  463. if (curr->on_rq)
  464. vruntime = curr->vruntime;
  465. else
  466. curr = NULL;
  467. }
  468. if (leftmost) { /* non-empty tree */
  469. struct sched_entity *se;
  470. se = rb_entry(leftmost, struct sched_entity, run_node);
  471. if (!curr)
  472. vruntime = se->vruntime;
  473. else
  474. vruntime = min_vruntime(vruntime, se->vruntime);
  475. }
  476. /* ensure we never gain time by being placed backwards. */
  477. cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
  478. #ifndef CONFIG_64BIT
  479. smp_wmb();
  480. cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
  481. #endif
  482. }
  483. /*
  484. * Enqueue an entity into the rb-tree:
  485. */
  486. static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  487. {
  488. struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
  489. struct rb_node *parent = NULL;
  490. struct sched_entity *entry;
  491. bool leftmost = true;
  492. trace_android_rvh_enqueue_entity(cfs_rq, se);
  493. /*
  494. * Find the right place in the rbtree:
  495. */
  496. while (*link) {
  497. parent = *link;
  498. entry = rb_entry(parent, struct sched_entity, run_node);
  499. /*
  500. * We dont care about collisions. Nodes with
  501. * the same key stay together.
  502. */
  503. if (entity_before(se, entry)) {
  504. link = &parent->rb_left;
  505. } else {
  506. link = &parent->rb_right;
  507. leftmost = false;
  508. }
  509. }
  510. rb_link_node(&se->run_node, parent, link);
  511. rb_insert_color_cached(&se->run_node,
  512. &cfs_rq->tasks_timeline, leftmost);
  513. }
  514. static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  515. {
  516. trace_android_rvh_dequeue_entity(cfs_rq, se);
  517. rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
  518. }
  519. struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
  520. {
  521. struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
  522. if (!left)
  523. return NULL;
  524. return rb_entry(left, struct sched_entity, run_node);
  525. }
  526. static struct sched_entity *__pick_next_entity(struct sched_entity *se)
  527. {
  528. struct rb_node *next = rb_next(&se->run_node);
  529. if (!next)
  530. return NULL;
  531. return rb_entry(next, struct sched_entity, run_node);
  532. }
  533. #ifdef CONFIG_SCHED_DEBUG
  534. struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  535. {
  536. struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
  537. if (!last)
  538. return NULL;
  539. return rb_entry(last, struct sched_entity, run_node);
  540. }
  541. /**************************************************************
  542. * Scheduling class statistics methods:
  543. */
  544. int sched_proc_update_handler(struct ctl_table *table, int write,
  545. void *buffer, size_t *lenp, loff_t *ppos)
  546. {
  547. int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  548. unsigned int factor = get_update_sysctl_factor();
  549. if (ret || !write)
  550. return ret;
  551. sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
  552. sysctl_sched_min_granularity);
  553. #define WRT_SYSCTL(name) \
  554. (normalized_sysctl_##name = sysctl_##name / (factor))
  555. WRT_SYSCTL(sched_min_granularity);
  556. WRT_SYSCTL(sched_latency);
  557. WRT_SYSCTL(sched_wakeup_granularity);
  558. #undef WRT_SYSCTL
  559. return 0;
  560. }
  561. #endif
  562. /*
  563. * delta /= w
  564. */
  565. static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
  566. {
  567. if (unlikely(se->load.weight != NICE_0_LOAD))
  568. delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
  569. return delta;
  570. }
  571. /*
  572. * The idea is to set a period in which each task runs once.
  573. *
  574. * When there are too many tasks (sched_nr_latency) we have to stretch
  575. * this period because otherwise the slices get too small.
  576. *
  577. * p = (nr <= nl) ? l : l*nr/nl
  578. */
  579. static u64 __sched_period(unsigned long nr_running)
  580. {
  581. if (unlikely(nr_running > sched_nr_latency))
  582. return nr_running * sysctl_sched_min_granularity;
  583. else
  584. return sysctl_sched_latency;
  585. }
  586. /*
  587. * We calculate the wall-time slice from the period by taking a part
  588. * proportional to the weight.
  589. *
  590. * s = p*P[w/rw]
  591. */
  592. static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
  593. {
  594. unsigned int nr_running = cfs_rq->nr_running;
  595. u64 slice;
  596. if (sched_feat(ALT_PERIOD))
  597. nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
  598. slice = __sched_period(nr_running + !se->on_rq);
  599. for_each_sched_entity(se) {
  600. struct load_weight *load;
  601. struct load_weight lw;
  602. cfs_rq = cfs_rq_of(se);
  603. load = &cfs_rq->load;
  604. if (unlikely(!se->on_rq)) {
  605. lw = cfs_rq->load;
  606. update_load_add(&lw, se->load.weight);
  607. load = &lw;
  608. }
  609. slice = __calc_delta(slice, se->load.weight, load);
  610. }
  611. if (sched_feat(BASE_SLICE))
  612. slice = max(slice, (u64)sysctl_sched_min_granularity);
  613. return slice;
  614. }
  615. /*
  616. * We calculate the vruntime slice of a to-be-inserted task.
  617. *
  618. * vs = s/w
  619. */
  620. static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
  621. {
  622. return calc_delta_fair(sched_slice(cfs_rq, se), se);
  623. }
  624. #include "pelt.h"
  625. #ifdef CONFIG_SMP
  626. static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
  627. static unsigned long task_h_load(struct task_struct *p);
  628. static unsigned long capacity_of(int cpu);
  629. /* Give new sched_entity start runnable values to heavy its load in infant time */
  630. void init_entity_runnable_average(struct sched_entity *se)
  631. {
  632. struct sched_avg *sa = &se->avg;
  633. memset(sa, 0, sizeof(*sa));
  634. /*
  635. * Tasks are initialized with full load to be seen as heavy tasks until
  636. * they get a chance to stabilize to their real load level.
  637. * Group entities are initialized with zero load to reflect the fact that
  638. * nothing has been attached to the task group yet.
  639. */
  640. if (entity_is_task(se))
  641. sa->load_avg = scale_load_down(se->load.weight);
  642. /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
  643. }
  644. static void attach_entity_cfs_rq(struct sched_entity *se);
  645. /*
  646. * With new tasks being created, their initial util_avgs are extrapolated
  647. * based on the cfs_rq's current util_avg:
  648. *
  649. * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
  650. *
  651. * However, in many cases, the above util_avg does not give a desired
  652. * value. Moreover, the sum of the util_avgs may be divergent, such
  653. * as when the series is a harmonic series.
  654. *
  655. * To solve this problem, we also cap the util_avg of successive tasks to
  656. * only 1/2 of the left utilization budget:
  657. *
  658. * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
  659. *
  660. * where n denotes the nth task and cpu_scale the CPU capacity.
  661. *
  662. * For example, for a CPU with 1024 of capacity, a simplest series from
  663. * the beginning would be like:
  664. *
  665. * task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
  666. * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
  667. *
  668. * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
  669. * if util_avg > util_avg_cap.
  670. */
  671. void post_init_entity_util_avg(struct task_struct *p)
  672. {
  673. struct sched_entity *se = &p->se;
  674. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  675. struct sched_avg *sa = &se->avg;
  676. long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
  677. long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
  678. if (cap > 0) {
  679. if (cfs_rq->avg.util_avg != 0) {
  680. sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
  681. sa->util_avg /= (cfs_rq->avg.load_avg + 1);
  682. if (sa->util_avg > cap)
  683. sa->util_avg = cap;
  684. } else {
  685. sa->util_avg = cap;
  686. }
  687. }
  688. sa->runnable_avg = sa->util_avg;
  689. if (p->sched_class != &fair_sched_class) {
  690. /*
  691. * For !fair tasks do:
  692. *
  693. update_cfs_rq_load_avg(now, cfs_rq);
  694. attach_entity_load_avg(cfs_rq, se);
  695. switched_from_fair(rq, p);
  696. *
  697. * such that the next switched_to_fair() has the
  698. * expected state.
  699. */
  700. se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
  701. return;
  702. }
  703. /* Hook before this se's util is attached to cfs_rq's util */
  704. trace_android_rvh_post_init_entity_util_avg(se);
  705. attach_entity_cfs_rq(se);
  706. }
  707. #else /* !CONFIG_SMP */
  708. void init_entity_runnable_average(struct sched_entity *se)
  709. {
  710. }
  711. void post_init_entity_util_avg(struct task_struct *p)
  712. {
  713. }
  714. static void update_tg_load_avg(struct cfs_rq *cfs_rq)
  715. {
  716. }
  717. #endif /* CONFIG_SMP */
  718. /*
  719. * Update the current task's runtime statistics.
  720. */
  721. static void update_curr(struct cfs_rq *cfs_rq)
  722. {
  723. struct sched_entity *curr = cfs_rq->curr;
  724. u64 now = rq_clock_task(rq_of(cfs_rq));
  725. u64 delta_exec;
  726. if (unlikely(!curr))
  727. return;
  728. delta_exec = now - curr->exec_start;
  729. if (unlikely((s64)delta_exec <= 0))
  730. return;
  731. curr->exec_start = now;
  732. schedstat_set(curr->statistics.exec_max,
  733. max(delta_exec, curr->statistics.exec_max));
  734. curr->sum_exec_runtime += delta_exec;
  735. schedstat_add(cfs_rq->exec_clock, delta_exec);
  736. curr->vruntime += calc_delta_fair(delta_exec, curr);
  737. update_min_vruntime(cfs_rq);
  738. if (entity_is_task(curr)) {
  739. struct task_struct *curtask = task_of(curr);
  740. trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
  741. cgroup_account_cputime(curtask, delta_exec);
  742. account_group_exec_runtime(curtask, delta_exec);
  743. }
  744. account_cfs_rq_runtime(cfs_rq, delta_exec);
  745. }
  746. static void update_curr_fair(struct rq *rq)
  747. {
  748. update_curr(cfs_rq_of(&rq->curr->se));
  749. }
  750. static inline void
  751. update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  752. {
  753. u64 wait_start, prev_wait_start;
  754. if (!schedstat_enabled())
  755. return;
  756. wait_start = rq_clock(rq_of(cfs_rq));
  757. prev_wait_start = schedstat_val(se->statistics.wait_start);
  758. if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
  759. likely(wait_start > prev_wait_start))
  760. wait_start -= prev_wait_start;
  761. __schedstat_set(se->statistics.wait_start, wait_start);
  762. }
  763. static inline void
  764. update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  765. {
  766. struct task_struct *p;
  767. u64 delta;
  768. if (!schedstat_enabled())
  769. return;
  770. delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
  771. if (entity_is_task(se)) {
  772. p = task_of(se);
  773. if (task_on_rq_migrating(p)) {
  774. /*
  775. * Preserve migrating task's wait time so wait_start
  776. * time stamp can be adjusted to accumulate wait time
  777. * prior to migration.
  778. */
  779. __schedstat_set(se->statistics.wait_start, delta);
  780. return;
  781. }
  782. trace_sched_stat_wait(p, delta);
  783. }
  784. __schedstat_set(se->statistics.wait_max,
  785. max(schedstat_val(se->statistics.wait_max), delta));
  786. __schedstat_inc(se->statistics.wait_count);
  787. __schedstat_add(se->statistics.wait_sum, delta);
  788. __schedstat_set(se->statistics.wait_start, 0);
  789. }
  790. static inline void
  791. update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
  792. {
  793. struct task_struct *tsk = NULL;
  794. u64 sleep_start, block_start;
  795. if (!schedstat_enabled())
  796. return;
  797. sleep_start = schedstat_val(se->statistics.sleep_start);
  798. block_start = schedstat_val(se->statistics.block_start);
  799. if (entity_is_task(se))
  800. tsk = task_of(se);
  801. if (sleep_start) {
  802. u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
  803. if ((s64)delta < 0)
  804. delta = 0;
  805. if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
  806. __schedstat_set(se->statistics.sleep_max, delta);
  807. __schedstat_set(se->statistics.sleep_start, 0);
  808. __schedstat_add(se->statistics.sum_sleep_runtime, delta);
  809. if (tsk) {
  810. account_scheduler_latency(tsk, delta >> 10, 1);
  811. trace_sched_stat_sleep(tsk, delta);
  812. }
  813. }
  814. if (block_start) {
  815. u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
  816. if ((s64)delta < 0)
  817. delta = 0;
  818. if (unlikely(delta > schedstat_val(se->statistics.block_max)))
  819. __schedstat_set(se->statistics.block_max, delta);
  820. __schedstat_set(se->statistics.block_start, 0);
  821. __schedstat_add(se->statistics.sum_sleep_runtime, delta);
  822. if (tsk) {
  823. if (tsk->in_iowait) {
  824. __schedstat_add(se->statistics.iowait_sum, delta);
  825. __schedstat_inc(se->statistics.iowait_count);
  826. trace_sched_stat_iowait(tsk, delta);
  827. }
  828. trace_sched_stat_blocked(tsk, delta);
  829. /*
  830. * Blocking time is in units of nanosecs, so shift by
  831. * 20 to get a milliseconds-range estimation of the
  832. * amount of time that the task spent sleeping:
  833. */
  834. if (unlikely(prof_on == SLEEP_PROFILING)) {
  835. profile_hits(SLEEP_PROFILING,
  836. (void *)get_wchan(tsk),
  837. delta >> 20);
  838. }
  839. account_scheduler_latency(tsk, delta >> 10, 0);
  840. }
  841. }
  842. }
  843. /*
  844. * Task is being enqueued - update stats:
  845. */
  846. static inline void
  847. update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
  848. {
  849. if (!schedstat_enabled())
  850. return;
  851. /*
  852. * Are we enqueueing a waiting task? (for current tasks
  853. * a dequeue/enqueue event is a NOP)
  854. */
  855. if (se != cfs_rq->curr)
  856. update_stats_wait_start(cfs_rq, se);
  857. if (flags & ENQUEUE_WAKEUP)
  858. update_stats_enqueue_sleeper(cfs_rq, se);
  859. }
  860. static inline void
  861. update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
  862. {
  863. if (!schedstat_enabled())
  864. return;
  865. /*
  866. * Mark the end of the wait period if dequeueing a
  867. * waiting task:
  868. */
  869. if (se != cfs_rq->curr)
  870. update_stats_wait_end(cfs_rq, se);
  871. if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
  872. struct task_struct *tsk = task_of(se);
  873. if (tsk->state & TASK_INTERRUPTIBLE)
  874. __schedstat_set(se->statistics.sleep_start,
  875. rq_clock(rq_of(cfs_rq)));
  876. if (tsk->state & TASK_UNINTERRUPTIBLE)
  877. __schedstat_set(se->statistics.block_start,
  878. rq_clock(rq_of(cfs_rq)));
  879. }
  880. }
  881. /*
  882. * We are picking a new current task - update its stats:
  883. */
  884. static inline void
  885. update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  886. {
  887. /*
  888. * We are starting a new run period:
  889. */
  890. se->exec_start = rq_clock_task(rq_of(cfs_rq));
  891. }
  892. /**************************************************
  893. * Scheduling class queueing methods:
  894. */
  895. #ifdef CONFIG_NUMA_BALANCING
  896. /*
  897. * Approximate time to scan a full NUMA task in ms. The task scan period is
  898. * calculated based on the tasks virtual memory size and
  899. * numa_balancing_scan_size.
  900. */
  901. unsigned int sysctl_numa_balancing_scan_period_min = 1000;
  902. unsigned int sysctl_numa_balancing_scan_period_max = 60000;
  903. /* Portion of address space to scan in MB */
  904. unsigned int sysctl_numa_balancing_scan_size = 256;
  905. /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
  906. unsigned int sysctl_numa_balancing_scan_delay = 1000;
  907. struct numa_group {
  908. refcount_t refcount;
  909. spinlock_t lock; /* nr_tasks, tasks */
  910. int nr_tasks;
  911. pid_t gid;
  912. int active_nodes;
  913. struct rcu_head rcu;
  914. unsigned long total_faults;
  915. unsigned long max_faults_cpu;
  916. /*
  917. * Faults_cpu is used to decide whether memory should move
  918. * towards the CPU. As a consequence, these stats are weighted
  919. * more by CPU use than by memory faults.
  920. */
  921. unsigned long *faults_cpu;
  922. unsigned long faults[];
  923. };
  924. /*
  925. * For functions that can be called in multiple contexts that permit reading
  926. * ->numa_group (see struct task_struct for locking rules).
  927. */
  928. static struct numa_group *deref_task_numa_group(struct task_struct *p)
  929. {
  930. return rcu_dereference_check(p->numa_group, p == current ||
  931. (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
  932. }
  933. static struct numa_group *deref_curr_numa_group(struct task_struct *p)
  934. {
  935. return rcu_dereference_protected(p->numa_group, p == current);
  936. }
  937. static inline unsigned long group_faults_priv(struct numa_group *ng);
  938. static inline unsigned long group_faults_shared(struct numa_group *ng);
  939. static unsigned int task_nr_scan_windows(struct task_struct *p)
  940. {
  941. unsigned long rss = 0;
  942. unsigned long nr_scan_pages;
  943. /*
  944. * Calculations based on RSS as non-present and empty pages are skipped
  945. * by the PTE scanner and NUMA hinting faults should be trapped based
  946. * on resident pages
  947. */
  948. nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
  949. rss = get_mm_rss(p->mm);
  950. if (!rss)
  951. rss = nr_scan_pages;
  952. rss = round_up(rss, nr_scan_pages);
  953. return rss / nr_scan_pages;
  954. }
  955. /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
  956. #define MAX_SCAN_WINDOW 2560
  957. static unsigned int task_scan_min(struct task_struct *p)
  958. {
  959. unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
  960. unsigned int scan, floor;
  961. unsigned int windows = 1;
  962. if (scan_size < MAX_SCAN_WINDOW)
  963. windows = MAX_SCAN_WINDOW / scan_size;
  964. floor = 1000 / windows;
  965. scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
  966. return max_t(unsigned int, floor, scan);
  967. }
  968. static unsigned int task_scan_start(struct task_struct *p)
  969. {
  970. unsigned long smin = task_scan_min(p);
  971. unsigned long period = smin;
  972. struct numa_group *ng;
  973. /* Scale the maximum scan period with the amount of shared memory. */
  974. rcu_read_lock();
  975. ng = rcu_dereference(p->numa_group);
  976. if (ng) {
  977. unsigned long shared = group_faults_shared(ng);
  978. unsigned long private = group_faults_priv(ng);
  979. period *= refcount_read(&ng->refcount);
  980. period *= shared + 1;
  981. period /= private + shared + 1;
  982. }
  983. rcu_read_unlock();
  984. return max(smin, period);
  985. }
  986. static unsigned int task_scan_max(struct task_struct *p)
  987. {
  988. unsigned long smin = task_scan_min(p);
  989. unsigned long smax;
  990. struct numa_group *ng;
  991. /* Watch for min being lower than max due to floor calculations */
  992. smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
  993. /* Scale the maximum scan period with the amount of shared memory. */
  994. ng = deref_curr_numa_group(p);
  995. if (ng) {
  996. unsigned long shared = group_faults_shared(ng);
  997. unsigned long private = group_faults_priv(ng);
  998. unsigned long period = smax;
  999. period *= refcount_read(&ng->refcount);
  1000. period *= shared + 1;
  1001. period /= private + shared + 1;
  1002. smax = max(smax, period);
  1003. }
  1004. return max(smin, smax);
  1005. }
  1006. static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
  1007. {
  1008. rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
  1009. rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
  1010. }
  1011. static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
  1012. {
  1013. rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
  1014. rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
  1015. }
  1016. /* Shared or private faults. */
  1017. #define NR_NUMA_HINT_FAULT_TYPES 2
  1018. /* Memory and CPU locality */
  1019. #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
  1020. /* Averaged statistics, and temporary buffers. */
  1021. #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
  1022. pid_t task_numa_group_id(struct task_struct *p)
  1023. {
  1024. struct numa_group *ng;
  1025. pid_t gid = 0;
  1026. rcu_read_lock();
  1027. ng = rcu_dereference(p->numa_group);
  1028. if (ng)
  1029. gid = ng->gid;
  1030. rcu_read_unlock();
  1031. return gid;
  1032. }
  1033. /*
  1034. * The averaged statistics, shared & private, memory & CPU,
  1035. * occupy the first half of the array. The second half of the
  1036. * array is for current counters, which are averaged into the
  1037. * first set by task_numa_placement.
  1038. */
  1039. static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
  1040. {
  1041. return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
  1042. }
  1043. static inline unsigned long task_faults(struct task_struct *p, int nid)
  1044. {
  1045. if (!p->numa_faults)
  1046. return 0;
  1047. return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
  1048. p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
  1049. }
  1050. static inline unsigned long group_faults(struct task_struct *p, int nid)
  1051. {
  1052. struct numa_group *ng = deref_task_numa_group(p);
  1053. if (!ng)
  1054. return 0;
  1055. return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
  1056. ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
  1057. }
  1058. static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
  1059. {
  1060. return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
  1061. group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
  1062. }
  1063. static inline unsigned long group_faults_priv(struct numa_group *ng)
  1064. {
  1065. unsigned long faults = 0;
  1066. int node;
  1067. for_each_online_node(node) {
  1068. faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
  1069. }
  1070. return faults;
  1071. }
  1072. static inline unsigned long group_faults_shared(struct numa_group *ng)
  1073. {
  1074. unsigned long faults = 0;
  1075. int node;
  1076. for_each_online_node(node) {
  1077. faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
  1078. }
  1079. return faults;
  1080. }
  1081. /*
  1082. * A node triggering more than 1/3 as many NUMA faults as the maximum is
  1083. * considered part of a numa group's pseudo-interleaving set. Migrations
  1084. * between these nodes are slowed down, to allow things to settle down.
  1085. */
  1086. #define ACTIVE_NODE_FRACTION 3
  1087. static bool numa_is_active_node(int nid, struct numa_group *ng)
  1088. {
  1089. return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
  1090. }
  1091. /* Handle placement on systems where not all nodes are directly connected. */
  1092. static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
  1093. int maxdist, bool task)
  1094. {
  1095. unsigned long score = 0;
  1096. int node;
  1097. /*
  1098. * All nodes are directly connected, and the same distance
  1099. * from each other. No need for fancy placement algorithms.
  1100. */
  1101. if (sched_numa_topology_type == NUMA_DIRECT)
  1102. return 0;
  1103. /*
  1104. * This code is called for each node, introducing N^2 complexity,
  1105. * which should be ok given the number of nodes rarely exceeds 8.
  1106. */
  1107. for_each_online_node(node) {
  1108. unsigned long faults;
  1109. int dist = node_distance(nid, node);
  1110. /*
  1111. * The furthest away nodes in the system are not interesting
  1112. * for placement; nid was already counted.
  1113. */
  1114. if (dist == sched_max_numa_distance || node == nid)
  1115. continue;
  1116. /*
  1117. * On systems with a backplane NUMA topology, compare groups
  1118. * of nodes, and move tasks towards the group with the most
  1119. * memory accesses. When comparing two nodes at distance
  1120. * "hoplimit", only nodes closer by than "hoplimit" are part
  1121. * of each group. Skip other nodes.
  1122. */
  1123. if (sched_numa_topology_type == NUMA_BACKPLANE &&
  1124. dist >= maxdist)
  1125. continue;
  1126. /* Add up the faults from nearby nodes. */
  1127. if (task)
  1128. faults = task_faults(p, node);
  1129. else
  1130. faults = group_faults(p, node);
  1131. /*
  1132. * On systems with a glueless mesh NUMA topology, there are
  1133. * no fixed "groups of nodes". Instead, nodes that are not
  1134. * directly connected bounce traffic through intermediate
  1135. * nodes; a numa_group can occupy any set of nodes.
  1136. * The further away a node is, the less the faults count.
  1137. * This seems to result in good task placement.
  1138. */
  1139. if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
  1140. faults *= (sched_max_numa_distance - dist);
  1141. faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
  1142. }
  1143. score += faults;
  1144. }
  1145. return score;
  1146. }
  1147. /*
  1148. * These return the fraction of accesses done by a particular task, or
  1149. * task group, on a particular numa node. The group weight is given a
  1150. * larger multiplier, in order to group tasks together that are almost
  1151. * evenly spread out between numa nodes.
  1152. */
  1153. static inline unsigned long task_weight(struct task_struct *p, int nid,
  1154. int dist)
  1155. {
  1156. unsigned long faults, total_faults;
  1157. if (!p->numa_faults)
  1158. return 0;
  1159. total_faults = p->total_numa_faults;
  1160. if (!total_faults)
  1161. return 0;
  1162. faults = task_faults(p, nid);
  1163. faults += score_nearby_nodes(p, nid, dist, true);
  1164. return 1000 * faults / total_faults;
  1165. }
  1166. static inline unsigned long group_weight(struct task_struct *p, int nid,
  1167. int dist)
  1168. {
  1169. struct numa_group *ng = deref_task_numa_group(p);
  1170. unsigned long faults, total_faults;
  1171. if (!ng)
  1172. return 0;
  1173. total_faults = ng->total_faults;
  1174. if (!total_faults)
  1175. return 0;
  1176. faults = group_faults(p, nid);
  1177. faults += score_nearby_nodes(p, nid, dist, false);
  1178. return 1000 * faults / total_faults;
  1179. }
  1180. bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
  1181. int src_nid, int dst_cpu)
  1182. {
  1183. struct numa_group *ng = deref_curr_numa_group(p);
  1184. int dst_nid = cpu_to_node(dst_cpu);
  1185. int last_cpupid, this_cpupid;
  1186. this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
  1187. last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
  1188. /*
  1189. * Allow first faults or private faults to migrate immediately early in
  1190. * the lifetime of a task. The magic number 4 is based on waiting for
  1191. * two full passes of the "multi-stage node selection" test that is
  1192. * executed below.
  1193. */
  1194. if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
  1195. (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
  1196. return true;
  1197. /*
  1198. * Multi-stage node selection is used in conjunction with a periodic
  1199. * migration fault to build a temporal task<->page relation. By using
  1200. * a two-stage filter we remove short/unlikely relations.
  1201. *
  1202. * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
  1203. * a task's usage of a particular page (n_p) per total usage of this
  1204. * page (n_t) (in a given time-span) to a probability.
  1205. *
  1206. * Our periodic faults will sample this probability and getting the
  1207. * same result twice in a row, given these samples are fully
  1208. * independent, is then given by P(n)^2, provided our sample period
  1209. * is sufficiently short compared to the usage pattern.
  1210. *
  1211. * This quadric squishes small probabilities, making it less likely we
  1212. * act on an unlikely task<->page relation.
  1213. */
  1214. if (!cpupid_pid_unset(last_cpupid) &&
  1215. cpupid_to_nid(last_cpupid) != dst_nid)
  1216. return false;
  1217. /* Always allow migrate on private faults */
  1218. if (cpupid_match_pid(p, last_cpupid))
  1219. return true;
  1220. /* A shared fault, but p->numa_group has not been set up yet. */
  1221. if (!ng)
  1222. return true;
  1223. /*
  1224. * Destination node is much more heavily used than the source
  1225. * node? Allow migration.
  1226. */
  1227. if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
  1228. ACTIVE_NODE_FRACTION)
  1229. return true;
  1230. /*
  1231. * Distribute memory according to CPU & memory use on each node,
  1232. * with 3/4 hysteresis to avoid unnecessary memory migrations:
  1233. *
  1234. * faults_cpu(dst) 3 faults_cpu(src)
  1235. * --------------- * - > ---------------
  1236. * faults_mem(dst) 4 faults_mem(src)
  1237. */
  1238. return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
  1239. group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
  1240. }
  1241. /*
  1242. * 'numa_type' describes the node at the moment of load balancing.
  1243. */
  1244. enum numa_type {
  1245. /* The node has spare capacity that can be used to run more tasks. */
  1246. node_has_spare = 0,
  1247. /*
  1248. * The node is fully used and the tasks don't compete for more CPU
  1249. * cycles. Nevertheless, some tasks might wait before running.
  1250. */
  1251. node_fully_busy,
  1252. /*
  1253. * The node is overloaded and can't provide expected CPU cycles to all
  1254. * tasks.
  1255. */
  1256. node_overloaded
  1257. };
  1258. /* Cached statistics for all CPUs within a node */
  1259. struct numa_stats {
  1260. unsigned long load;
  1261. unsigned long runnable;
  1262. unsigned long util;
  1263. /* Total compute capacity of CPUs on a node */
  1264. unsigned long compute_capacity;
  1265. unsigned int nr_running;
  1266. unsigned int weight;
  1267. enum numa_type node_type;
  1268. int idle_cpu;
  1269. };
  1270. static inline bool is_core_idle(int cpu)
  1271. {
  1272. #ifdef CONFIG_SCHED_SMT
  1273. int sibling;
  1274. for_each_cpu(sibling, cpu_smt_mask(cpu)) {
  1275. if (cpu == sibling)
  1276. continue;
  1277. if (!idle_cpu(sibling))
  1278. return false;
  1279. }
  1280. #endif
  1281. return true;
  1282. }
  1283. struct task_numa_env {
  1284. struct task_struct *p;
  1285. int src_cpu, src_nid;
  1286. int dst_cpu, dst_nid;
  1287. struct numa_stats src_stats, dst_stats;
  1288. int imbalance_pct;
  1289. int dist;
  1290. struct task_struct *best_task;
  1291. long best_imp;
  1292. int best_cpu;
  1293. };
  1294. static unsigned long cpu_load(struct rq *rq);
  1295. static unsigned long cpu_runnable(struct rq *rq);
  1296. static unsigned long cpu_util(int cpu);
  1297. static inline long adjust_numa_imbalance(int imbalance, int nr_running);
  1298. static inline enum
  1299. numa_type numa_classify(unsigned int imbalance_pct,
  1300. struct numa_stats *ns)
  1301. {
  1302. if ((ns->nr_running > ns->weight) &&
  1303. (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
  1304. ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
  1305. return node_overloaded;
  1306. if ((ns->nr_running < ns->weight) ||
  1307. (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
  1308. ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
  1309. return node_has_spare;
  1310. return node_fully_busy;
  1311. }
  1312. #ifdef CONFIG_SCHED_SMT
  1313. /* Forward declarations of select_idle_sibling helpers */
  1314. static inline bool test_idle_cores(int cpu, bool def);
  1315. static inline int numa_idle_core(int idle_core, int cpu)
  1316. {
  1317. if (!static_branch_likely(&sched_smt_present) ||
  1318. idle_core >= 0 || !test_idle_cores(cpu, false))
  1319. return idle_core;
  1320. /*
  1321. * Prefer cores instead of packing HT siblings
  1322. * and triggering future load balancing.
  1323. */
  1324. if (is_core_idle(cpu))
  1325. idle_core = cpu;
  1326. return idle_core;
  1327. }
  1328. #else
  1329. static inline int numa_idle_core(int idle_core, int cpu)
  1330. {
  1331. return idle_core;
  1332. }
  1333. #endif
  1334. /*
  1335. * Gather all necessary information to make NUMA balancing placement
  1336. * decisions that are compatible with standard load balancer. This
  1337. * borrows code and logic from update_sg_lb_stats but sharing a
  1338. * common implementation is impractical.
  1339. */
  1340. static void update_numa_stats(struct task_numa_env *env,
  1341. struct numa_stats *ns, int nid,
  1342. bool find_idle)
  1343. {
  1344. int cpu, idle_core = -1;
  1345. memset(ns, 0, sizeof(*ns));
  1346. ns->idle_cpu = -1;
  1347. rcu_read_lock();
  1348. for_each_cpu(cpu, cpumask_of_node(nid)) {
  1349. struct rq *rq = cpu_rq(cpu);
  1350. ns->load += cpu_load(rq);
  1351. ns->runnable += cpu_runnable(rq);
  1352. ns->util += cpu_util(cpu);
  1353. ns->nr_running += rq->cfs.h_nr_running;
  1354. ns->compute_capacity += capacity_of(cpu);
  1355. if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
  1356. if (READ_ONCE(rq->numa_migrate_on) ||
  1357. !cpumask_test_cpu(cpu, env->p->cpus_ptr))
  1358. continue;
  1359. if (ns->idle_cpu == -1)
  1360. ns->idle_cpu = cpu;
  1361. idle_core = numa_idle_core(idle_core, cpu);
  1362. }
  1363. }
  1364. rcu_read_unlock();
  1365. ns->weight = cpumask_weight(cpumask_of_node(nid));
  1366. ns->node_type = numa_classify(env->imbalance_pct, ns);
  1367. if (idle_core >= 0)
  1368. ns->idle_cpu = idle_core;
  1369. }
  1370. static void task_numa_assign(struct task_numa_env *env,
  1371. struct task_struct *p, long imp)
  1372. {
  1373. struct rq *rq = cpu_rq(env->dst_cpu);
  1374. /* Check if run-queue part of active NUMA balance. */
  1375. if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
  1376. int cpu;
  1377. int start = env->dst_cpu;
  1378. /* Find alternative idle CPU. */
  1379. for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) {
  1380. if (cpu == env->best_cpu || !idle_cpu(cpu) ||
  1381. !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
  1382. continue;
  1383. }
  1384. env->dst_cpu = cpu;
  1385. rq = cpu_rq(env->dst_cpu);
  1386. if (!xchg(&rq->numa_migrate_on, 1))
  1387. goto assign;
  1388. }
  1389. /* Failed to find an alternative idle CPU */
  1390. return;
  1391. }
  1392. assign:
  1393. /*
  1394. * Clear previous best_cpu/rq numa-migrate flag, since task now
  1395. * found a better CPU to move/swap.
  1396. */
  1397. if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
  1398. rq = cpu_rq(env->best_cpu);
  1399. WRITE_ONCE(rq->numa_migrate_on, 0);
  1400. }
  1401. if (env->best_task)
  1402. put_task_struct(env->best_task);
  1403. if (p)
  1404. get_task_struct(p);
  1405. env->best_task = p;
  1406. env->best_imp = imp;
  1407. env->best_cpu = env->dst_cpu;
  1408. }
  1409. static bool load_too_imbalanced(long src_load, long dst_load,
  1410. struct task_numa_env *env)
  1411. {
  1412. long imb, old_imb;
  1413. long orig_src_load, orig_dst_load;
  1414. long src_capacity, dst_capacity;
  1415. /*
  1416. * The load is corrected for the CPU capacity available on each node.
  1417. *
  1418. * src_load dst_load
  1419. * ------------ vs ---------
  1420. * src_capacity dst_capacity
  1421. */
  1422. src_capacity = env->src_stats.compute_capacity;
  1423. dst_capacity = env->dst_stats.compute_capacity;
  1424. imb = abs(dst_load * src_capacity - src_load * dst_capacity);
  1425. orig_src_load = env->src_stats.load;
  1426. orig_dst_load = env->dst_stats.load;
  1427. old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
  1428. /* Would this change make things worse? */
  1429. return (imb > old_imb);
  1430. }
  1431. /*
  1432. * Maximum NUMA importance can be 1998 (2*999);
  1433. * SMALLIMP @ 30 would be close to 1998/64.
  1434. * Used to deter task migration.
  1435. */
  1436. #define SMALLIMP 30
  1437. /*
  1438. * This checks if the overall compute and NUMA accesses of the system would
  1439. * be improved if the source tasks was migrated to the target dst_cpu taking
  1440. * into account that it might be best if task running on the dst_cpu should
  1441. * be exchanged with the source task
  1442. */
  1443. static bool task_numa_compare(struct task_numa_env *env,
  1444. long taskimp, long groupimp, bool maymove)
  1445. {
  1446. struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
  1447. struct rq *dst_rq = cpu_rq(env->dst_cpu);
  1448. long imp = p_ng ? groupimp : taskimp;
  1449. struct task_struct *cur;
  1450. long src_load, dst_load;
  1451. int dist = env->dist;
  1452. long moveimp = imp;
  1453. long load;
  1454. bool stopsearch = false;
  1455. if (READ_ONCE(dst_rq->numa_migrate_on))
  1456. return false;
  1457. rcu_read_lock();
  1458. cur = rcu_dereference(dst_rq->curr);
  1459. if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
  1460. cur = NULL;
  1461. /*
  1462. * Because we have preemption enabled we can get migrated around and
  1463. * end try selecting ourselves (current == env->p) as a swap candidate.
  1464. */
  1465. if (cur == env->p) {
  1466. stopsearch = true;
  1467. goto unlock;
  1468. }
  1469. if (!cur) {
  1470. if (maymove && moveimp >= env->best_imp)
  1471. goto assign;
  1472. else
  1473. goto unlock;
  1474. }
  1475. /* Skip this swap candidate if cannot move to the source cpu. */
  1476. if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
  1477. goto unlock;
  1478. /*
  1479. * Skip this swap candidate if it is not moving to its preferred
  1480. * node and the best task is.
  1481. */
  1482. if (env->best_task &&
  1483. env->best_task->numa_preferred_nid == env->src_nid &&
  1484. cur->numa_preferred_nid != env->src_nid) {
  1485. goto unlock;
  1486. }
  1487. /*
  1488. * "imp" is the fault differential for the source task between the
  1489. * source and destination node. Calculate the total differential for
  1490. * the source task and potential destination task. The more negative
  1491. * the value is, the more remote accesses that would be expected to
  1492. * be incurred if the tasks were swapped.
  1493. *
  1494. * If dst and source tasks are in the same NUMA group, or not
  1495. * in any group then look only at task weights.
  1496. */
  1497. cur_ng = rcu_dereference(cur->numa_group);
  1498. if (cur_ng == p_ng) {
  1499. imp = taskimp + task_weight(cur, env->src_nid, dist) -
  1500. task_weight(cur, env->dst_nid, dist);
  1501. /*
  1502. * Add some hysteresis to prevent swapping the
  1503. * tasks within a group over tiny differences.
  1504. */
  1505. if (cur_ng)
  1506. imp -= imp / 16;
  1507. } else {
  1508. /*
  1509. * Compare the group weights. If a task is all by itself
  1510. * (not part of a group), use the task weight instead.
  1511. */
  1512. if (cur_ng && p_ng)
  1513. imp += group_weight(cur, env->src_nid, dist) -
  1514. group_weight(cur, env->dst_nid, dist);
  1515. else
  1516. imp += task_weight(cur, env->src_nid, dist) -
  1517. task_weight(cur, env->dst_nid, dist);
  1518. }
  1519. /* Discourage picking a task already on its preferred node */
  1520. if (cur->numa_preferred_nid == env->dst_nid)
  1521. imp -= imp / 16;
  1522. /*
  1523. * Encourage picking a task that moves to its preferred node.
  1524. * This potentially makes imp larger than it's maximum of
  1525. * 1998 (see SMALLIMP and task_weight for why) but in this
  1526. * case, it does not matter.
  1527. */
  1528. if (cur->numa_preferred_nid == env->src_nid)
  1529. imp += imp / 8;
  1530. if (maymove && moveimp > imp && moveimp > env->best_imp) {
  1531. imp = moveimp;
  1532. cur = NULL;
  1533. goto assign;
  1534. }
  1535. /*
  1536. * Prefer swapping with a task moving to its preferred node over a
  1537. * task that is not.
  1538. */
  1539. if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
  1540. env->best_task->numa_preferred_nid != env->src_nid) {
  1541. goto assign;
  1542. }
  1543. /*
  1544. * If the NUMA importance is less than SMALLIMP,
  1545. * task migration might only result in ping pong
  1546. * of tasks and also hurt performance due to cache
  1547. * misses.
  1548. */
  1549. if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
  1550. goto unlock;
  1551. /*
  1552. * In the overloaded case, try and keep the load balanced.
  1553. */
  1554. load = task_h_load(env->p) - task_h_load(cur);
  1555. if (!load)
  1556. goto assign;
  1557. dst_load = env->dst_stats.load + load;
  1558. src_load = env->src_stats.load - load;
  1559. if (load_too_imbalanced(src_load, dst_load, env))
  1560. goto unlock;
  1561. assign:
  1562. /* Evaluate an idle CPU for a task numa move. */
  1563. if (!cur) {
  1564. int cpu = env->dst_stats.idle_cpu;
  1565. /* Nothing cached so current CPU went idle since the search. */
  1566. if (cpu < 0)
  1567. cpu = env->dst_cpu;
  1568. /*
  1569. * If the CPU is no longer truly idle and the previous best CPU
  1570. * is, keep using it.
  1571. */
  1572. if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
  1573. idle_cpu(env->best_cpu)) {
  1574. cpu = env->best_cpu;
  1575. }
  1576. env->dst_cpu = cpu;
  1577. }
  1578. task_numa_assign(env, cur, imp);
  1579. /*
  1580. * If a move to idle is allowed because there is capacity or load
  1581. * balance improves then stop the search. While a better swap
  1582. * candidate may exist, a search is not free.
  1583. */
  1584. if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
  1585. stopsearch = true;
  1586. /*
  1587. * If a swap candidate must be identified and the current best task
  1588. * moves its preferred node then stop the search.
  1589. */
  1590. if (!maymove && env->best_task &&
  1591. env->best_task->numa_preferred_nid == env->src_nid) {
  1592. stopsearch = true;
  1593. }
  1594. unlock:
  1595. rcu_read_unlock();
  1596. return stopsearch;
  1597. }
  1598. static void task_numa_find_cpu(struct task_numa_env *env,
  1599. long taskimp, long groupimp)
  1600. {
  1601. bool maymove = false;
  1602. int cpu;
  1603. /*
  1604. * If dst node has spare capacity, then check if there is an
  1605. * imbalance that would be overruled by the load balancer.
  1606. */
  1607. if (env->dst_stats.node_type == node_has_spare) {
  1608. unsigned int imbalance;
  1609. int src_running, dst_running;
  1610. /*
  1611. * Would movement cause an imbalance? Note that if src has
  1612. * more running tasks that the imbalance is ignored as the
  1613. * move improves the imbalance from the perspective of the
  1614. * CPU load balancer.
  1615. * */
  1616. src_running = env->src_stats.nr_running - 1;
  1617. dst_running = env->dst_stats.nr_running + 1;
  1618. imbalance = max(0, dst_running - src_running);
  1619. imbalance = adjust_numa_imbalance(imbalance, dst_running);
  1620. /* Use idle CPU if there is no imbalance */
  1621. if (!imbalance) {
  1622. maymove = true;
  1623. if (env->dst_stats.idle_cpu >= 0) {
  1624. env->dst_cpu = env->dst_stats.idle_cpu;
  1625. task_numa_assign(env, NULL, 0);
  1626. return;
  1627. }
  1628. }
  1629. } else {
  1630. long src_load, dst_load, load;
  1631. /*
  1632. * If the improvement from just moving env->p direction is better
  1633. * than swapping tasks around, check if a move is possible.
  1634. */
  1635. load = task_h_load(env->p);
  1636. dst_load = env->dst_stats.load + load;
  1637. src_load = env->src_stats.load - load;
  1638. maymove = !load_too_imbalanced(src_load, dst_load, env);
  1639. }
  1640. for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
  1641. /* Skip this CPU if the source task cannot migrate */
  1642. if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
  1643. continue;
  1644. env->dst_cpu = cpu;
  1645. if (task_numa_compare(env, taskimp, groupimp, maymove))
  1646. break;
  1647. }
  1648. }
  1649. static int task_numa_migrate(struct task_struct *p)
  1650. {
  1651. struct task_numa_env env = {
  1652. .p = p,
  1653. .src_cpu = task_cpu(p),
  1654. .src_nid = task_node(p),
  1655. .imbalance_pct = 112,
  1656. .best_task = NULL,
  1657. .best_imp = 0,
  1658. .best_cpu = -1,
  1659. };
  1660. unsigned long taskweight, groupweight;
  1661. struct sched_domain *sd;
  1662. long taskimp, groupimp;
  1663. struct numa_group *ng;
  1664. struct rq *best_rq;
  1665. int nid, ret, dist;
  1666. /*
  1667. * Pick the lowest SD_NUMA domain, as that would have the smallest
  1668. * imbalance and would be the first to start moving tasks about.
  1669. *
  1670. * And we want to avoid any moving of tasks about, as that would create
  1671. * random movement of tasks -- counter the numa conditions we're trying
  1672. * to satisfy here.
  1673. */
  1674. rcu_read_lock();
  1675. sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
  1676. if (sd)
  1677. env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
  1678. rcu_read_unlock();
  1679. /*
  1680. * Cpusets can break the scheduler domain tree into smaller
  1681. * balance domains, some of which do not cross NUMA boundaries.
  1682. * Tasks that are "trapped" in such domains cannot be migrated
  1683. * elsewhere, so there is no point in (re)trying.
  1684. */
  1685. if (unlikely(!sd)) {
  1686. sched_setnuma(p, task_node(p));
  1687. return -EINVAL;
  1688. }
  1689. env.dst_nid = p->numa_preferred_nid;
  1690. dist = env.dist = node_distance(env.src_nid, env.dst_nid);
  1691. taskweight = task_weight(p, env.src_nid, dist);
  1692. groupweight = group_weight(p, env.src_nid, dist);
  1693. update_numa_stats(&env, &env.src_stats, env.src_nid, false);
  1694. taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
  1695. groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
  1696. update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
  1697. /* Try to find a spot on the preferred nid. */
  1698. task_numa_find_cpu(&env, taskimp, groupimp);
  1699. /*
  1700. * Look at other nodes in these cases:
  1701. * - there is no space available on the preferred_nid
  1702. * - the task is part of a numa_group that is interleaved across
  1703. * multiple NUMA nodes; in order to better consolidate the group,
  1704. * we need to check other locations.
  1705. */
  1706. ng = deref_curr_numa_group(p);
  1707. if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
  1708. for_each_online_node(nid) {
  1709. if (nid == env.src_nid || nid == p->numa_preferred_nid)
  1710. continue;
  1711. dist = node_distance(env.src_nid, env.dst_nid);
  1712. if (sched_numa_topology_type == NUMA_BACKPLANE &&
  1713. dist != env.dist) {
  1714. taskweight = task_weight(p, env.src_nid, dist);
  1715. groupweight = group_weight(p, env.src_nid, dist);
  1716. }
  1717. /* Only consider nodes where both task and groups benefit */
  1718. taskimp = task_weight(p, nid, dist) - taskweight;
  1719. groupimp = group_weight(p, nid, dist) - groupweight;
  1720. if (taskimp < 0 && groupimp < 0)
  1721. continue;
  1722. env.dist = dist;
  1723. env.dst_nid = nid;
  1724. update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
  1725. task_numa_find_cpu(&env, taskimp, groupimp);
  1726. }
  1727. }
  1728. /*
  1729. * If the task is part of a workload that spans multiple NUMA nodes,
  1730. * and is migrating into one of the workload's active nodes, remember
  1731. * this node as the task's preferred numa node, so the workload can
  1732. * settle down.
  1733. * A task that migrated to a second choice node will be better off
  1734. * trying for a better one later. Do not set the preferred node here.
  1735. */
  1736. if (ng) {
  1737. if (env.best_cpu == -1)
  1738. nid = env.src_nid;
  1739. else
  1740. nid = cpu_to_node(env.best_cpu);
  1741. if (nid != p->numa_preferred_nid)
  1742. sched_setnuma(p, nid);
  1743. }
  1744. /* No better CPU than the current one was found. */
  1745. if (env.best_cpu == -1) {
  1746. trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
  1747. return -EAGAIN;
  1748. }
  1749. best_rq = cpu_rq(env.best_cpu);
  1750. if (env.best_task == NULL) {
  1751. ret = migrate_task_to(p, env.best_cpu);
  1752. WRITE_ONCE(best_rq->numa_migrate_on, 0);
  1753. if (ret != 0)
  1754. trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
  1755. return ret;
  1756. }
  1757. ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
  1758. WRITE_ONCE(best_rq->numa_migrate_on, 0);
  1759. if (ret != 0)
  1760. trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
  1761. put_task_struct(env.best_task);
  1762. return ret;
  1763. }
  1764. /* Attempt to migrate a task to a CPU on the preferred node. */
  1765. static void numa_migrate_preferred(struct task_struct *p)
  1766. {
  1767. unsigned long interval = HZ;
  1768. /* This task has no NUMA fault statistics yet */
  1769. if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
  1770. return;
  1771. /* Periodically retry migrating the task to the preferred node */
  1772. interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
  1773. p->numa_migrate_retry = jiffies + interval;
  1774. /* Success if task is already running on preferred CPU */
  1775. if (task_node(p) == p->numa_preferred_nid)
  1776. return;
  1777. /* Otherwise, try migrate to a CPU on the preferred node */
  1778. task_numa_migrate(p);
  1779. }
  1780. /*
  1781. * Find out how many nodes on the workload is actively running on. Do this by
  1782. * tracking the nodes from which NUMA hinting faults are triggered. This can
  1783. * be different from the set of nodes where the workload's memory is currently
  1784. * located.
  1785. */
  1786. static void numa_group_count_active_nodes(struct numa_group *numa_group)
  1787. {
  1788. unsigned long faults, max_faults = 0;
  1789. int nid, active_nodes = 0;
  1790. for_each_online_node(nid) {
  1791. faults = group_faults_cpu(numa_group, nid);
  1792. if (faults > max_faults)
  1793. max_faults = faults;
  1794. }
  1795. for_each_online_node(nid) {
  1796. faults = group_faults_cpu(numa_group, nid);
  1797. if (faults * ACTIVE_NODE_FRACTION > max_faults)
  1798. active_nodes++;
  1799. }
  1800. numa_group->max_faults_cpu = max_faults;
  1801. numa_group->active_nodes = active_nodes;
  1802. }
  1803. /*
  1804. * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
  1805. * increments. The more local the fault statistics are, the higher the scan
  1806. * period will be for the next scan window. If local/(local+remote) ratio is
  1807. * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
  1808. * the scan period will decrease. Aim for 70% local accesses.
  1809. */
  1810. #define NUMA_PERIOD_SLOTS 10
  1811. #define NUMA_PERIOD_THRESHOLD 7
  1812. /*
  1813. * Increase the scan period (slow down scanning) if the majority of
  1814. * our memory is already on our local node, or if the majority of
  1815. * the page accesses are shared with other processes.
  1816. * Otherwise, decrease the scan period.
  1817. */
  1818. static void update_task_scan_period(struct task_struct *p,
  1819. unsigned long shared, unsigned long private)
  1820. {
  1821. unsigned int period_slot;
  1822. int lr_ratio, ps_ratio;
  1823. int diff;
  1824. unsigned long remote = p->numa_faults_locality[0];
  1825. unsigned long local = p->numa_faults_locality[1];
  1826. /*
  1827. * If there were no record hinting faults then either the task is
  1828. * completely idle or all activity is areas that are not of interest
  1829. * to automatic numa balancing. Related to that, if there were failed
  1830. * migration then it implies we are migrating too quickly or the local
  1831. * node is overloaded. In either case, scan slower
  1832. */
  1833. if (local + shared == 0 || p->numa_faults_locality[2]) {
  1834. p->numa_scan_period = min(p->numa_scan_period_max,
  1835. p->numa_scan_period << 1);
  1836. p->mm->numa_next_scan = jiffies +
  1837. msecs_to_jiffies(p->numa_scan_period);
  1838. return;
  1839. }
  1840. /*
  1841. * Prepare to scale scan period relative to the current period.
  1842. * == NUMA_PERIOD_THRESHOLD scan period stays the same
  1843. * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
  1844. * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
  1845. */
  1846. period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
  1847. lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
  1848. ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
  1849. if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
  1850. /*
  1851. * Most memory accesses are local. There is no need to
  1852. * do fast NUMA scanning, since memory is already local.
  1853. */
  1854. int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
  1855. if (!slot)
  1856. slot = 1;
  1857. diff = slot * period_slot;
  1858. } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
  1859. /*
  1860. * Most memory accesses are shared with other tasks.
  1861. * There is no point in continuing fast NUMA scanning,
  1862. * since other tasks may just move the memory elsewhere.
  1863. */
  1864. int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
  1865. if (!slot)
  1866. slot = 1;
  1867. diff = slot * period_slot;
  1868. } else {
  1869. /*
  1870. * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
  1871. * yet they are not on the local NUMA node. Speed up
  1872. * NUMA scanning to get the memory moved over.
  1873. */
  1874. int ratio = max(lr_ratio, ps_ratio);
  1875. diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
  1876. }
  1877. p->numa_scan_period = clamp(p->numa_scan_period + diff,
  1878. task_scan_min(p), task_scan_max(p));
  1879. memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
  1880. }
  1881. /*
  1882. * Get the fraction of time the task has been running since the last
  1883. * NUMA placement cycle. The scheduler keeps similar statistics, but
  1884. * decays those on a 32ms period, which is orders of magnitude off
  1885. * from the dozens-of-seconds NUMA balancing period. Use the scheduler
  1886. * stats only if the task is so new there are no NUMA statistics yet.
  1887. */
  1888. static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
  1889. {
  1890. u64 runtime, delta, now;
  1891. /* Use the start of this time slice to avoid calculations. */
  1892. now = p->se.exec_start;
  1893. runtime = p->se.sum_exec_runtime;
  1894. if (p->last_task_numa_placement) {
  1895. delta = runtime - p->last_sum_exec_runtime;
  1896. *period = now - p->last_task_numa_placement;
  1897. /* Avoid time going backwards, prevent potential divide error: */
  1898. if (unlikely((s64)*period < 0))
  1899. *period = 0;
  1900. } else {
  1901. delta = p->se.avg.load_sum;
  1902. *period = LOAD_AVG_MAX;
  1903. }
  1904. p->last_sum_exec_runtime = runtime;
  1905. p->last_task_numa_placement = now;
  1906. return delta;
  1907. }
  1908. /*
  1909. * Determine the preferred nid for a task in a numa_group. This needs to
  1910. * be done in a way that produces consistent results with group_weight,
  1911. * otherwise workloads might not converge.
  1912. */
  1913. static int preferred_group_nid(struct task_struct *p, int nid)
  1914. {
  1915. nodemask_t nodes;
  1916. int dist;
  1917. /* Direct connections between all NUMA nodes. */
  1918. if (sched_numa_topology_type == NUMA_DIRECT)
  1919. return nid;
  1920. /*
  1921. * On a system with glueless mesh NUMA topology, group_weight
  1922. * scores nodes according to the number of NUMA hinting faults on
  1923. * both the node itself, and on nearby nodes.
  1924. */
  1925. if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
  1926. unsigned long score, max_score = 0;
  1927. int node, max_node = nid;
  1928. dist = sched_max_numa_distance;
  1929. for_each_online_node(node) {
  1930. score = group_weight(p, node, dist);
  1931. if (score > max_score) {
  1932. max_score = score;
  1933. max_node = node;
  1934. }
  1935. }
  1936. return max_node;
  1937. }
  1938. /*
  1939. * Finding the preferred nid in a system with NUMA backplane
  1940. * interconnect topology is more involved. The goal is to locate
  1941. * tasks from numa_groups near each other in the system, and
  1942. * untangle workloads from different sides of the system. This requires
  1943. * searching down the hierarchy of node groups, recursively searching
  1944. * inside the highest scoring group of nodes. The nodemask tricks
  1945. * keep the complexity of the search down.
  1946. */
  1947. nodes = node_online_map;
  1948. for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
  1949. unsigned long max_faults = 0;
  1950. nodemask_t max_group = NODE_MASK_NONE;
  1951. int a, b;
  1952. /* Are there nodes at this distance from each other? */
  1953. if (!find_numa_distance(dist))
  1954. continue;
  1955. for_each_node_mask(a, nodes) {
  1956. unsigned long faults = 0;
  1957. nodemask_t this_group;
  1958. nodes_clear(this_group);
  1959. /* Sum group's NUMA faults; includes a==b case. */
  1960. for_each_node_mask(b, nodes) {
  1961. if (node_distance(a, b) < dist) {
  1962. faults += group_faults(p, b);
  1963. node_set(b, this_group);
  1964. node_clear(b, nodes);
  1965. }
  1966. }
  1967. /* Remember the top group. */
  1968. if (faults > max_faults) {
  1969. max_faults = faults;
  1970. max_group = this_group;
  1971. /*
  1972. * subtle: at the smallest distance there is
  1973. * just one node left in each "group", the
  1974. * winner is the preferred nid.
  1975. */
  1976. nid = a;
  1977. }
  1978. }
  1979. /* Next round, evaluate the nodes within max_group. */
  1980. if (!max_faults)
  1981. break;
  1982. nodes = max_group;
  1983. }
  1984. return nid;
  1985. }
  1986. static void task_numa_placement(struct task_struct *p)
  1987. {
  1988. int seq, nid, max_nid = NUMA_NO_NODE;
  1989. unsigned long max_faults = 0;
  1990. unsigned long fault_types[2] = { 0, 0 };
  1991. unsigned long total_faults;
  1992. u64 runtime, period;
  1993. spinlock_t *group_lock = NULL;
  1994. struct numa_group *ng;
  1995. /*
  1996. * The p->mm->numa_scan_seq field gets updated without
  1997. * exclusive access. Use READ_ONCE() here to ensure
  1998. * that the field is read in a single access:
  1999. */
  2000. seq = READ_ONCE(p->mm->numa_scan_seq);
  2001. if (p->numa_scan_seq == seq)
  2002. return;
  2003. p->numa_scan_seq = seq;
  2004. p->numa_scan_period_max = task_scan_max(p);
  2005. total_faults = p->numa_faults_locality[0] +
  2006. p->numa_faults_locality[1];
  2007. runtime = numa_get_avg_runtime(p, &period);
  2008. /* If the task is part of a group prevent parallel updates to group stats */
  2009. ng = deref_curr_numa_group(p);
  2010. if (ng) {
  2011. group_lock = &ng->lock;
  2012. spin_lock_irq(group_lock);
  2013. }
  2014. /* Find the node with the highest number of faults */
  2015. for_each_online_node(nid) {
  2016. /* Keep track of the offsets in numa_faults array */
  2017. int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
  2018. unsigned long faults = 0, group_faults = 0;
  2019. int priv;
  2020. for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
  2021. long diff, f_diff, f_weight;
  2022. mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
  2023. membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
  2024. cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
  2025. cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
  2026. /* Decay existing window, copy faults since last scan */
  2027. diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
  2028. fault_types[priv] += p->numa_faults[membuf_idx];
  2029. p->numa_faults[membuf_idx] = 0;
  2030. /*
  2031. * Normalize the faults_from, so all tasks in a group
  2032. * count according to CPU use, instead of by the raw
  2033. * number of faults. Tasks with little runtime have
  2034. * little over-all impact on throughput, and thus their
  2035. * faults are less important.
  2036. */
  2037. f_weight = div64_u64(runtime << 16, period + 1);
  2038. f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
  2039. (total_faults + 1);
  2040. f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
  2041. p->numa_faults[cpubuf_idx] = 0;
  2042. p->numa_faults[mem_idx] += diff;
  2043. p->numa_faults[cpu_idx] += f_diff;
  2044. faults += p->numa_faults[mem_idx];
  2045. p->total_numa_faults += diff;
  2046. if (ng) {
  2047. /*
  2048. * safe because we can only change our own group
  2049. *
  2050. * mem_idx represents the offset for a given
  2051. * nid and priv in a specific region because it
  2052. * is at the beginning of the numa_faults array.
  2053. */
  2054. ng->faults[mem_idx] += diff;
  2055. ng->faults_cpu[mem_idx] += f_diff;
  2056. ng->total_faults += diff;
  2057. group_faults += ng->faults[mem_idx];
  2058. }
  2059. }
  2060. if (!ng) {
  2061. if (faults > max_faults) {
  2062. max_faults = faults;
  2063. max_nid = nid;
  2064. }
  2065. } else if (group_faults > max_faults) {
  2066. max_faults = group_faults;
  2067. max_nid = nid;
  2068. }
  2069. }
  2070. if (ng) {
  2071. numa_group_count_active_nodes(ng);
  2072. spin_unlock_irq(group_lock);
  2073. max_nid = preferred_group_nid(p, max_nid);
  2074. }
  2075. if (max_faults) {
  2076. /* Set the new preferred node */
  2077. if (max_nid != p->numa_preferred_nid)
  2078. sched_setnuma(p, max_nid);
  2079. }
  2080. update_task_scan_period(p, fault_types[0], fault_types[1]);
  2081. }
  2082. static inline int get_numa_group(struct numa_group *grp)
  2083. {
  2084. return refcount_inc_not_zero(&grp->refcount);
  2085. }
  2086. static inline void put_numa_group(struct numa_group *grp)
  2087. {
  2088. if (refcount_dec_and_test(&grp->refcount))
  2089. kfree_rcu(grp, rcu);
  2090. }
  2091. static void task_numa_group(struct task_struct *p, int cpupid, int flags,
  2092. int *priv)
  2093. {
  2094. struct numa_group *grp, *my_grp;
  2095. struct task_struct *tsk;
  2096. bool join = false;
  2097. int cpu = cpupid_to_cpu(cpupid);
  2098. int i;
  2099. if (unlikely(!deref_curr_numa_group(p))) {
  2100. unsigned int size = sizeof(struct numa_group) +
  2101. 4*nr_node_ids*sizeof(unsigned long);
  2102. grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
  2103. if (!grp)
  2104. return;
  2105. refcount_set(&grp->refcount, 1);
  2106. grp->active_nodes = 1;
  2107. grp->max_faults_cpu = 0;
  2108. spin_lock_init(&grp->lock);
  2109. grp->gid = p->pid;
  2110. /* Second half of the array tracks nids where faults happen */
  2111. grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
  2112. nr_node_ids;
  2113. for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
  2114. grp->faults[i] = p->numa_faults[i];
  2115. grp->total_faults = p->total_numa_faults;
  2116. grp->nr_tasks++;
  2117. rcu_assign_pointer(p->numa_group, grp);
  2118. }
  2119. rcu_read_lock();
  2120. tsk = READ_ONCE(cpu_rq(cpu)->curr);
  2121. if (!cpupid_match_pid(tsk, cpupid))
  2122. goto no_join;
  2123. grp = rcu_dereference(tsk->numa_group);
  2124. if (!grp)
  2125. goto no_join;
  2126. my_grp = deref_curr_numa_group(p);
  2127. if (grp == my_grp)
  2128. goto no_join;
  2129. /*
  2130. * Only join the other group if its bigger; if we're the bigger group,
  2131. * the other task will join us.
  2132. */
  2133. if (my_grp->nr_tasks > grp->nr_tasks)
  2134. goto no_join;
  2135. /*
  2136. * Tie-break on the grp address.
  2137. */
  2138. if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
  2139. goto no_join;
  2140. /* Always join threads in the same process. */
  2141. if (tsk->mm == current->mm)
  2142. join = true;
  2143. /* Simple filter to avoid false positives due to PID collisions */
  2144. if (flags & TNF_SHARED)
  2145. join = true;
  2146. /* Update priv based on whether false sharing was detected */
  2147. *priv = !join;
  2148. if (join && !get_numa_group(grp))
  2149. goto no_join;
  2150. rcu_read_unlock();
  2151. if (!join)
  2152. return;
  2153. BUG_ON(irqs_disabled());
  2154. double_lock_irq(&my_grp->lock, &grp->lock);
  2155. for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
  2156. my_grp->faults[i] -= p->numa_faults[i];
  2157. grp->faults[i] += p->numa_faults[i];
  2158. }
  2159. my_grp->total_faults -= p->total_numa_faults;
  2160. grp->total_faults += p->total_numa_faults;
  2161. my_grp->nr_tasks--;
  2162. grp->nr_tasks++;
  2163. spin_unlock(&my_grp->lock);
  2164. spin_unlock_irq(&grp->lock);
  2165. rcu_assign_pointer(p->numa_group, grp);
  2166. put_numa_group(my_grp);
  2167. return;
  2168. no_join:
  2169. rcu_read_unlock();
  2170. return;
  2171. }
  2172. /*
  2173. * Get rid of NUMA staticstics associated with a task (either current or dead).
  2174. * If @final is set, the task is dead and has reached refcount zero, so we can
  2175. * safely free all relevant data structures. Otherwise, there might be
  2176. * concurrent reads from places like load balancing and procfs, and we should
  2177. * reset the data back to default state without freeing ->numa_faults.
  2178. */
  2179. void task_numa_free(struct task_struct *p, bool final)
  2180. {
  2181. /* safe: p either is current or is being freed by current */
  2182. struct numa_group *grp = rcu_dereference_raw(p->numa_group);
  2183. unsigned long *numa_faults = p->numa_faults;
  2184. unsigned long flags;
  2185. int i;
  2186. if (!numa_faults)
  2187. return;
  2188. if (grp) {
  2189. spin_lock_irqsave(&grp->lock, flags);
  2190. for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
  2191. grp->faults[i] -= p->numa_faults[i];
  2192. grp->total_faults -= p->total_numa_faults;
  2193. grp->nr_tasks--;
  2194. spin_unlock_irqrestore(&grp->lock, flags);
  2195. RCU_INIT_POINTER(p->numa_group, NULL);
  2196. put_numa_group(grp);
  2197. }
  2198. if (final) {
  2199. p->numa_faults = NULL;
  2200. kfree(numa_faults);
  2201. } else {
  2202. p->total_numa_faults = 0;
  2203. for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
  2204. numa_faults[i] = 0;
  2205. }
  2206. }
  2207. /*
  2208. * Got a PROT_NONE fault for a page on @node.
  2209. */
  2210. void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
  2211. {
  2212. struct task_struct *p = current;
  2213. bool migrated = flags & TNF_MIGRATED;
  2214. int cpu_node = task_node(current);
  2215. int local = !!(flags & TNF_FAULT_LOCAL);
  2216. struct numa_group *ng;
  2217. int priv;
  2218. if (!static_branch_likely(&sched_numa_balancing))
  2219. return;
  2220. /* for example, ksmd faulting in a user's mm */
  2221. if (!p->mm)
  2222. return;
  2223. /* Allocate buffer to track faults on a per-node basis */
  2224. if (unlikely(!p->numa_faults)) {
  2225. int size = sizeof(*p->numa_faults) *
  2226. NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
  2227. p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
  2228. if (!p->numa_faults)
  2229. return;
  2230. p->total_numa_faults = 0;
  2231. memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
  2232. }
  2233. /*
  2234. * First accesses are treated as private, otherwise consider accesses
  2235. * to be private if the accessing pid has not changed
  2236. */
  2237. if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
  2238. priv = 1;
  2239. } else {
  2240. priv = cpupid_match_pid(p, last_cpupid);
  2241. if (!priv && !(flags & TNF_NO_GROUP))
  2242. task_numa_group(p, last_cpupid, flags, &priv);
  2243. }
  2244. /*
  2245. * If a workload spans multiple NUMA nodes, a shared fault that
  2246. * occurs wholly within the set of nodes that the workload is
  2247. * actively using should be counted as local. This allows the
  2248. * scan rate to slow down when a workload has settled down.
  2249. */
  2250. ng = deref_curr_numa_group(p);
  2251. if (!priv && !local && ng && ng->active_nodes > 1 &&
  2252. numa_is_active_node(cpu_node, ng) &&
  2253. numa_is_active_node(mem_node, ng))
  2254. local = 1;
  2255. /*
  2256. * Retry to migrate task to preferred node periodically, in case it
  2257. * previously failed, or the scheduler moved us.
  2258. */
  2259. if (time_after(jiffies, p->numa_migrate_retry)) {
  2260. task_numa_placement(p);
  2261. numa_migrate_preferred(p);
  2262. }
  2263. if (migrated)
  2264. p->numa_pages_migrated += pages;
  2265. if (flags & TNF_MIGRATE_FAIL)
  2266. p->numa_faults_locality[2] += pages;
  2267. p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
  2268. p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
  2269. p->numa_faults_locality[local] += pages;
  2270. }
  2271. static void reset_ptenuma_scan(struct task_struct *p)
  2272. {
  2273. /*
  2274. * We only did a read acquisition of the mmap sem, so
  2275. * p->mm->numa_scan_seq is written to without exclusive access
  2276. * and the update is not guaranteed to be atomic. That's not
  2277. * much of an issue though, since this is just used for
  2278. * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
  2279. * expensive, to avoid any form of compiler optimizations:
  2280. */
  2281. WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
  2282. p->mm->numa_scan_offset = 0;
  2283. }
  2284. /*
  2285. * The expensive part of numa migration is done from task_work context.
  2286. * Triggered from task_tick_numa().
  2287. */
  2288. static void task_numa_work(struct callback_head *work)
  2289. {
  2290. unsigned long migrate, next_scan, now = jiffies;
  2291. struct task_struct *p = current;
  2292. struct mm_struct *mm = p->mm;
  2293. u64 runtime = p->se.sum_exec_runtime;
  2294. struct vm_area_struct *vma;
  2295. unsigned long start, end;
  2296. unsigned long nr_pte_updates = 0;
  2297. long pages, virtpages;
  2298. SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
  2299. work->next = work;
  2300. /*
  2301. * Who cares about NUMA placement when they're dying.
  2302. *
  2303. * NOTE: make sure not to dereference p->mm before this check,
  2304. * exit_task_work() happens _after_ exit_mm() so we could be called
  2305. * without p->mm even though we still had it when we enqueued this
  2306. * work.
  2307. */
  2308. if (p->flags & PF_EXITING)
  2309. return;
  2310. if (!mm->numa_next_scan) {
  2311. mm->numa_next_scan = now +
  2312. msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
  2313. }
  2314. /*
  2315. * Enforce maximal scan/migration frequency..
  2316. */
  2317. migrate = mm->numa_next_scan;
  2318. if (time_before(now, migrate))
  2319. return;
  2320. if (p->numa_scan_period == 0) {
  2321. p->numa_scan_period_max = task_scan_max(p);
  2322. p->numa_scan_period = task_scan_start(p);
  2323. }
  2324. next_scan = now + msecs_to_jiffies(p->numa_scan_period);
  2325. if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
  2326. return;
  2327. /*
  2328. * Delay this task enough that another task of this mm will likely win
  2329. * the next time around.
  2330. */
  2331. p->node_stamp += 2 * TICK_NSEC;
  2332. start = mm->numa_scan_offset;
  2333. pages = sysctl_numa_balancing_scan_size;
  2334. pages <<= 20 - PAGE_SHIFT; /* MB in pages */
  2335. virtpages = pages * 8; /* Scan up to this much virtual space */
  2336. if (!pages)
  2337. return;
  2338. if (!mmap_read_trylock(mm))
  2339. return;
  2340. vma = find_vma(mm, start);
  2341. if (!vma) {
  2342. reset_ptenuma_scan(p);
  2343. start = 0;
  2344. vma = mm->mmap;
  2345. }
  2346. for (; vma; vma = vma->vm_next) {
  2347. if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
  2348. is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
  2349. continue;
  2350. }
  2351. /*
  2352. * Shared library pages mapped by multiple processes are not
  2353. * migrated as it is expected they are cache replicated. Avoid
  2354. * hinting faults in read-only file-backed mappings or the vdso
  2355. * as migrating the pages will be of marginal benefit.
  2356. */
  2357. if (!vma->vm_mm ||
  2358. (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
  2359. continue;
  2360. /*
  2361. * Skip inaccessible VMAs to avoid any confusion between
  2362. * PROT_NONE and NUMA hinting ptes
  2363. */
  2364. if (!vma_is_accessible(vma))
  2365. continue;
  2366. do {
  2367. start = max(start, vma->vm_start);
  2368. end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
  2369. end = min(end, vma->vm_end);
  2370. nr_pte_updates = change_prot_numa(vma, start, end);
  2371. /*
  2372. * Try to scan sysctl_numa_balancing_size worth of
  2373. * hpages that have at least one present PTE that
  2374. * is not already pte-numa. If the VMA contains
  2375. * areas that are unused or already full of prot_numa
  2376. * PTEs, scan up to virtpages, to skip through those
  2377. * areas faster.
  2378. */
  2379. if (nr_pte_updates)
  2380. pages -= (end - start) >> PAGE_SHIFT;
  2381. virtpages -= (end - start) >> PAGE_SHIFT;
  2382. start = end;
  2383. if (pages <= 0 || virtpages <= 0)
  2384. goto out;
  2385. cond_resched();
  2386. } while (end != vma->vm_end);
  2387. }
  2388. out:
  2389. /*
  2390. * It is possible to reach the end of the VMA list but the last few
  2391. * VMAs are not guaranteed to the vma_migratable. If they are not, we
  2392. * would find the !migratable VMA on the next scan but not reset the
  2393. * scanner to the start so check it now.
  2394. */
  2395. if (vma)
  2396. mm->numa_scan_offset = start;
  2397. else
  2398. reset_ptenuma_scan(p);
  2399. mmap_read_unlock(mm);
  2400. /*
  2401. * Make sure tasks use at least 32x as much time to run other code
  2402. * than they used here, to limit NUMA PTE scanning overhead to 3% max.
  2403. * Usually update_task_scan_period slows down scanning enough; on an
  2404. * overloaded system we need to limit overhead on a per task basis.
  2405. */
  2406. if (unlikely(p->se.sum_exec_runtime != runtime)) {
  2407. u64 diff = p->se.sum_exec_runtime - runtime;
  2408. p->node_stamp += 32 * diff;
  2409. }
  2410. }
  2411. void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
  2412. {
  2413. int mm_users = 0;
  2414. struct mm_struct *mm = p->mm;
  2415. if (mm) {
  2416. mm_users = atomic_read(&mm->mm_users);
  2417. if (mm_users == 1) {
  2418. mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
  2419. mm->numa_scan_seq = 0;
  2420. }
  2421. }
  2422. p->node_stamp = 0;
  2423. p->numa_scan_seq = mm ? mm->numa_scan_seq : 0;
  2424. p->numa_scan_period = sysctl_numa_balancing_scan_delay;
  2425. /* Protect against double add, see task_tick_numa and task_numa_work */
  2426. p->numa_work.next = &p->numa_work;
  2427. p->numa_faults = NULL;
  2428. RCU_INIT_POINTER(p->numa_group, NULL);
  2429. p->last_task_numa_placement = 0;
  2430. p->last_sum_exec_runtime = 0;
  2431. init_task_work(&p->numa_work, task_numa_work);
  2432. /* New address space, reset the preferred nid */
  2433. if (!(clone_flags & CLONE_VM)) {
  2434. p->numa_preferred_nid = NUMA_NO_NODE;
  2435. return;
  2436. }
  2437. /*
  2438. * New thread, keep existing numa_preferred_nid which should be copied
  2439. * already by arch_dup_task_struct but stagger when scans start.
  2440. */
  2441. if (mm) {
  2442. unsigned int delay;
  2443. delay = min_t(unsigned int, task_scan_max(current),
  2444. current->numa_scan_period * mm_users * NSEC_PER_MSEC);
  2445. delay += 2 * TICK_NSEC;
  2446. p->node_stamp = delay;
  2447. }
  2448. }
  2449. /*
  2450. * Drive the periodic memory faults..
  2451. */
  2452. static void task_tick_numa(struct rq *rq, struct task_struct *curr)
  2453. {
  2454. struct callback_head *work = &curr->numa_work;
  2455. u64 period, now;
  2456. /*
  2457. * We don't care about NUMA placement if we don't have memory.
  2458. */
  2459. if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
  2460. return;
  2461. /*
  2462. * Using runtime rather than walltime has the dual advantage that
  2463. * we (mostly) drive the selection from busy threads and that the
  2464. * task needs to have done some actual work before we bother with
  2465. * NUMA placement.
  2466. */
  2467. now = curr->se.sum_exec_runtime;
  2468. period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
  2469. if (now > curr->node_stamp + period) {
  2470. if (!curr->node_stamp)
  2471. curr->numa_scan_period = task_scan_start(curr);
  2472. curr->node_stamp += period;
  2473. if (!time_before(jiffies, curr->mm->numa_next_scan))
  2474. task_work_add(curr, work, TWA_RESUME);
  2475. }
  2476. }
  2477. static void update_scan_period(struct task_struct *p, int new_cpu)
  2478. {
  2479. int src_nid = cpu_to_node(task_cpu(p));
  2480. int dst_nid = cpu_to_node(new_cpu);
  2481. if (!static_branch_likely(&sched_numa_balancing))
  2482. return;
  2483. if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
  2484. return;
  2485. if (src_nid == dst_nid)
  2486. return;
  2487. /*
  2488. * Allow resets if faults have been trapped before one scan
  2489. * has completed. This is most likely due to a new task that
  2490. * is pulled cross-node due to wakeups or load balancing.
  2491. */
  2492. if (p->numa_scan_seq) {
  2493. /*
  2494. * Avoid scan adjustments if moving to the preferred
  2495. * node or if the task was not previously running on
  2496. * the preferred node.
  2497. */
  2498. if (dst_nid == p->numa_preferred_nid ||
  2499. (p->numa_preferred_nid != NUMA_NO_NODE &&
  2500. src_nid != p->numa_preferred_nid))
  2501. return;
  2502. }
  2503. p->numa_scan_period = task_scan_start(p);
  2504. }
  2505. #else
  2506. static void task_tick_numa(struct rq *rq, struct task_struct *curr)
  2507. {
  2508. }
  2509. static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
  2510. {
  2511. }
  2512. static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
  2513. {
  2514. }
  2515. static inline void update_scan_period(struct task_struct *p, int new_cpu)
  2516. {
  2517. }
  2518. #endif /* CONFIG_NUMA_BALANCING */
  2519. static void
  2520. account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  2521. {
  2522. update_load_add(&cfs_rq->load, se->load.weight);
  2523. #ifdef CONFIG_SMP
  2524. if (entity_is_task(se)) {
  2525. struct rq *rq = rq_of(cfs_rq);
  2526. account_numa_enqueue(rq, task_of(se));
  2527. list_add(&se->group_node, &rq->cfs_tasks);
  2528. }
  2529. #endif
  2530. cfs_rq->nr_running++;
  2531. }
  2532. static void
  2533. account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  2534. {
  2535. update_load_sub(&cfs_rq->load, se->load.weight);
  2536. #ifdef CONFIG_SMP
  2537. if (entity_is_task(se)) {
  2538. account_numa_dequeue(rq_of(cfs_rq), task_of(se));
  2539. list_del_init(&se->group_node);
  2540. }
  2541. #endif
  2542. cfs_rq->nr_running--;
  2543. }
  2544. /*
  2545. * Signed add and clamp on underflow.
  2546. *
  2547. * Explicitly do a load-store to ensure the intermediate value never hits
  2548. * memory. This allows lockless observations without ever seeing the negative
  2549. * values.
  2550. */
  2551. #define add_positive(_ptr, _val) do { \
  2552. typeof(_ptr) ptr = (_ptr); \
  2553. typeof(_val) val = (_val); \
  2554. typeof(*ptr) res, var = READ_ONCE(*ptr); \
  2555. \
  2556. res = var + val; \
  2557. \
  2558. if (val < 0 && res > var) \
  2559. res = 0; \
  2560. \
  2561. WRITE_ONCE(*ptr, res); \
  2562. } while (0)
  2563. /*
  2564. * Unsigned subtract and clamp on underflow.
  2565. *
  2566. * Explicitly do a load-store to ensure the intermediate value never hits
  2567. * memory. This allows lockless observations without ever seeing the negative
  2568. * values.
  2569. */
  2570. #define sub_positive(_ptr, _val) do { \
  2571. typeof(_ptr) ptr = (_ptr); \
  2572. typeof(*ptr) val = (_val); \
  2573. typeof(*ptr) res, var = READ_ONCE(*ptr); \
  2574. res = var - val; \
  2575. if (res > var) \
  2576. res = 0; \
  2577. WRITE_ONCE(*ptr, res); \
  2578. } while (0)
  2579. /*
  2580. * Remove and clamp on negative, from a local variable.
  2581. *
  2582. * A variant of sub_positive(), which does not use explicit load-store
  2583. * and is thus optimized for local variable updates.
  2584. */
  2585. #define lsub_positive(_ptr, _val) do { \
  2586. typeof(_ptr) ptr = (_ptr); \
  2587. *ptr -= min_t(typeof(*ptr), *ptr, _val); \
  2588. } while (0)
  2589. #ifdef CONFIG_SMP
  2590. static inline void
  2591. enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
  2592. {
  2593. cfs_rq->avg.load_avg += se->avg.load_avg;
  2594. cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
  2595. }
  2596. static inline void
  2597. dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
  2598. {
  2599. sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
  2600. sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
  2601. }
  2602. #else
  2603. static inline void
  2604. enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
  2605. static inline void
  2606. dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
  2607. #endif
  2608. static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
  2609. unsigned long weight)
  2610. {
  2611. if (se->on_rq) {
  2612. /* commit outstanding execution time */
  2613. if (cfs_rq->curr == se)
  2614. update_curr(cfs_rq);
  2615. update_load_sub(&cfs_rq->load, se->load.weight);
  2616. }
  2617. dequeue_load_avg(cfs_rq, se);
  2618. update_load_set(&se->load, weight);
  2619. #ifdef CONFIG_SMP
  2620. do {
  2621. u32 divider = get_pelt_divider(&se->avg);
  2622. se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
  2623. } while (0);
  2624. #endif
  2625. enqueue_load_avg(cfs_rq, se);
  2626. if (se->on_rq)
  2627. update_load_add(&cfs_rq->load, se->load.weight);
  2628. }
  2629. void reweight_task(struct task_struct *p, int prio)
  2630. {
  2631. struct sched_entity *se = &p->se;
  2632. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  2633. struct load_weight *load = &se->load;
  2634. unsigned long weight = scale_load(sched_prio_to_weight[prio]);
  2635. reweight_entity(cfs_rq, se, weight);
  2636. load->inv_weight = sched_prio_to_wmult[prio];
  2637. }
  2638. #ifdef CONFIG_FAIR_GROUP_SCHED
  2639. #ifdef CONFIG_SMP
  2640. /*
  2641. * All this does is approximate the hierarchical proportion which includes that
  2642. * global sum we all love to hate.
  2643. *
  2644. * That is, the weight of a group entity, is the proportional share of the
  2645. * group weight based on the group runqueue weights. That is:
  2646. *
  2647. * tg->weight * grq->load.weight
  2648. * ge->load.weight = ----------------------------- (1)
  2649. * \Sum grq->load.weight
  2650. *
  2651. * Now, because computing that sum is prohibitively expensive to compute (been
  2652. * there, done that) we approximate it with this average stuff. The average
  2653. * moves slower and therefore the approximation is cheaper and more stable.
  2654. *
  2655. * So instead of the above, we substitute:
  2656. *
  2657. * grq->load.weight -> grq->avg.load_avg (2)
  2658. *
  2659. * which yields the following:
  2660. *
  2661. * tg->weight * grq->avg.load_avg
  2662. * ge->load.weight = ------------------------------ (3)
  2663. * tg->load_avg
  2664. *
  2665. * Where: tg->load_avg ~= \Sum grq->avg.load_avg
  2666. *
  2667. * That is shares_avg, and it is right (given the approximation (2)).
  2668. *
  2669. * The problem with it is that because the average is slow -- it was designed
  2670. * to be exactly that of course -- this leads to transients in boundary
  2671. * conditions. In specific, the case where the group was idle and we start the
  2672. * one task. It takes time for our CPU's grq->avg.load_avg to build up,
  2673. * yielding bad latency etc..
  2674. *
  2675. * Now, in that special case (1) reduces to:
  2676. *
  2677. * tg->weight * grq->load.weight
  2678. * ge->load.weight = ----------------------------- = tg->weight (4)
  2679. * grp->load.weight
  2680. *
  2681. * That is, the sum collapses because all other CPUs are idle; the UP scenario.
  2682. *
  2683. * So what we do is modify our approximation (3) to approach (4) in the (near)
  2684. * UP case, like:
  2685. *
  2686. * ge->load.weight =
  2687. *
  2688. * tg->weight * grq->load.weight
  2689. * --------------------------------------------------- (5)
  2690. * tg->load_avg - grq->avg.load_avg + grq->load.weight
  2691. *
  2692. * But because grq->load.weight can drop to 0, resulting in a divide by zero,
  2693. * we need to use grq->avg.load_avg as its lower bound, which then gives:
  2694. *
  2695. *
  2696. * tg->weight * grq->load.weight
  2697. * ge->load.weight = ----------------------------- (6)
  2698. * tg_load_avg'
  2699. *
  2700. * Where:
  2701. *
  2702. * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
  2703. * max(grq->load.weight, grq->avg.load_avg)
  2704. *
  2705. * And that is shares_weight and is icky. In the (near) UP case it approaches
  2706. * (4) while in the normal case it approaches (3). It consistently
  2707. * overestimates the ge->load.weight and therefore:
  2708. *
  2709. * \Sum ge->load.weight >= tg->weight
  2710. *
  2711. * hence icky!
  2712. */
  2713. static long calc_group_shares(struct cfs_rq *cfs_rq)
  2714. {
  2715. long tg_weight, tg_shares, load, shares;
  2716. struct task_group *tg = cfs_rq->tg;
  2717. tg_shares = READ_ONCE(tg->shares);
  2718. load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
  2719. tg_weight = atomic_long_read(&tg->load_avg);
  2720. /* Ensure tg_weight >= load */
  2721. tg_weight -= cfs_rq->tg_load_avg_contrib;
  2722. tg_weight += load;
  2723. shares = (tg_shares * load);
  2724. if (tg_weight)
  2725. shares /= tg_weight;
  2726. /*
  2727. * MIN_SHARES has to be unscaled here to support per-CPU partitioning
  2728. * of a group with small tg->shares value. It is a floor value which is
  2729. * assigned as a minimum load.weight to the sched_entity representing
  2730. * the group on a CPU.
  2731. *
  2732. * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
  2733. * on an 8-core system with 8 tasks each runnable on one CPU shares has
  2734. * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
  2735. * case no task is runnable on a CPU MIN_SHARES=2 should be returned
  2736. * instead of 0.
  2737. */
  2738. return clamp_t(long, shares, MIN_SHARES, tg_shares);
  2739. }
  2740. #endif /* CONFIG_SMP */
  2741. static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
  2742. /*
  2743. * Recomputes the group entity based on the current state of its group
  2744. * runqueue.
  2745. */
  2746. static void update_cfs_group(struct sched_entity *se)
  2747. {
  2748. struct cfs_rq *gcfs_rq = group_cfs_rq(se);
  2749. long shares;
  2750. if (!gcfs_rq)
  2751. return;
  2752. if (throttled_hierarchy(gcfs_rq))
  2753. return;
  2754. #ifndef CONFIG_SMP
  2755. shares = READ_ONCE(gcfs_rq->tg->shares);
  2756. if (likely(se->load.weight == shares))
  2757. return;
  2758. #else
  2759. shares = calc_group_shares(gcfs_rq);
  2760. #endif
  2761. reweight_entity(cfs_rq_of(se), se, shares);
  2762. }
  2763. #else /* CONFIG_FAIR_GROUP_SCHED */
  2764. static inline void update_cfs_group(struct sched_entity *se)
  2765. {
  2766. }
  2767. #endif /* CONFIG_FAIR_GROUP_SCHED */
  2768. static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
  2769. {
  2770. struct rq *rq = rq_of(cfs_rq);
  2771. if (&rq->cfs == cfs_rq) {
  2772. /*
  2773. * There are a few boundary cases this might miss but it should
  2774. * get called often enough that that should (hopefully) not be
  2775. * a real problem.
  2776. *
  2777. * It will not get called when we go idle, because the idle
  2778. * thread is a different class (!fair), nor will the utilization
  2779. * number include things like RT tasks.
  2780. *
  2781. * As is, the util number is not freq-invariant (we'd have to
  2782. * implement arch_scale_freq_capacity() for that).
  2783. *
  2784. * See cpu_util().
  2785. */
  2786. cpufreq_update_util(rq, flags);
  2787. }
  2788. }
  2789. #ifdef CONFIG_SMP
  2790. #ifdef CONFIG_FAIR_GROUP_SCHED
  2791. /**
  2792. * update_tg_load_avg - update the tg's load avg
  2793. * @cfs_rq: the cfs_rq whose avg changed
  2794. *
  2795. * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
  2796. * However, because tg->load_avg is a global value there are performance
  2797. * considerations.
  2798. *
  2799. * In order to avoid having to look at the other cfs_rq's, we use a
  2800. * differential update where we store the last value we propagated. This in
  2801. * turn allows skipping updates if the differential is 'small'.
  2802. *
  2803. * Updating tg's load_avg is necessary before update_cfs_share().
  2804. */
  2805. static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
  2806. {
  2807. long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
  2808. /*
  2809. * No need to update load_avg for root_task_group as it is not used.
  2810. */
  2811. if (cfs_rq->tg == &root_task_group)
  2812. return;
  2813. if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
  2814. atomic_long_add(delta, &cfs_rq->tg->load_avg);
  2815. cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
  2816. }
  2817. }
  2818. /*
  2819. * Called within set_task_rq() right before setting a task's CPU. The
  2820. * caller only guarantees p->pi_lock is held; no other assumptions,
  2821. * including the state of rq->lock, should be made.
  2822. */
  2823. void set_task_rq_fair(struct sched_entity *se,
  2824. struct cfs_rq *prev, struct cfs_rq *next)
  2825. {
  2826. u64 p_last_update_time;
  2827. u64 n_last_update_time;
  2828. if (!sched_feat(ATTACH_AGE_LOAD))
  2829. return;
  2830. /*
  2831. * We are supposed to update the task to "current" time, then its up to
  2832. * date and ready to go to new CPU/cfs_rq. But we have difficulty in
  2833. * getting what current time is, so simply throw away the out-of-date
  2834. * time. This will result in the wakee task is less decayed, but giving
  2835. * the wakee more load sounds not bad.
  2836. */
  2837. if (!(se->avg.last_update_time && prev))
  2838. return;
  2839. #ifndef CONFIG_64BIT
  2840. {
  2841. u64 p_last_update_time_copy;
  2842. u64 n_last_update_time_copy;
  2843. do {
  2844. p_last_update_time_copy = prev->load_last_update_time_copy;
  2845. n_last_update_time_copy = next->load_last_update_time_copy;
  2846. smp_rmb();
  2847. p_last_update_time = prev->avg.last_update_time;
  2848. n_last_update_time = next->avg.last_update_time;
  2849. } while (p_last_update_time != p_last_update_time_copy ||
  2850. n_last_update_time != n_last_update_time_copy);
  2851. }
  2852. #else
  2853. p_last_update_time = prev->avg.last_update_time;
  2854. n_last_update_time = next->avg.last_update_time;
  2855. #endif
  2856. __update_load_avg_blocked_se(p_last_update_time, se);
  2857. se->avg.last_update_time = n_last_update_time;
  2858. }
  2859. /*
  2860. * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
  2861. * propagate its contribution. The key to this propagation is the invariant
  2862. * that for each group:
  2863. *
  2864. * ge->avg == grq->avg (1)
  2865. *
  2866. * _IFF_ we look at the pure running and runnable sums. Because they
  2867. * represent the very same entity, just at different points in the hierarchy.
  2868. *
  2869. * Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
  2870. * and simply copies the running/runnable sum over (but still wrong, because
  2871. * the group entity and group rq do not have their PELT windows aligned).
  2872. *
  2873. * However, update_tg_cfs_load() is more complex. So we have:
  2874. *
  2875. * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
  2876. *
  2877. * And since, like util, the runnable part should be directly transferable,
  2878. * the following would _appear_ to be the straight forward approach:
  2879. *
  2880. * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
  2881. *
  2882. * And per (1) we have:
  2883. *
  2884. * ge->avg.runnable_avg == grq->avg.runnable_avg
  2885. *
  2886. * Which gives:
  2887. *
  2888. * ge->load.weight * grq->avg.load_avg
  2889. * ge->avg.load_avg = ----------------------------------- (4)
  2890. * grq->load.weight
  2891. *
  2892. * Except that is wrong!
  2893. *
  2894. * Because while for entities historical weight is not important and we
  2895. * really only care about our future and therefore can consider a pure
  2896. * runnable sum, runqueues can NOT do this.
  2897. *
  2898. * We specifically want runqueues to have a load_avg that includes
  2899. * historical weights. Those represent the blocked load, the load we expect
  2900. * to (shortly) return to us. This only works by keeping the weights as
  2901. * integral part of the sum. We therefore cannot decompose as per (3).
  2902. *
  2903. * Another reason this doesn't work is that runnable isn't a 0-sum entity.
  2904. * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
  2905. * rq itself is runnable anywhere between 2/3 and 1 depending on how the
  2906. * runnable section of these tasks overlap (or not). If they were to perfectly
  2907. * align the rq as a whole would be runnable 2/3 of the time. If however we
  2908. * always have at least 1 runnable task, the rq as a whole is always runnable.
  2909. *
  2910. * So we'll have to approximate.. :/
  2911. *
  2912. * Given the constraint:
  2913. *
  2914. * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
  2915. *
  2916. * We can construct a rule that adds runnable to a rq by assuming minimal
  2917. * overlap.
  2918. *
  2919. * On removal, we'll assume each task is equally runnable; which yields:
  2920. *
  2921. * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
  2922. *
  2923. * XXX: only do this for the part of runnable > running ?
  2924. *
  2925. */
  2926. static inline void
  2927. update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
  2928. {
  2929. long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
  2930. u32 divider;
  2931. /* Nothing to update */
  2932. if (!delta)
  2933. return;
  2934. /*
  2935. * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
  2936. * See ___update_load_avg() for details.
  2937. */
  2938. divider = get_pelt_divider(&cfs_rq->avg);
  2939. /* Set new sched_entity's utilization */
  2940. se->avg.util_avg = gcfs_rq->avg.util_avg;
  2941. se->avg.util_sum = se->avg.util_avg * divider;
  2942. /* Update parent cfs_rq utilization */
  2943. add_positive(&cfs_rq->avg.util_avg, delta);
  2944. cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
  2945. }
  2946. static inline void
  2947. update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
  2948. {
  2949. long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
  2950. u32 divider;
  2951. /* Nothing to update */
  2952. if (!delta)
  2953. return;
  2954. /*
  2955. * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
  2956. * See ___update_load_avg() for details.
  2957. */
  2958. divider = get_pelt_divider(&cfs_rq->avg);
  2959. /* Set new sched_entity's runnable */
  2960. se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
  2961. se->avg.runnable_sum = se->avg.runnable_avg * divider;
  2962. /* Update parent cfs_rq runnable */
  2963. add_positive(&cfs_rq->avg.runnable_avg, delta);
  2964. cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
  2965. }
  2966. static inline void
  2967. update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
  2968. {
  2969. long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
  2970. unsigned long load_avg;
  2971. u64 load_sum = 0;
  2972. s64 delta_sum;
  2973. u32 divider;
  2974. if (!runnable_sum)
  2975. return;
  2976. gcfs_rq->prop_runnable_sum = 0;
  2977. /*
  2978. * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
  2979. * See ___update_load_avg() for details.
  2980. */
  2981. divider = get_pelt_divider(&cfs_rq->avg);
  2982. if (runnable_sum >= 0) {
  2983. /*
  2984. * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
  2985. * the CPU is saturated running == runnable.
  2986. */
  2987. runnable_sum += se->avg.load_sum;
  2988. runnable_sum = min_t(long, runnable_sum, divider);
  2989. } else {
  2990. /*
  2991. * Estimate the new unweighted runnable_sum of the gcfs_rq by
  2992. * assuming all tasks are equally runnable.
  2993. */
  2994. if (scale_load_down(gcfs_rq->load.weight)) {
  2995. load_sum = div_s64(gcfs_rq->avg.load_sum,
  2996. scale_load_down(gcfs_rq->load.weight));
  2997. }
  2998. /* But make sure to not inflate se's runnable */
  2999. runnable_sum = min(se->avg.load_sum, load_sum);
  3000. }
  3001. /*
  3002. * runnable_sum can't be lower than running_sum
  3003. * Rescale running sum to be in the same range as runnable sum
  3004. * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT]
  3005. * runnable_sum is in [0 : LOAD_AVG_MAX]
  3006. */
  3007. running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
  3008. runnable_sum = max(runnable_sum, running_sum);
  3009. load_sum = (s64)se_weight(se) * runnable_sum;
  3010. load_avg = div_s64(load_sum, divider);
  3011. delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
  3012. delta_avg = load_avg - se->avg.load_avg;
  3013. se->avg.load_sum = runnable_sum;
  3014. se->avg.load_avg = load_avg;
  3015. add_positive(&cfs_rq->avg.load_avg, delta_avg);
  3016. add_positive(&cfs_rq->avg.load_sum, delta_sum);
  3017. }
  3018. static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
  3019. {
  3020. cfs_rq->propagate = 1;
  3021. cfs_rq->prop_runnable_sum += runnable_sum;
  3022. }
  3023. /* Update task and its cfs_rq load average */
  3024. static inline int propagate_entity_load_avg(struct sched_entity *se)
  3025. {
  3026. struct cfs_rq *cfs_rq, *gcfs_rq;
  3027. if (entity_is_task(se))
  3028. return 0;
  3029. gcfs_rq = group_cfs_rq(se);
  3030. if (!gcfs_rq->propagate)
  3031. return 0;
  3032. gcfs_rq->propagate = 0;
  3033. cfs_rq = cfs_rq_of(se);
  3034. add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
  3035. update_tg_cfs_util(cfs_rq, se, gcfs_rq);
  3036. update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
  3037. update_tg_cfs_load(cfs_rq, se, gcfs_rq);
  3038. trace_pelt_cfs_tp(cfs_rq);
  3039. trace_pelt_se_tp(se);
  3040. return 1;
  3041. }
  3042. /*
  3043. * Check if we need to update the load and the utilization of a blocked
  3044. * group_entity:
  3045. */
  3046. static inline bool skip_blocked_update(struct sched_entity *se)
  3047. {
  3048. struct cfs_rq *gcfs_rq = group_cfs_rq(se);
  3049. /*
  3050. * If sched_entity still have not zero load or utilization, we have to
  3051. * decay it:
  3052. */
  3053. if (se->avg.load_avg || se->avg.util_avg)
  3054. return false;
  3055. /*
  3056. * If there is a pending propagation, we have to update the load and
  3057. * the utilization of the sched_entity:
  3058. */
  3059. if (gcfs_rq->propagate)
  3060. return false;
  3061. /*
  3062. * Otherwise, the load and the utilization of the sched_entity is
  3063. * already zero and there is no pending propagation, so it will be a
  3064. * waste of time to try to decay it:
  3065. */
  3066. return true;
  3067. }
  3068. #else /* CONFIG_FAIR_GROUP_SCHED */
  3069. static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
  3070. static inline int propagate_entity_load_avg(struct sched_entity *se)
  3071. {
  3072. return 0;
  3073. }
  3074. static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
  3075. #endif /* CONFIG_FAIR_GROUP_SCHED */
  3076. /**
  3077. * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  3078. * @now: current time, as per cfs_rq_clock_pelt()
  3079. * @cfs_rq: cfs_rq to update
  3080. *
  3081. * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
  3082. * avg. The immediate corollary is that all (fair) tasks must be attached, see
  3083. * post_init_entity_util_avg().
  3084. *
  3085. * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
  3086. *
  3087. * Returns true if the load decayed or we removed load.
  3088. *
  3089. * Since both these conditions indicate a changed cfs_rq->avg.load we should
  3090. * call update_tg_load_avg() when this function returns true.
  3091. */
  3092. static inline int
  3093. update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  3094. {
  3095. unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
  3096. struct sched_avg *sa = &cfs_rq->avg;
  3097. int decayed = 0;
  3098. if (cfs_rq->removed.nr) {
  3099. unsigned long r;
  3100. u32 divider = get_pelt_divider(&cfs_rq->avg);
  3101. raw_spin_lock(&cfs_rq->removed.lock);
  3102. swap(cfs_rq->removed.util_avg, removed_util);
  3103. swap(cfs_rq->removed.load_avg, removed_load);
  3104. swap(cfs_rq->removed.runnable_avg, removed_runnable);
  3105. cfs_rq->removed.nr = 0;
  3106. raw_spin_unlock(&cfs_rq->removed.lock);
  3107. r = removed_load;
  3108. sub_positive(&sa->load_avg, r);
  3109. sub_positive(&sa->load_sum, r * divider);
  3110. r = removed_util;
  3111. sub_positive(&sa->util_avg, r);
  3112. sub_positive(&sa->util_sum, r * divider);
  3113. /*
  3114. * Because of rounding, se->util_sum might ends up being +1 more than
  3115. * cfs->util_sum. Although this is not a problem by itself, detaching
  3116. * a lot of tasks with the rounding problem between 2 updates of
  3117. * util_avg (~1ms) can make cfs->util_sum becoming null whereas
  3118. * cfs_util_avg is not.
  3119. * Check that util_sum is still above its lower bound for the new
  3120. * util_avg. Given that period_contrib might have moved since the last
  3121. * sync, we are only sure that util_sum must be above or equal to
  3122. * util_avg * minimum possible divider
  3123. */
  3124. sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
  3125. r = removed_runnable;
  3126. sub_positive(&sa->runnable_avg, r);
  3127. sub_positive(&sa->runnable_sum, r * divider);
  3128. /*
  3129. * removed_runnable is the unweighted version of removed_load so we
  3130. * can use it to estimate removed_load_sum.
  3131. */
  3132. add_tg_cfs_propagate(cfs_rq,
  3133. -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
  3134. decayed = 1;
  3135. }
  3136. decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
  3137. #ifndef CONFIG_64BIT
  3138. smp_wmb();
  3139. cfs_rq->load_last_update_time_copy = sa->last_update_time;
  3140. #endif
  3141. return decayed;
  3142. }
  3143. /**
  3144. * attach_entity_load_avg - attach this entity to its cfs_rq load avg
  3145. * @cfs_rq: cfs_rq to attach to
  3146. * @se: sched_entity to attach
  3147. *
  3148. * Must call update_cfs_rq_load_avg() before this, since we rely on
  3149. * cfs_rq->avg.last_update_time being current.
  3150. */
  3151. static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
  3152. {
  3153. /*
  3154. * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
  3155. * See ___update_load_avg() for details.
  3156. */
  3157. u32 divider = get_pelt_divider(&cfs_rq->avg);
  3158. /*
  3159. * When we attach the @se to the @cfs_rq, we must align the decay
  3160. * window because without that, really weird and wonderful things can
  3161. * happen.
  3162. *
  3163. * XXX illustrate
  3164. */
  3165. se->avg.last_update_time = cfs_rq->avg.last_update_time;
  3166. se->avg.period_contrib = cfs_rq->avg.period_contrib;
  3167. /*
  3168. * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
  3169. * period_contrib. This isn't strictly correct, but since we're
  3170. * entirely outside of the PELT hierarchy, nobody cares if we truncate
  3171. * _sum a little.
  3172. */
  3173. se->avg.util_sum = se->avg.util_avg * divider;
  3174. se->avg.runnable_sum = se->avg.runnable_avg * divider;
  3175. se->avg.load_sum = se->avg.load_avg * divider;
  3176. if (se_weight(se) < se->avg.load_sum)
  3177. se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
  3178. else
  3179. se->avg.load_sum = 1;
  3180. enqueue_load_avg(cfs_rq, se);
  3181. cfs_rq->avg.util_avg += se->avg.util_avg;
  3182. cfs_rq->avg.util_sum += se->avg.util_sum;
  3183. cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
  3184. cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
  3185. add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
  3186. cfs_rq_util_change(cfs_rq, 0);
  3187. trace_pelt_cfs_tp(cfs_rq);
  3188. }
  3189. /**
  3190. * detach_entity_load_avg - detach this entity from its cfs_rq load avg
  3191. * @cfs_rq: cfs_rq to detach from
  3192. * @se: sched_entity to detach
  3193. *
  3194. * Must call update_cfs_rq_load_avg() before this, since we rely on
  3195. * cfs_rq->avg.last_update_time being current.
  3196. */
  3197. static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
  3198. {
  3199. dequeue_load_avg(cfs_rq, se);
  3200. sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
  3201. sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
  3202. sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
  3203. sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
  3204. add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
  3205. cfs_rq_util_change(cfs_rq, 0);
  3206. trace_pelt_cfs_tp(cfs_rq);
  3207. }
  3208. /*
  3209. * Optional action to be done while updating the load average
  3210. */
  3211. #define UPDATE_TG 0x1
  3212. #define SKIP_AGE_LOAD 0x2
  3213. #define DO_ATTACH 0x4
  3214. /* Update task and its cfs_rq load average */
  3215. static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
  3216. {
  3217. u64 now = cfs_rq_clock_pelt(cfs_rq);
  3218. int decayed;
  3219. trace_android_vh_prepare_update_load_avg_se(se, flags);
  3220. /*
  3221. * Track task load average for carrying it to new CPU after migrated, and
  3222. * track group sched_entity load average for task_h_load calc in migration
  3223. */
  3224. if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
  3225. __update_load_avg_se(now, cfs_rq, se);
  3226. trace_android_vh_finish_update_load_avg_se(se, flags);
  3227. decayed = update_cfs_rq_load_avg(now, cfs_rq);
  3228. decayed |= propagate_entity_load_avg(se);
  3229. if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
  3230. /*
  3231. * DO_ATTACH means we're here from enqueue_entity().
  3232. * !last_update_time means we've passed through
  3233. * migrate_task_rq_fair() indicating we migrated.
  3234. *
  3235. * IOW we're enqueueing a task on a new CPU.
  3236. */
  3237. attach_entity_load_avg(cfs_rq, se);
  3238. update_tg_load_avg(cfs_rq);
  3239. } else if (decayed) {
  3240. cfs_rq_util_change(cfs_rq, 0);
  3241. if (flags & UPDATE_TG)
  3242. update_tg_load_avg(cfs_rq);
  3243. }
  3244. }
  3245. #ifndef CONFIG_64BIT
  3246. static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
  3247. {
  3248. u64 last_update_time_copy;
  3249. u64 last_update_time;
  3250. do {
  3251. last_update_time_copy = cfs_rq->load_last_update_time_copy;
  3252. smp_rmb();
  3253. last_update_time = cfs_rq->avg.last_update_time;
  3254. } while (last_update_time != last_update_time_copy);
  3255. return last_update_time;
  3256. }
  3257. #else
  3258. static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
  3259. {
  3260. return cfs_rq->avg.last_update_time;
  3261. }
  3262. #endif
  3263. /*
  3264. * Synchronize entity load avg of dequeued entity without locking
  3265. * the previous rq.
  3266. */
  3267. static void sync_entity_load_avg(struct sched_entity *se)
  3268. {
  3269. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  3270. u64 last_update_time;
  3271. last_update_time = cfs_rq_last_update_time(cfs_rq);
  3272. trace_android_vh_prepare_update_load_avg_se(se, 0);
  3273. __update_load_avg_blocked_se(last_update_time, se);
  3274. trace_android_vh_finish_update_load_avg_se(se, 0);
  3275. }
  3276. /*
  3277. * Task first catches up with cfs_rq, and then subtract
  3278. * itself from the cfs_rq (task must be off the queue now).
  3279. */
  3280. static void remove_entity_load_avg(struct sched_entity *se)
  3281. {
  3282. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  3283. unsigned long flags;
  3284. /*
  3285. * tasks cannot exit without having gone through wake_up_new_task() ->
  3286. * post_init_entity_util_avg() which will have added things to the
  3287. * cfs_rq, so we can remove unconditionally.
  3288. */
  3289. sync_entity_load_avg(se);
  3290. raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
  3291. ++cfs_rq->removed.nr;
  3292. cfs_rq->removed.util_avg += se->avg.util_avg;
  3293. cfs_rq->removed.load_avg += se->avg.load_avg;
  3294. cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
  3295. raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
  3296. }
  3297. static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
  3298. {
  3299. return cfs_rq->avg.runnable_avg;
  3300. }
  3301. static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
  3302. {
  3303. return cfs_rq->avg.load_avg;
  3304. }
  3305. static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
  3306. static inline unsigned long task_util(struct task_struct *p)
  3307. {
  3308. return READ_ONCE(p->se.avg.util_avg);
  3309. }
  3310. static inline unsigned long _task_util_est(struct task_struct *p)
  3311. {
  3312. struct util_est ue = READ_ONCE(p->se.avg.util_est);
  3313. return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
  3314. }
  3315. static inline unsigned long task_util_est(struct task_struct *p)
  3316. {
  3317. return max(task_util(p), _task_util_est(p));
  3318. }
  3319. #ifdef CONFIG_UCLAMP_TASK
  3320. static inline unsigned long uclamp_task_util(struct task_struct *p)
  3321. {
  3322. return clamp(task_util_est(p),
  3323. uclamp_eff_value(p, UCLAMP_MIN),
  3324. uclamp_eff_value(p, UCLAMP_MAX));
  3325. }
  3326. #else
  3327. static inline unsigned long uclamp_task_util(struct task_struct *p)
  3328. {
  3329. return task_util_est(p);
  3330. }
  3331. #endif
  3332. static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
  3333. struct task_struct *p)
  3334. {
  3335. unsigned int enqueued;
  3336. if (!sched_feat(UTIL_EST))
  3337. return;
  3338. /* Update root cfs_rq's estimated utilization */
  3339. enqueued = cfs_rq->avg.util_est.enqueued;
  3340. enqueued += _task_util_est(p);
  3341. WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
  3342. trace_sched_util_est_cfs_tp(cfs_rq);
  3343. }
  3344. static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
  3345. struct task_struct *p)
  3346. {
  3347. unsigned int enqueued;
  3348. if (!sched_feat(UTIL_EST))
  3349. return;
  3350. /* Update root cfs_rq's estimated utilization */
  3351. enqueued = cfs_rq->avg.util_est.enqueued;
  3352. enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
  3353. WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
  3354. trace_sched_util_est_cfs_tp(cfs_rq);
  3355. }
  3356. #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
  3357. /*
  3358. * Check if a (signed) value is within a specified (unsigned) margin,
  3359. * based on the observation that:
  3360. *
  3361. * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
  3362. *
  3363. * NOTE: this only works when value + maring < INT_MAX.
  3364. */
  3365. static inline bool within_margin(int value, int margin)
  3366. {
  3367. return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
  3368. }
  3369. static inline void util_est_update(struct cfs_rq *cfs_rq,
  3370. struct task_struct *p,
  3371. bool task_sleep)
  3372. {
  3373. long last_ewma_diff, last_enqueued_diff;
  3374. struct util_est ue;
  3375. int ret = 0;
  3376. trace_android_rvh_util_est_update(cfs_rq, p, task_sleep, &ret);
  3377. if (ret)
  3378. return;
  3379. if (!sched_feat(UTIL_EST))
  3380. return;
  3381. /*
  3382. * Skip update of task's estimated utilization when the task has not
  3383. * yet completed an activation, e.g. being migrated.
  3384. */
  3385. if (!task_sleep)
  3386. return;
  3387. /*
  3388. * If the PELT values haven't changed since enqueue time,
  3389. * skip the util_est update.
  3390. */
  3391. ue = p->se.avg.util_est;
  3392. if (ue.enqueued & UTIL_AVG_UNCHANGED)
  3393. return;
  3394. last_enqueued_diff = ue.enqueued;
  3395. /*
  3396. * Reset EWMA on utilization increases, the moving average is used only
  3397. * to smooth utilization decreases.
  3398. */
  3399. ue.enqueued = task_util(p);
  3400. if (sched_feat(UTIL_EST_FASTUP)) {
  3401. if (ue.ewma < ue.enqueued) {
  3402. ue.ewma = ue.enqueued;
  3403. goto done;
  3404. }
  3405. }
  3406. /*
  3407. * Skip update of task's estimated utilization when its members are
  3408. * already ~1% close to its last activation value.
  3409. */
  3410. last_ewma_diff = ue.enqueued - ue.ewma;
  3411. last_enqueued_diff -= ue.enqueued;
  3412. if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
  3413. if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
  3414. goto done;
  3415. return;
  3416. }
  3417. /*
  3418. * To avoid overestimation of actual task utilization, skip updates if
  3419. * we cannot grant there is idle time in this CPU.
  3420. */
  3421. if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
  3422. return;
  3423. /*
  3424. * Update Task's estimated utilization
  3425. *
  3426. * When *p completes an activation we can consolidate another sample
  3427. * of the task size. This is done by storing the current PELT value
  3428. * as ue.enqueued and by using this value to update the Exponential
  3429. * Weighted Moving Average (EWMA):
  3430. *
  3431. * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1)
  3432. * = w * task_util(p) + ewma(t-1) - w * ewma(t-1)
  3433. * = w * (task_util(p) - ewma(t-1)) + ewma(t-1)
  3434. * = w * ( last_ewma_diff ) + ewma(t-1)
  3435. * = w * (last_ewma_diff + ewma(t-1) / w)
  3436. *
  3437. * Where 'w' is the weight of new samples, which is configured to be
  3438. * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
  3439. */
  3440. ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
  3441. ue.ewma += last_ewma_diff;
  3442. ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
  3443. done:
  3444. ue.enqueued |= UTIL_AVG_UNCHANGED;
  3445. WRITE_ONCE(p->se.avg.util_est, ue);
  3446. trace_sched_util_est_se_tp(&p->se);
  3447. }
  3448. static inline int task_fits_capacity(struct task_struct *p, long capacity)
  3449. {
  3450. return fits_capacity(uclamp_task_util(p), capacity);
  3451. }
  3452. static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
  3453. {
  3454. bool need_update = true;
  3455. trace_android_rvh_update_misfit_status(p, rq, &need_update);
  3456. if (!static_branch_unlikely(&sched_asym_cpucapacity) || !need_update)
  3457. return;
  3458. if (!p || p->nr_cpus_allowed == 1) {
  3459. rq->misfit_task_load = 0;
  3460. return;
  3461. }
  3462. if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
  3463. rq->misfit_task_load = 0;
  3464. return;
  3465. }
  3466. /*
  3467. * Make sure that misfit_task_load will not be null even if
  3468. * task_h_load() returns 0.
  3469. */
  3470. rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
  3471. }
  3472. #else /* CONFIG_SMP */
  3473. #define UPDATE_TG 0x0
  3474. #define SKIP_AGE_LOAD 0x0
  3475. #define DO_ATTACH 0x0
  3476. static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
  3477. {
  3478. cfs_rq_util_change(cfs_rq, 0);
  3479. }
  3480. static inline void remove_entity_load_avg(struct sched_entity *se) {}
  3481. static inline void
  3482. attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
  3483. static inline void
  3484. detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
  3485. static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
  3486. {
  3487. return 0;
  3488. }
  3489. static inline void
  3490. util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
  3491. static inline void
  3492. util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
  3493. static inline void
  3494. util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
  3495. bool task_sleep) {}
  3496. static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
  3497. #endif /* CONFIG_SMP */
  3498. static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
  3499. {
  3500. #ifdef CONFIG_SCHED_DEBUG
  3501. s64 d = se->vruntime - cfs_rq->min_vruntime;
  3502. if (d < 0)
  3503. d = -d;
  3504. if (d > 3*sysctl_sched_latency)
  3505. schedstat_inc(cfs_rq->nr_spread_over);
  3506. #endif
  3507. }
  3508. static void
  3509. place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
  3510. {
  3511. u64 vruntime = cfs_rq->min_vruntime;
  3512. /*
  3513. * The 'current' period is already promised to the current tasks,
  3514. * however the extra weight of the new task will slow them down a
  3515. * little, place the new task so that it fits in the slot that
  3516. * stays open at the end.
  3517. */
  3518. if (initial && sched_feat(START_DEBIT))
  3519. vruntime += sched_vslice(cfs_rq, se);
  3520. /* sleeps up to a single latency don't count. */
  3521. if (!initial) {
  3522. unsigned long thresh = sysctl_sched_latency;
  3523. /*
  3524. * Halve their sleep time's effect, to allow
  3525. * for a gentler effect of sleepers:
  3526. */
  3527. if (sched_feat(GENTLE_FAIR_SLEEPERS))
  3528. thresh >>= 1;
  3529. vruntime -= thresh;
  3530. }
  3531. /* ensure we never gain time by being placed backwards. */
  3532. se->vruntime = max_vruntime(se->vruntime, vruntime);
  3533. trace_android_rvh_place_entity(cfs_rq, se, initial, vruntime);
  3534. }
  3535. static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
  3536. static inline void check_schedstat_required(void)
  3537. {
  3538. #ifdef CONFIG_SCHEDSTATS
  3539. if (schedstat_enabled())
  3540. return;
  3541. /* Force schedstat enabled if a dependent tracepoint is active */
  3542. if (trace_sched_stat_wait_enabled() ||
  3543. trace_sched_stat_sleep_enabled() ||
  3544. trace_sched_stat_iowait_enabled() ||
  3545. trace_sched_stat_blocked_enabled() ||
  3546. trace_sched_stat_runtime_enabled()) {
  3547. printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
  3548. "stat_blocked and stat_runtime require the "
  3549. "kernel parameter schedstats=enable or "
  3550. "kernel.sched_schedstats=1\n");
  3551. }
  3552. #endif
  3553. }
  3554. static inline bool cfs_bandwidth_used(void);
  3555. /*
  3556. * MIGRATION
  3557. *
  3558. * dequeue
  3559. * update_curr()
  3560. * update_min_vruntime()
  3561. * vruntime -= min_vruntime
  3562. *
  3563. * enqueue
  3564. * update_curr()
  3565. * update_min_vruntime()
  3566. * vruntime += min_vruntime
  3567. *
  3568. * this way the vruntime transition between RQs is done when both
  3569. * min_vruntime are up-to-date.
  3570. *
  3571. * WAKEUP (remote)
  3572. *
  3573. * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
  3574. * vruntime -= min_vruntime
  3575. *
  3576. * enqueue
  3577. * update_curr()
  3578. * update_min_vruntime()
  3579. * vruntime += min_vruntime
  3580. *
  3581. * this way we don't have the most up-to-date min_vruntime on the originating
  3582. * CPU and an up-to-date min_vruntime on the destination CPU.
  3583. */
  3584. static void
  3585. enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
  3586. {
  3587. bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
  3588. bool curr = cfs_rq->curr == se;
  3589. /*
  3590. * If we're the current task, we must renormalise before calling
  3591. * update_curr().
  3592. */
  3593. if (renorm && curr)
  3594. se->vruntime += cfs_rq->min_vruntime;
  3595. update_curr(cfs_rq);
  3596. /*
  3597. * Otherwise, renormalise after, such that we're placed at the current
  3598. * moment in time, instead of some random moment in the past. Being
  3599. * placed in the past could significantly boost this task to the
  3600. * fairness detriment of existing tasks.
  3601. */
  3602. if (renorm && !curr)
  3603. se->vruntime += cfs_rq->min_vruntime;
  3604. /*
  3605. * When enqueuing a sched_entity, we must:
  3606. * - Update loads to have both entity and cfs_rq synced with now.
  3607. * - Add its load to cfs_rq->runnable_avg
  3608. * - For group_entity, update its weight to reflect the new share of
  3609. * its group cfs_rq
  3610. * - Add its new weight to cfs_rq->load.weight
  3611. */
  3612. update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
  3613. se_update_runnable(se);
  3614. update_cfs_group(se);
  3615. account_entity_enqueue(cfs_rq, se);
  3616. if (flags & ENQUEUE_WAKEUP)
  3617. place_entity(cfs_rq, se, 0);
  3618. check_schedstat_required();
  3619. update_stats_enqueue(cfs_rq, se, flags);
  3620. check_spread(cfs_rq, se);
  3621. if (!curr)
  3622. __enqueue_entity(cfs_rq, se);
  3623. se->on_rq = 1;
  3624. /*
  3625. * When bandwidth control is enabled, cfs might have been removed
  3626. * because of a parent been throttled but cfs->nr_running > 1. Try to
  3627. * add it unconditionnally.
  3628. */
  3629. if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
  3630. list_add_leaf_cfs_rq(cfs_rq);
  3631. if (cfs_rq->nr_running == 1)
  3632. check_enqueue_throttle(cfs_rq);
  3633. }
  3634. static void __clear_buddies_last(struct sched_entity *se)
  3635. {
  3636. for_each_sched_entity(se) {
  3637. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  3638. if (cfs_rq->last != se)
  3639. break;
  3640. cfs_rq->last = NULL;
  3641. }
  3642. }
  3643. static void __clear_buddies_next(struct sched_entity *se)
  3644. {
  3645. for_each_sched_entity(se) {
  3646. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  3647. if (cfs_rq->next != se)
  3648. break;
  3649. cfs_rq->next = NULL;
  3650. }
  3651. }
  3652. static void __clear_buddies_skip(struct sched_entity *se)
  3653. {
  3654. for_each_sched_entity(se) {
  3655. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  3656. if (cfs_rq->skip != se)
  3657. break;
  3658. cfs_rq->skip = NULL;
  3659. }
  3660. }
  3661. static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
  3662. {
  3663. if (cfs_rq->last == se)
  3664. __clear_buddies_last(se);
  3665. if (cfs_rq->next == se)
  3666. __clear_buddies_next(se);
  3667. if (cfs_rq->skip == se)
  3668. __clear_buddies_skip(se);
  3669. }
  3670. static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
  3671. static void
  3672. dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
  3673. {
  3674. /*
  3675. * Update run-time statistics of the 'current'.
  3676. */
  3677. update_curr(cfs_rq);
  3678. /*
  3679. * When dequeuing a sched_entity, we must:
  3680. * - Update loads to have both entity and cfs_rq synced with now.
  3681. * - Subtract its load from the cfs_rq->runnable_avg.
  3682. * - Subtract its previous weight from cfs_rq->load.weight.
  3683. * - For group entity, update its weight to reflect the new share
  3684. * of its group cfs_rq.
  3685. */
  3686. update_load_avg(cfs_rq, se, UPDATE_TG);
  3687. se_update_runnable(se);
  3688. update_stats_dequeue(cfs_rq, se, flags);
  3689. clear_buddies(cfs_rq, se);
  3690. if (se != cfs_rq->curr)
  3691. __dequeue_entity(cfs_rq, se);
  3692. se->on_rq = 0;
  3693. account_entity_dequeue(cfs_rq, se);
  3694. /*
  3695. * Normalize after update_curr(); which will also have moved
  3696. * min_vruntime if @se is the one holding it back. But before doing
  3697. * update_min_vruntime() again, which will discount @se's position and
  3698. * can move min_vruntime forward still more.
  3699. */
  3700. if (!(flags & DEQUEUE_SLEEP))
  3701. se->vruntime -= cfs_rq->min_vruntime;
  3702. /* return excess runtime on last dequeue */
  3703. return_cfs_rq_runtime(cfs_rq);
  3704. update_cfs_group(se);
  3705. /*
  3706. * Now advance min_vruntime if @se was the entity holding it back,
  3707. * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
  3708. * put back on, and if we advance min_vruntime, we'll be placed back
  3709. * further than we started -- ie. we'll be penalized.
  3710. */
  3711. if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
  3712. update_min_vruntime(cfs_rq);
  3713. }
  3714. /*
  3715. * Preempt the current task with a newly woken task if needed:
  3716. */
  3717. static void
  3718. check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  3719. {
  3720. unsigned long ideal_runtime, delta_exec;
  3721. struct sched_entity *se;
  3722. s64 delta;
  3723. bool skip_preempt = false;
  3724. ideal_runtime = sched_slice(cfs_rq, curr);
  3725. delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
  3726. trace_android_rvh_check_preempt_tick(current, &ideal_runtime, &skip_preempt,
  3727. delta_exec, cfs_rq, curr, sysctl_sched_min_granularity);
  3728. if (skip_preempt)
  3729. return;
  3730. if (delta_exec > ideal_runtime) {
  3731. resched_curr(rq_of(cfs_rq));
  3732. /*
  3733. * The current task ran long enough, ensure it doesn't get
  3734. * re-elected due to buddy favours.
  3735. */
  3736. clear_buddies(cfs_rq, curr);
  3737. return;
  3738. }
  3739. /*
  3740. * Ensure that a task that missed wakeup preemption by a
  3741. * narrow margin doesn't have to wait for a full slice.
  3742. * This also mitigates buddy induced latencies under load.
  3743. */
  3744. if (delta_exec < sysctl_sched_min_granularity)
  3745. return;
  3746. se = __pick_first_entity(cfs_rq);
  3747. delta = curr->vruntime - se->vruntime;
  3748. if (delta < 0)
  3749. return;
  3750. if (delta > ideal_runtime)
  3751. resched_curr(rq_of(cfs_rq));
  3752. }
  3753. void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  3754. {
  3755. /* 'current' is not kept within the tree. */
  3756. if (se->on_rq) {
  3757. /*
  3758. * Any task has to be enqueued before it get to execute on
  3759. * a CPU. So account for the time it spent waiting on the
  3760. * runqueue.
  3761. */
  3762. update_stats_wait_end(cfs_rq, se);
  3763. __dequeue_entity(cfs_rq, se);
  3764. update_load_avg(cfs_rq, se, UPDATE_TG);
  3765. }
  3766. update_stats_curr_start(cfs_rq, se);
  3767. cfs_rq->curr = se;
  3768. /*
  3769. * Track our maximum slice length, if the CPU's load is at
  3770. * least twice that of our own weight (i.e. dont track it
  3771. * when there are only lesser-weight tasks around):
  3772. */
  3773. if (schedstat_enabled() &&
  3774. rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
  3775. schedstat_set(se->statistics.slice_max,
  3776. max((u64)schedstat_val(se->statistics.slice_max),
  3777. se->sum_exec_runtime - se->prev_sum_exec_runtime));
  3778. }
  3779. se->prev_sum_exec_runtime = se->sum_exec_runtime;
  3780. }
  3781. EXPORT_SYMBOL_GPL(set_next_entity);
  3782. static int
  3783. wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
  3784. /*
  3785. * Pick the next process, keeping these things in mind, in this order:
  3786. * 1) keep things fair between processes/task groups
  3787. * 2) pick the "next" process, since someone really wants that to run
  3788. * 3) pick the "last" process, for cache locality
  3789. * 4) do not run the "skip" process, if something else is available
  3790. */
  3791. static struct sched_entity *
  3792. pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  3793. {
  3794. struct sched_entity *left = __pick_first_entity(cfs_rq);
  3795. struct sched_entity *se = NULL;
  3796. trace_android_rvh_pick_next_entity(cfs_rq, curr, &se);
  3797. if (se)
  3798. goto done;
  3799. /*
  3800. * If curr is set we have to see if its left of the leftmost entity
  3801. * still in the tree, provided there was anything in the tree at all.
  3802. */
  3803. if (!left || (curr && entity_before(curr, left)))
  3804. left = curr;
  3805. se = left; /* ideally we run the leftmost entity */
  3806. /*
  3807. * Avoid running the skip buddy, if running something else can
  3808. * be done without getting too unfair.
  3809. */
  3810. if (cfs_rq->skip == se) {
  3811. struct sched_entity *second;
  3812. if (se == curr) {
  3813. second = __pick_first_entity(cfs_rq);
  3814. } else {
  3815. second = __pick_next_entity(se);
  3816. if (!second || (curr && entity_before(curr, second)))
  3817. second = curr;
  3818. }
  3819. if (second && wakeup_preempt_entity(second, left) < 1)
  3820. se = second;
  3821. }
  3822. if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) {
  3823. /*
  3824. * Someone really wants this to run. If it's not unfair, run it.
  3825. */
  3826. se = cfs_rq->next;
  3827. } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) {
  3828. /*
  3829. * Prefer last buddy, try to return the CPU to a preempted task.
  3830. */
  3831. se = cfs_rq->last;
  3832. }
  3833. done:
  3834. clear_buddies(cfs_rq, se);
  3835. return se;
  3836. }
  3837. static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
  3838. static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
  3839. {
  3840. /*
  3841. * If still on the runqueue then deactivate_task()
  3842. * was not called and update_curr() has to be done:
  3843. */
  3844. if (prev->on_rq)
  3845. update_curr(cfs_rq);
  3846. /* throttle cfs_rqs exceeding runtime */
  3847. check_cfs_rq_runtime(cfs_rq);
  3848. check_spread(cfs_rq, prev);
  3849. if (prev->on_rq) {
  3850. update_stats_wait_start(cfs_rq, prev);
  3851. /* Put 'current' back into the tree. */
  3852. __enqueue_entity(cfs_rq, prev);
  3853. /* in !on_rq case, update occurred at dequeue */
  3854. update_load_avg(cfs_rq, prev, 0);
  3855. }
  3856. cfs_rq->curr = NULL;
  3857. }
  3858. static void
  3859. entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
  3860. {
  3861. /*
  3862. * Update run-time statistics of the 'current'.
  3863. */
  3864. update_curr(cfs_rq);
  3865. /*
  3866. * Ensure that runnable average is periodically updated.
  3867. */
  3868. update_load_avg(cfs_rq, curr, UPDATE_TG);
  3869. update_cfs_group(curr);
  3870. #ifdef CONFIG_SCHED_HRTICK
  3871. /*
  3872. * queued ticks are scheduled to match the slice, so don't bother
  3873. * validating it and just reschedule.
  3874. */
  3875. if (queued) {
  3876. resched_curr(rq_of(cfs_rq));
  3877. return;
  3878. }
  3879. /*
  3880. * don't let the period tick interfere with the hrtick preemption
  3881. */
  3882. if (!sched_feat(DOUBLE_TICK) &&
  3883. hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
  3884. return;
  3885. #endif
  3886. if (cfs_rq->nr_running > 1)
  3887. check_preempt_tick(cfs_rq, curr);
  3888. }
  3889. /**************************************************
  3890. * CFS bandwidth control machinery
  3891. */
  3892. #ifdef CONFIG_CFS_BANDWIDTH
  3893. #ifdef CONFIG_JUMP_LABEL
  3894. static struct static_key __cfs_bandwidth_used;
  3895. static inline bool cfs_bandwidth_used(void)
  3896. {
  3897. return static_key_false(&__cfs_bandwidth_used);
  3898. }
  3899. void cfs_bandwidth_usage_inc(void)
  3900. {
  3901. static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
  3902. }
  3903. void cfs_bandwidth_usage_dec(void)
  3904. {
  3905. static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
  3906. }
  3907. #else /* CONFIG_JUMP_LABEL */
  3908. static bool cfs_bandwidth_used(void)
  3909. {
  3910. return true;
  3911. }
  3912. void cfs_bandwidth_usage_inc(void) {}
  3913. void cfs_bandwidth_usage_dec(void) {}
  3914. #endif /* CONFIG_JUMP_LABEL */
  3915. /*
  3916. * default period for cfs group bandwidth.
  3917. * default: 0.1s, units: nanoseconds
  3918. */
  3919. static inline u64 default_cfs_period(void)
  3920. {
  3921. return 100000000ULL;
  3922. }
  3923. static inline u64 sched_cfs_bandwidth_slice(void)
  3924. {
  3925. return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
  3926. }
  3927. /*
  3928. * Replenish runtime according to assigned quota. We use sched_clock_cpu
  3929. * directly instead of rq->clock to avoid adding additional synchronization
  3930. * around rq->lock.
  3931. *
  3932. * requires cfs_b->lock
  3933. */
  3934. void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
  3935. {
  3936. if (cfs_b->quota != RUNTIME_INF)
  3937. cfs_b->runtime = cfs_b->quota;
  3938. }
  3939. static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
  3940. {
  3941. return &tg->cfs_bandwidth;
  3942. }
  3943. /* returns 0 on failure to allocate runtime */
  3944. static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
  3945. struct cfs_rq *cfs_rq, u64 target_runtime)
  3946. {
  3947. u64 min_amount, amount = 0;
  3948. lockdep_assert_held(&cfs_b->lock);
  3949. /* note: this is a positive sum as runtime_remaining <= 0 */
  3950. min_amount = target_runtime - cfs_rq->runtime_remaining;
  3951. if (cfs_b->quota == RUNTIME_INF)
  3952. amount = min_amount;
  3953. else {
  3954. start_cfs_bandwidth(cfs_b);
  3955. if (cfs_b->runtime > 0) {
  3956. amount = min(cfs_b->runtime, min_amount);
  3957. cfs_b->runtime -= amount;
  3958. cfs_b->idle = 0;
  3959. }
  3960. }
  3961. cfs_rq->runtime_remaining += amount;
  3962. return cfs_rq->runtime_remaining > 0;
  3963. }
  3964. /* returns 0 on failure to allocate runtime */
  3965. static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
  3966. {
  3967. struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
  3968. int ret;
  3969. raw_spin_lock(&cfs_b->lock);
  3970. ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
  3971. raw_spin_unlock(&cfs_b->lock);
  3972. return ret;
  3973. }
  3974. static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
  3975. {
  3976. /* dock delta_exec before expiring quota (as it could span periods) */
  3977. cfs_rq->runtime_remaining -= delta_exec;
  3978. if (likely(cfs_rq->runtime_remaining > 0))
  3979. return;
  3980. if (cfs_rq->throttled)
  3981. return;
  3982. /*
  3983. * if we're unable to extend our runtime we resched so that the active
  3984. * hierarchy can be throttled
  3985. */
  3986. if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
  3987. resched_curr(rq_of(cfs_rq));
  3988. }
  3989. static __always_inline
  3990. void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
  3991. {
  3992. if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
  3993. return;
  3994. __account_cfs_rq_runtime(cfs_rq, delta_exec);
  3995. }
  3996. static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
  3997. {
  3998. return cfs_bandwidth_used() && cfs_rq->throttled;
  3999. }
  4000. /* check whether cfs_rq, or any parent, is throttled */
  4001. static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
  4002. {
  4003. return cfs_bandwidth_used() && cfs_rq->throttle_count;
  4004. }
  4005. /*
  4006. * Ensure that neither of the group entities corresponding to src_cpu or
  4007. * dest_cpu are members of a throttled hierarchy when performing group
  4008. * load-balance operations.
  4009. */
  4010. static inline int throttled_lb_pair(struct task_group *tg,
  4011. int src_cpu, int dest_cpu)
  4012. {
  4013. struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
  4014. src_cfs_rq = tg->cfs_rq[src_cpu];
  4015. dest_cfs_rq = tg->cfs_rq[dest_cpu];
  4016. return throttled_hierarchy(src_cfs_rq) ||
  4017. throttled_hierarchy(dest_cfs_rq);
  4018. }
  4019. static int tg_unthrottle_up(struct task_group *tg, void *data)
  4020. {
  4021. struct rq *rq = data;
  4022. struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
  4023. cfs_rq->throttle_count--;
  4024. if (!cfs_rq->throttle_count) {
  4025. cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
  4026. cfs_rq->throttled_clock_task;
  4027. /* Add cfs_rq with already running entity in the list */
  4028. if (cfs_rq->nr_running >= 1)
  4029. list_add_leaf_cfs_rq(cfs_rq);
  4030. }
  4031. return 0;
  4032. }
  4033. static int tg_throttle_down(struct task_group *tg, void *data)
  4034. {
  4035. struct rq *rq = data;
  4036. struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
  4037. /* group is entering throttled state, stop time */
  4038. if (!cfs_rq->throttle_count) {
  4039. cfs_rq->throttled_clock_task = rq_clock_task(rq);
  4040. list_del_leaf_cfs_rq(cfs_rq);
  4041. }
  4042. cfs_rq->throttle_count++;
  4043. return 0;
  4044. }
  4045. static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
  4046. {
  4047. struct rq *rq = rq_of(cfs_rq);
  4048. struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
  4049. struct sched_entity *se;
  4050. long task_delta, idle_task_delta, dequeue = 1;
  4051. raw_spin_lock(&cfs_b->lock);
  4052. /* This will start the period timer if necessary */
  4053. if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
  4054. /*
  4055. * We have raced with bandwidth becoming available, and if we
  4056. * actually throttled the timer might not unthrottle us for an
  4057. * entire period. We additionally needed to make sure that any
  4058. * subsequent check_cfs_rq_runtime calls agree not to throttle
  4059. * us, as we may commit to do cfs put_prev+pick_next, so we ask
  4060. * for 1ns of runtime rather than just check cfs_b.
  4061. */
  4062. dequeue = 0;
  4063. } else {
  4064. list_add_tail_rcu(&cfs_rq->throttled_list,
  4065. &cfs_b->throttled_cfs_rq);
  4066. }
  4067. raw_spin_unlock(&cfs_b->lock);
  4068. if (!dequeue)
  4069. return false; /* Throttle no longer required. */
  4070. se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
  4071. /* freeze hierarchy runnable averages while throttled */
  4072. rcu_read_lock();
  4073. walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
  4074. rcu_read_unlock();
  4075. task_delta = cfs_rq->h_nr_running;
  4076. idle_task_delta = cfs_rq->idle_h_nr_running;
  4077. for_each_sched_entity(se) {
  4078. struct cfs_rq *qcfs_rq = cfs_rq_of(se);
  4079. /* throttled entity or throttle-on-deactivate */
  4080. if (!se->on_rq)
  4081. break;
  4082. if (dequeue) {
  4083. dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
  4084. } else {
  4085. update_load_avg(qcfs_rq, se, 0);
  4086. se_update_runnable(se);
  4087. }
  4088. qcfs_rq->h_nr_running -= task_delta;
  4089. qcfs_rq->idle_h_nr_running -= idle_task_delta;
  4090. if (qcfs_rq->load.weight)
  4091. dequeue = 0;
  4092. }
  4093. if (!se)
  4094. sub_nr_running(rq, task_delta);
  4095. /*
  4096. * Note: distribution will already see us throttled via the
  4097. * throttled-list. rq->lock protects completion.
  4098. */
  4099. cfs_rq->throttled = 1;
  4100. cfs_rq->throttled_clock = rq_clock(rq);
  4101. return true;
  4102. }
  4103. void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
  4104. {
  4105. struct rq *rq = rq_of(cfs_rq);
  4106. struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
  4107. struct sched_entity *se;
  4108. long task_delta, idle_task_delta;
  4109. se = cfs_rq->tg->se[cpu_of(rq)];
  4110. cfs_rq->throttled = 0;
  4111. update_rq_clock(rq);
  4112. raw_spin_lock(&cfs_b->lock);
  4113. cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
  4114. list_del_rcu(&cfs_rq->throttled_list);
  4115. raw_spin_unlock(&cfs_b->lock);
  4116. /* update hierarchical throttle state */
  4117. walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
  4118. if (!cfs_rq->load.weight)
  4119. return;
  4120. task_delta = cfs_rq->h_nr_running;
  4121. idle_task_delta = cfs_rq->idle_h_nr_running;
  4122. for_each_sched_entity(se) {
  4123. if (se->on_rq)
  4124. break;
  4125. cfs_rq = cfs_rq_of(se);
  4126. enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
  4127. cfs_rq->h_nr_running += task_delta;
  4128. cfs_rq->idle_h_nr_running += idle_task_delta;
  4129. /* end evaluation on encountering a throttled cfs_rq */
  4130. if (cfs_rq_throttled(cfs_rq))
  4131. goto unthrottle_throttle;
  4132. }
  4133. for_each_sched_entity(se) {
  4134. cfs_rq = cfs_rq_of(se);
  4135. update_load_avg(cfs_rq, se, UPDATE_TG);
  4136. se_update_runnable(se);
  4137. cfs_rq->h_nr_running += task_delta;
  4138. cfs_rq->idle_h_nr_running += idle_task_delta;
  4139. /* end evaluation on encountering a throttled cfs_rq */
  4140. if (cfs_rq_throttled(cfs_rq))
  4141. goto unthrottle_throttle;
  4142. /*
  4143. * One parent has been throttled and cfs_rq removed from the
  4144. * list. Add it back to not break the leaf list.
  4145. */
  4146. if (throttled_hierarchy(cfs_rq))
  4147. list_add_leaf_cfs_rq(cfs_rq);
  4148. }
  4149. /* At this point se is NULL and we are at root level*/
  4150. add_nr_running(rq, task_delta);
  4151. unthrottle_throttle:
  4152. /*
  4153. * The cfs_rq_throttled() breaks in the above iteration can result in
  4154. * incomplete leaf list maintenance, resulting in triggering the
  4155. * assertion below.
  4156. */
  4157. for_each_sched_entity(se) {
  4158. cfs_rq = cfs_rq_of(se);
  4159. if (list_add_leaf_cfs_rq(cfs_rq))
  4160. break;
  4161. }
  4162. assert_list_leaf_cfs_rq(rq);
  4163. /* Determine whether we need to wake up potentially idle CPU: */
  4164. if (rq->curr == rq->idle && rq->cfs.nr_running)
  4165. resched_curr(rq);
  4166. }
  4167. static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
  4168. {
  4169. struct cfs_rq *cfs_rq;
  4170. u64 runtime, remaining = 1;
  4171. rcu_read_lock();
  4172. list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
  4173. throttled_list) {
  4174. struct rq *rq = rq_of(cfs_rq);
  4175. struct rq_flags rf;
  4176. rq_lock_irqsave(rq, &rf);
  4177. if (!cfs_rq_throttled(cfs_rq))
  4178. goto next;
  4179. /* By the above check, this should never be true */
  4180. SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
  4181. raw_spin_lock(&cfs_b->lock);
  4182. runtime = -cfs_rq->runtime_remaining + 1;
  4183. if (runtime > cfs_b->runtime)
  4184. runtime = cfs_b->runtime;
  4185. cfs_b->runtime -= runtime;
  4186. remaining = cfs_b->runtime;
  4187. raw_spin_unlock(&cfs_b->lock);
  4188. cfs_rq->runtime_remaining += runtime;
  4189. /* we check whether we're throttled above */
  4190. if (cfs_rq->runtime_remaining > 0)
  4191. unthrottle_cfs_rq(cfs_rq);
  4192. next:
  4193. rq_unlock_irqrestore(rq, &rf);
  4194. if (!remaining)
  4195. break;
  4196. }
  4197. rcu_read_unlock();
  4198. }
  4199. /*
  4200. * Responsible for refilling a task_group's bandwidth and unthrottling its
  4201. * cfs_rqs as appropriate. If there has been no activity within the last
  4202. * period the timer is deactivated until scheduling resumes; cfs_b->idle is
  4203. * used to track this state.
  4204. */
  4205. static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
  4206. {
  4207. int throttled;
  4208. /* no need to continue the timer with no bandwidth constraint */
  4209. if (cfs_b->quota == RUNTIME_INF)
  4210. goto out_deactivate;
  4211. throttled = !list_empty(&cfs_b->throttled_cfs_rq);
  4212. cfs_b->nr_periods += overrun;
  4213. /*
  4214. * idle depends on !throttled (for the case of a large deficit), and if
  4215. * we're going inactive then everything else can be deferred
  4216. */
  4217. if (cfs_b->idle && !throttled)
  4218. goto out_deactivate;
  4219. __refill_cfs_bandwidth_runtime(cfs_b);
  4220. if (!throttled) {
  4221. /* mark as potentially idle for the upcoming period */
  4222. cfs_b->idle = 1;
  4223. return 0;
  4224. }
  4225. /* account preceding periods in which throttling occurred */
  4226. cfs_b->nr_throttled += overrun;
  4227. /*
  4228. * This check is repeated as we release cfs_b->lock while we unthrottle.
  4229. */
  4230. while (throttled && cfs_b->runtime > 0) {
  4231. raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
  4232. /* we can't nest cfs_b->lock while distributing bandwidth */
  4233. distribute_cfs_runtime(cfs_b);
  4234. raw_spin_lock_irqsave(&cfs_b->lock, flags);
  4235. throttled = !list_empty(&cfs_b->throttled_cfs_rq);
  4236. }
  4237. /*
  4238. * While we are ensured activity in the period following an
  4239. * unthrottle, this also covers the case in which the new bandwidth is
  4240. * insufficient to cover the existing bandwidth deficit. (Forcing the
  4241. * timer to remain active while there are any throttled entities.)
  4242. */
  4243. cfs_b->idle = 0;
  4244. return 0;
  4245. out_deactivate:
  4246. return 1;
  4247. }
  4248. /* a cfs_rq won't donate quota below this amount */
  4249. static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
  4250. /* minimum remaining period time to redistribute slack quota */
  4251. static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
  4252. /* how long we wait to gather additional slack before distributing */
  4253. static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
  4254. /*
  4255. * Are we near the end of the current quota period?
  4256. *
  4257. * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
  4258. * hrtimer base being cleared by hrtimer_start. In the case of
  4259. * migrate_hrtimers, base is never cleared, so we are fine.
  4260. */
  4261. static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
  4262. {
  4263. struct hrtimer *refresh_timer = &cfs_b->period_timer;
  4264. s64 remaining;
  4265. /* if the call-back is running a quota refresh is already occurring */
  4266. if (hrtimer_callback_running(refresh_timer))
  4267. return 1;
  4268. /* is a quota refresh about to occur? */
  4269. remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
  4270. if (remaining < (s64)min_expire)
  4271. return 1;
  4272. return 0;
  4273. }
  4274. static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
  4275. {
  4276. u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
  4277. /* if there's a quota refresh soon don't bother with slack */
  4278. if (runtime_refresh_within(cfs_b, min_left))
  4279. return;
  4280. /* don't push forwards an existing deferred unthrottle */
  4281. if (cfs_b->slack_started)
  4282. return;
  4283. cfs_b->slack_started = true;
  4284. hrtimer_start(&cfs_b->slack_timer,
  4285. ns_to_ktime(cfs_bandwidth_slack_period),
  4286. HRTIMER_MODE_REL);
  4287. }
  4288. /* we know any runtime found here is valid as update_curr() precedes return */
  4289. static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
  4290. {
  4291. struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
  4292. s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
  4293. if (slack_runtime <= 0)
  4294. return;
  4295. raw_spin_lock(&cfs_b->lock);
  4296. if (cfs_b->quota != RUNTIME_INF) {
  4297. cfs_b->runtime += slack_runtime;
  4298. /* we are under rq->lock, defer unthrottling using a timer */
  4299. if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
  4300. !list_empty(&cfs_b->throttled_cfs_rq))
  4301. start_cfs_slack_bandwidth(cfs_b);
  4302. }
  4303. raw_spin_unlock(&cfs_b->lock);
  4304. /* even if it's not valid for return we don't want to try again */
  4305. cfs_rq->runtime_remaining -= slack_runtime;
  4306. }
  4307. static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
  4308. {
  4309. if (!cfs_bandwidth_used())
  4310. return;
  4311. if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
  4312. return;
  4313. __return_cfs_rq_runtime(cfs_rq);
  4314. }
  4315. /*
  4316. * This is done with a timer (instead of inline with bandwidth return) since
  4317. * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
  4318. */
  4319. static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
  4320. {
  4321. u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
  4322. unsigned long flags;
  4323. /* confirm we're still not at a refresh boundary */
  4324. raw_spin_lock_irqsave(&cfs_b->lock, flags);
  4325. cfs_b->slack_started = false;
  4326. if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
  4327. raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
  4328. return;
  4329. }
  4330. if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
  4331. runtime = cfs_b->runtime;
  4332. raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
  4333. if (!runtime)
  4334. return;
  4335. distribute_cfs_runtime(cfs_b);
  4336. raw_spin_lock_irqsave(&cfs_b->lock, flags);
  4337. raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
  4338. }
  4339. /*
  4340. * When a group wakes up we want to make sure that its quota is not already
  4341. * expired/exceeded, otherwise it may be allowed to steal additional ticks of
  4342. * runtime as update_curr() throttling can not trigger until it's on-rq.
  4343. */
  4344. static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
  4345. {
  4346. if (!cfs_bandwidth_used())
  4347. return;
  4348. /* an active group must be handled by the update_curr()->put() path */
  4349. if (!cfs_rq->runtime_enabled || cfs_rq->curr)
  4350. return;
  4351. /* ensure the group is not already throttled */
  4352. if (cfs_rq_throttled(cfs_rq))
  4353. return;
  4354. /* update runtime allocation */
  4355. account_cfs_rq_runtime(cfs_rq, 0);
  4356. if (cfs_rq->runtime_remaining <= 0)
  4357. throttle_cfs_rq(cfs_rq);
  4358. }
  4359. static void sync_throttle(struct task_group *tg, int cpu)
  4360. {
  4361. struct cfs_rq *pcfs_rq, *cfs_rq;
  4362. if (!cfs_bandwidth_used())
  4363. return;
  4364. if (!tg->parent)
  4365. return;
  4366. cfs_rq = tg->cfs_rq[cpu];
  4367. pcfs_rq = tg->parent->cfs_rq[cpu];
  4368. cfs_rq->throttle_count = pcfs_rq->throttle_count;
  4369. cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
  4370. }
  4371. /* conditionally throttle active cfs_rq's from put_prev_entity() */
  4372. static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
  4373. {
  4374. if (!cfs_bandwidth_used())
  4375. return false;
  4376. if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
  4377. return false;
  4378. /*
  4379. * it's possible for a throttled entity to be forced into a running
  4380. * state (e.g. set_curr_task), in this case we're finished.
  4381. */
  4382. if (cfs_rq_throttled(cfs_rq))
  4383. return true;
  4384. return throttle_cfs_rq(cfs_rq);
  4385. }
  4386. static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
  4387. {
  4388. struct cfs_bandwidth *cfs_b =
  4389. container_of(timer, struct cfs_bandwidth, slack_timer);
  4390. do_sched_cfs_slack_timer(cfs_b);
  4391. return HRTIMER_NORESTART;
  4392. }
  4393. extern const u64 max_cfs_quota_period;
  4394. static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
  4395. {
  4396. struct cfs_bandwidth *cfs_b =
  4397. container_of(timer, struct cfs_bandwidth, period_timer);
  4398. unsigned long flags;
  4399. int overrun;
  4400. int idle = 0;
  4401. int count = 0;
  4402. raw_spin_lock_irqsave(&cfs_b->lock, flags);
  4403. for (;;) {
  4404. overrun = hrtimer_forward_now(timer, cfs_b->period);
  4405. if (!overrun)
  4406. break;
  4407. idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
  4408. if (++count > 3) {
  4409. u64 new, old = ktime_to_ns(cfs_b->period);
  4410. /*
  4411. * Grow period by a factor of 2 to avoid losing precision.
  4412. * Precision loss in the quota/period ratio can cause __cfs_schedulable
  4413. * to fail.
  4414. */
  4415. new = old * 2;
  4416. if (new < max_cfs_quota_period) {
  4417. cfs_b->period = ns_to_ktime(new);
  4418. cfs_b->quota *= 2;
  4419. pr_warn_ratelimited(
  4420. "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
  4421. smp_processor_id(),
  4422. div_u64(new, NSEC_PER_USEC),
  4423. div_u64(cfs_b->quota, NSEC_PER_USEC));
  4424. } else {
  4425. pr_warn_ratelimited(
  4426. "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
  4427. smp_processor_id(),
  4428. div_u64(old, NSEC_PER_USEC),
  4429. div_u64(cfs_b->quota, NSEC_PER_USEC));
  4430. }
  4431. /* reset count so we don't come right back in here */
  4432. count = 0;
  4433. }
  4434. }
  4435. if (idle)
  4436. cfs_b->period_active = 0;
  4437. raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
  4438. return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
  4439. }
  4440. void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
  4441. {
  4442. raw_spin_lock_init(&cfs_b->lock);
  4443. cfs_b->runtime = 0;
  4444. cfs_b->quota = RUNTIME_INF;
  4445. cfs_b->period = ns_to_ktime(default_cfs_period());
  4446. INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
  4447. hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
  4448. cfs_b->period_timer.function = sched_cfs_period_timer;
  4449. hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  4450. cfs_b->slack_timer.function = sched_cfs_slack_timer;
  4451. cfs_b->slack_started = false;
  4452. }
  4453. static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
  4454. {
  4455. cfs_rq->runtime_enabled = 0;
  4456. INIT_LIST_HEAD(&cfs_rq->throttled_list);
  4457. }
  4458. void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
  4459. {
  4460. lockdep_assert_held(&cfs_b->lock);
  4461. if (cfs_b->period_active)
  4462. return;
  4463. cfs_b->period_active = 1;
  4464. hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
  4465. hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
  4466. }
  4467. static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
  4468. {
  4469. /* init_cfs_bandwidth() was not called */
  4470. if (!cfs_b->throttled_cfs_rq.next)
  4471. return;
  4472. hrtimer_cancel(&cfs_b->period_timer);
  4473. hrtimer_cancel(&cfs_b->slack_timer);
  4474. }
  4475. /*
  4476. * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
  4477. *
  4478. * The race is harmless, since modifying bandwidth settings of unhooked group
  4479. * bits doesn't do much.
  4480. */
  4481. /* cpu online calback */
  4482. static void __maybe_unused update_runtime_enabled(struct rq *rq)
  4483. {
  4484. struct task_group *tg;
  4485. lockdep_assert_held(&rq->lock);
  4486. rcu_read_lock();
  4487. list_for_each_entry_rcu(tg, &task_groups, list) {
  4488. struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
  4489. struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
  4490. raw_spin_lock(&cfs_b->lock);
  4491. cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
  4492. raw_spin_unlock(&cfs_b->lock);
  4493. }
  4494. rcu_read_unlock();
  4495. }
  4496. /* cpu offline callback */
  4497. static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
  4498. {
  4499. struct task_group *tg;
  4500. lockdep_assert_held(&rq->lock);
  4501. rcu_read_lock();
  4502. list_for_each_entry_rcu(tg, &task_groups, list) {
  4503. struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
  4504. if (!cfs_rq->runtime_enabled)
  4505. continue;
  4506. /*
  4507. * clock_task is not advancing so we just need to make sure
  4508. * there's some valid quota amount
  4509. */
  4510. cfs_rq->runtime_remaining = 1;
  4511. /*
  4512. * Offline rq is schedulable till CPU is completely disabled
  4513. * in take_cpu_down(), so we prevent new cfs throttling here.
  4514. */
  4515. cfs_rq->runtime_enabled = 0;
  4516. if (cfs_rq_throttled(cfs_rq))
  4517. unthrottle_cfs_rq(cfs_rq);
  4518. }
  4519. rcu_read_unlock();
  4520. }
  4521. #else /* CONFIG_CFS_BANDWIDTH */
  4522. static inline bool cfs_bandwidth_used(void)
  4523. {
  4524. return false;
  4525. }
  4526. static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
  4527. static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
  4528. static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
  4529. static inline void sync_throttle(struct task_group *tg, int cpu) {}
  4530. static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
  4531. static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
  4532. {
  4533. return 0;
  4534. }
  4535. static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
  4536. {
  4537. return 0;
  4538. }
  4539. static inline int throttled_lb_pair(struct task_group *tg,
  4540. int src_cpu, int dest_cpu)
  4541. {
  4542. return 0;
  4543. }
  4544. void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
  4545. #ifdef CONFIG_FAIR_GROUP_SCHED
  4546. static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
  4547. #endif
  4548. static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
  4549. {
  4550. return NULL;
  4551. }
  4552. static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
  4553. static inline void update_runtime_enabled(struct rq *rq) {}
  4554. static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
  4555. #endif /* CONFIG_CFS_BANDWIDTH */
  4556. /**************************************************
  4557. * CFS operations on tasks:
  4558. */
  4559. #ifdef CONFIG_SCHED_HRTICK
  4560. static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
  4561. {
  4562. struct sched_entity *se = &p->se;
  4563. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  4564. SCHED_WARN_ON(task_rq(p) != rq);
  4565. if (rq->cfs.h_nr_running > 1) {
  4566. u64 slice = sched_slice(cfs_rq, se);
  4567. u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
  4568. s64 delta = slice - ran;
  4569. if (delta < 0) {
  4570. if (rq->curr == p)
  4571. resched_curr(rq);
  4572. return;
  4573. }
  4574. hrtick_start(rq, delta);
  4575. }
  4576. }
  4577. /*
  4578. * called from enqueue/dequeue and updates the hrtick when the
  4579. * current task is from our class and nr_running is low enough
  4580. * to matter.
  4581. */
  4582. static void hrtick_update(struct rq *rq)
  4583. {
  4584. struct task_struct *curr = rq->curr;
  4585. if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
  4586. return;
  4587. if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
  4588. hrtick_start_fair(rq, curr);
  4589. }
  4590. #else /* !CONFIG_SCHED_HRTICK */
  4591. static inline void
  4592. hrtick_start_fair(struct rq *rq, struct task_struct *p)
  4593. {
  4594. }
  4595. static inline void hrtick_update(struct rq *rq)
  4596. {
  4597. }
  4598. #endif
  4599. #ifdef CONFIG_SMP
  4600. static inline unsigned long cpu_util(int cpu);
  4601. static inline bool cpu_overutilized(int cpu)
  4602. {
  4603. int overutilized = -1;
  4604. trace_android_rvh_cpu_overutilized(cpu, &overutilized);
  4605. if (overutilized != -1)
  4606. return overutilized;
  4607. return !fits_capacity(cpu_util(cpu), capacity_of(cpu));
  4608. }
  4609. static inline void update_overutilized_status(struct rq *rq)
  4610. {
  4611. if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
  4612. WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
  4613. trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
  4614. }
  4615. }
  4616. #else
  4617. static inline void update_overutilized_status(struct rq *rq) { }
  4618. #endif
  4619. /* Runqueue only has SCHED_IDLE tasks enqueued */
  4620. static int sched_idle_rq(struct rq *rq)
  4621. {
  4622. return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
  4623. rq->nr_running);
  4624. }
  4625. #ifdef CONFIG_SMP
  4626. static int sched_idle_cpu(int cpu)
  4627. {
  4628. return sched_idle_rq(cpu_rq(cpu));
  4629. }
  4630. #endif
  4631. /*
  4632. * The enqueue_task method is called before nr_running is
  4633. * increased. Here we update the fair scheduling stats and
  4634. * then put the task into the rbtree:
  4635. */
  4636. static void
  4637. enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
  4638. {
  4639. struct cfs_rq *cfs_rq;
  4640. struct sched_entity *se = &p->se;
  4641. int idle_h_nr_running = task_has_idle_policy(p);
  4642. int task_new = !(flags & ENQUEUE_WAKEUP);
  4643. int should_iowait_boost;
  4644. /*
  4645. * The code below (indirectly) updates schedutil which looks at
  4646. * the cfs_rq utilization to select a frequency.
  4647. * Let's add the task's estimated utilization to the cfs_rq's
  4648. * estimated utilization, before we update schedutil.
  4649. */
  4650. util_est_enqueue(&rq->cfs, p);
  4651. /*
  4652. * If in_iowait is set, the code below may not trigger any cpufreq
  4653. * utilization updates, so do it here explicitly with the IOWAIT flag
  4654. * passed.
  4655. */
  4656. should_iowait_boost = p->in_iowait;
  4657. trace_android_rvh_set_iowait(p, &should_iowait_boost);
  4658. if (should_iowait_boost)
  4659. cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
  4660. for_each_sched_entity(se) {
  4661. if (se->on_rq)
  4662. break;
  4663. cfs_rq = cfs_rq_of(se);
  4664. enqueue_entity(cfs_rq, se, flags);
  4665. cfs_rq->h_nr_running++;
  4666. cfs_rq->idle_h_nr_running += idle_h_nr_running;
  4667. /* end evaluation on encountering a throttled cfs_rq */
  4668. if (cfs_rq_throttled(cfs_rq))
  4669. goto enqueue_throttle;
  4670. flags = ENQUEUE_WAKEUP;
  4671. }
  4672. trace_android_rvh_enqueue_task_fair(rq, p, flags);
  4673. for_each_sched_entity(se) {
  4674. cfs_rq = cfs_rq_of(se);
  4675. update_load_avg(cfs_rq, se, UPDATE_TG);
  4676. se_update_runnable(se);
  4677. update_cfs_group(se);
  4678. cfs_rq->h_nr_running++;
  4679. cfs_rq->idle_h_nr_running += idle_h_nr_running;
  4680. /* end evaluation on encountering a throttled cfs_rq */
  4681. if (cfs_rq_throttled(cfs_rq))
  4682. goto enqueue_throttle;
  4683. /*
  4684. * One parent has been throttled and cfs_rq removed from the
  4685. * list. Add it back to not break the leaf list.
  4686. */
  4687. if (throttled_hierarchy(cfs_rq))
  4688. list_add_leaf_cfs_rq(cfs_rq);
  4689. }
  4690. /* At this point se is NULL and we are at root level*/
  4691. add_nr_running(rq, 1);
  4692. /*
  4693. * Since new tasks are assigned an initial util_avg equal to
  4694. * half of the spare capacity of their CPU, tiny tasks have the
  4695. * ability to cross the overutilized threshold, which will
  4696. * result in the load balancer ruining all the task placement
  4697. * done by EAS. As a way to mitigate that effect, do not account
  4698. * for the first enqueue operation of new tasks during the
  4699. * overutilized flag detection.
  4700. *
  4701. * A better way of solving this problem would be to wait for
  4702. * the PELT signals of tasks to converge before taking them
  4703. * into account, but that is not straightforward to implement,
  4704. * and the following generally works well enough in practice.
  4705. */
  4706. if (!task_new)
  4707. update_overutilized_status(rq);
  4708. enqueue_throttle:
  4709. if (cfs_bandwidth_used()) {
  4710. /*
  4711. * When bandwidth control is enabled; the cfs_rq_throttled()
  4712. * breaks in the above iteration can result in incomplete
  4713. * leaf list maintenance, resulting in triggering the assertion
  4714. * below.
  4715. */
  4716. for_each_sched_entity(se) {
  4717. cfs_rq = cfs_rq_of(se);
  4718. if (list_add_leaf_cfs_rq(cfs_rq))
  4719. break;
  4720. }
  4721. }
  4722. assert_list_leaf_cfs_rq(rq);
  4723. hrtick_update(rq);
  4724. }
  4725. static void set_next_buddy(struct sched_entity *se);
  4726. /*
  4727. * The dequeue_task method is called before nr_running is
  4728. * decreased. We remove the task from the rbtree and
  4729. * update the fair scheduling stats:
  4730. */
  4731. static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
  4732. {
  4733. struct cfs_rq *cfs_rq;
  4734. struct sched_entity *se = &p->se;
  4735. int task_sleep = flags & DEQUEUE_SLEEP;
  4736. int idle_h_nr_running = task_has_idle_policy(p);
  4737. bool was_sched_idle = sched_idle_rq(rq);
  4738. util_est_dequeue(&rq->cfs, p);
  4739. for_each_sched_entity(se) {
  4740. cfs_rq = cfs_rq_of(se);
  4741. dequeue_entity(cfs_rq, se, flags);
  4742. cfs_rq->h_nr_running--;
  4743. cfs_rq->idle_h_nr_running -= idle_h_nr_running;
  4744. /* end evaluation on encountering a throttled cfs_rq */
  4745. if (cfs_rq_throttled(cfs_rq))
  4746. goto dequeue_throttle;
  4747. /* Don't dequeue parent if it has other entities besides us */
  4748. if (cfs_rq->load.weight) {
  4749. /* Avoid re-evaluating load for this entity: */
  4750. se = parent_entity(se);
  4751. /*
  4752. * Bias pick_next to pick a task from this cfs_rq, as
  4753. * p is sleeping when it is within its sched_slice.
  4754. */
  4755. if (task_sleep && se && !throttled_hierarchy(cfs_rq))
  4756. set_next_buddy(se);
  4757. break;
  4758. }
  4759. flags |= DEQUEUE_SLEEP;
  4760. }
  4761. trace_android_rvh_dequeue_task_fair(rq, p, flags);
  4762. for_each_sched_entity(se) {
  4763. cfs_rq = cfs_rq_of(se);
  4764. update_load_avg(cfs_rq, se, UPDATE_TG);
  4765. se_update_runnable(se);
  4766. update_cfs_group(se);
  4767. cfs_rq->h_nr_running--;
  4768. cfs_rq->idle_h_nr_running -= idle_h_nr_running;
  4769. /* end evaluation on encountering a throttled cfs_rq */
  4770. if (cfs_rq_throttled(cfs_rq))
  4771. goto dequeue_throttle;
  4772. }
  4773. /* At this point se is NULL and we are at root level*/
  4774. sub_nr_running(rq, 1);
  4775. /* balance early to pull high priority tasks */
  4776. if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
  4777. rq->next_balance = jiffies;
  4778. dequeue_throttle:
  4779. util_est_update(&rq->cfs, p, task_sleep);
  4780. hrtick_update(rq);
  4781. }
  4782. #ifdef CONFIG_SMP
  4783. /* Working cpumask for: load_balance, load_balance_newidle. */
  4784. DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
  4785. DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
  4786. #ifdef CONFIG_NO_HZ_COMMON
  4787. static struct {
  4788. cpumask_var_t idle_cpus_mask;
  4789. atomic_t nr_cpus;
  4790. int has_blocked; /* Idle CPUS has blocked load */
  4791. unsigned long next_balance; /* in jiffy units */
  4792. unsigned long next_blocked; /* Next update of blocked load in jiffies */
  4793. } nohz ____cacheline_aligned;
  4794. #endif /* CONFIG_NO_HZ_COMMON */
  4795. static unsigned long cpu_load(struct rq *rq)
  4796. {
  4797. return cfs_rq_load_avg(&rq->cfs);
  4798. }
  4799. /*
  4800. * cpu_load_without - compute CPU load without any contributions from *p
  4801. * @cpu: the CPU which load is requested
  4802. * @p: the task which load should be discounted
  4803. *
  4804. * The load of a CPU is defined by the load of tasks currently enqueued on that
  4805. * CPU as well as tasks which are currently sleeping after an execution on that
  4806. * CPU.
  4807. *
  4808. * This method returns the load of the specified CPU by discounting the load of
  4809. * the specified task, whenever the task is currently contributing to the CPU
  4810. * load.
  4811. */
  4812. static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
  4813. {
  4814. struct cfs_rq *cfs_rq;
  4815. unsigned int load;
  4816. /* Task has no contribution or is new */
  4817. if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
  4818. return cpu_load(rq);
  4819. cfs_rq = &rq->cfs;
  4820. load = READ_ONCE(cfs_rq->avg.load_avg);
  4821. /* Discount task's util from CPU's util */
  4822. lsub_positive(&load, task_h_load(p));
  4823. return load;
  4824. }
  4825. static unsigned long cpu_runnable(struct rq *rq)
  4826. {
  4827. return cfs_rq_runnable_avg(&rq->cfs);
  4828. }
  4829. static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
  4830. {
  4831. struct cfs_rq *cfs_rq;
  4832. unsigned int runnable;
  4833. /* Task has no contribution or is new */
  4834. if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
  4835. return cpu_runnable(rq);
  4836. cfs_rq = &rq->cfs;
  4837. runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
  4838. /* Discount task's runnable from CPU's runnable */
  4839. lsub_positive(&runnable, p->se.avg.runnable_avg);
  4840. return runnable;
  4841. }
  4842. static unsigned long capacity_of(int cpu)
  4843. {
  4844. return cpu_rq(cpu)->cpu_capacity;
  4845. }
  4846. static void record_wakee(struct task_struct *p)
  4847. {
  4848. /*
  4849. * Only decay a single time; tasks that have less then 1 wakeup per
  4850. * jiffy will not have built up many flips.
  4851. */
  4852. if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
  4853. current->wakee_flips >>= 1;
  4854. current->wakee_flip_decay_ts = jiffies;
  4855. }
  4856. if (current->last_wakee != p) {
  4857. current->last_wakee = p;
  4858. current->wakee_flips++;
  4859. }
  4860. }
  4861. /*
  4862. * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
  4863. *
  4864. * A waker of many should wake a different task than the one last awakened
  4865. * at a frequency roughly N times higher than one of its wakees.
  4866. *
  4867. * In order to determine whether we should let the load spread vs consolidating
  4868. * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
  4869. * partner, and a factor of lls_size higher frequency in the other.
  4870. *
  4871. * With both conditions met, we can be relatively sure that the relationship is
  4872. * non-monogamous, with partner count exceeding socket size.
  4873. *
  4874. * Waker/wakee being client/server, worker/dispatcher, interrupt source or
  4875. * whatever is irrelevant, spread criteria is apparent partner count exceeds
  4876. * socket size.
  4877. */
  4878. static int wake_wide(struct task_struct *p)
  4879. {
  4880. unsigned int master = current->wakee_flips;
  4881. unsigned int slave = p->wakee_flips;
  4882. int factor = __this_cpu_read(sd_llc_size);
  4883. if (master < slave)
  4884. swap(master, slave);
  4885. if (slave < factor || master < slave * factor)
  4886. return 0;
  4887. return 1;
  4888. }
  4889. /*
  4890. * The purpose of wake_affine() is to quickly determine on which CPU we can run
  4891. * soonest. For the purpose of speed we only consider the waking and previous
  4892. * CPU.
  4893. *
  4894. * wake_affine_idle() - only considers 'now', it check if the waking CPU is
  4895. * cache-affine and is (or will be) idle.
  4896. *
  4897. * wake_affine_weight() - considers the weight to reflect the average
  4898. * scheduling latency of the CPUs. This seems to work
  4899. * for the overloaded case.
  4900. */
  4901. static int
  4902. wake_affine_idle(int this_cpu, int prev_cpu, int sync)
  4903. {
  4904. /*
  4905. * If this_cpu is idle, it implies the wakeup is from interrupt
  4906. * context. Only allow the move if cache is shared. Otherwise an
  4907. * interrupt intensive workload could force all tasks onto one
  4908. * node depending on the IO topology or IRQ affinity settings.
  4909. *
  4910. * If the prev_cpu is idle and cache affine then avoid a migration.
  4911. * There is no guarantee that the cache hot data from an interrupt
  4912. * is more important than cache hot data on the prev_cpu and from
  4913. * a cpufreq perspective, it's better to have higher utilisation
  4914. * on one CPU.
  4915. */
  4916. if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
  4917. return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
  4918. if (sync && cpu_rq(this_cpu)->nr_running == 1)
  4919. return this_cpu;
  4920. return nr_cpumask_bits;
  4921. }
  4922. static int
  4923. wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
  4924. int this_cpu, int prev_cpu, int sync)
  4925. {
  4926. s64 this_eff_load, prev_eff_load;
  4927. unsigned long task_load;
  4928. this_eff_load = cpu_load(cpu_rq(this_cpu));
  4929. if (sync) {
  4930. unsigned long current_load = task_h_load(current);
  4931. if (current_load > this_eff_load)
  4932. return this_cpu;
  4933. this_eff_load -= current_load;
  4934. }
  4935. task_load = task_h_load(p);
  4936. this_eff_load += task_load;
  4937. if (sched_feat(WA_BIAS))
  4938. this_eff_load *= 100;
  4939. this_eff_load *= capacity_of(prev_cpu);
  4940. prev_eff_load = cpu_load(cpu_rq(prev_cpu));
  4941. prev_eff_load -= task_load;
  4942. if (sched_feat(WA_BIAS))
  4943. prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
  4944. prev_eff_load *= capacity_of(this_cpu);
  4945. /*
  4946. * If sync, adjust the weight of prev_eff_load such that if
  4947. * prev_eff == this_eff that select_idle_sibling() will consider
  4948. * stacking the wakee on top of the waker if no other CPU is
  4949. * idle.
  4950. */
  4951. if (sync)
  4952. prev_eff_load += 1;
  4953. return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
  4954. }
  4955. static int wake_affine(struct sched_domain *sd, struct task_struct *p,
  4956. int this_cpu, int prev_cpu, int sync)
  4957. {
  4958. int target = nr_cpumask_bits;
  4959. if (sched_feat(WA_IDLE))
  4960. target = wake_affine_idle(this_cpu, prev_cpu, sync);
  4961. if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
  4962. target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
  4963. schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
  4964. if (target == nr_cpumask_bits)
  4965. return prev_cpu;
  4966. schedstat_inc(sd->ttwu_move_affine);
  4967. schedstat_inc(p->se.statistics.nr_wakeups_affine);
  4968. return target;
  4969. }
  4970. static struct sched_group *
  4971. find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
  4972. /*
  4973. * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
  4974. */
  4975. static int
  4976. find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  4977. {
  4978. unsigned long load, min_load = ULONG_MAX;
  4979. unsigned int min_exit_latency = UINT_MAX;
  4980. u64 latest_idle_timestamp = 0;
  4981. int least_loaded_cpu = this_cpu;
  4982. int shallowest_idle_cpu = -1;
  4983. int i;
  4984. /* Check if we have any choice: */
  4985. if (group->group_weight == 1)
  4986. return cpumask_first(sched_group_span(group));
  4987. /* Traverse only the allowed CPUs */
  4988. for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
  4989. if (sched_idle_cpu(i))
  4990. return i;
  4991. if (available_idle_cpu(i)) {
  4992. struct rq *rq = cpu_rq(i);
  4993. struct cpuidle_state *idle = idle_get_state(rq);
  4994. if (idle && idle->exit_latency < min_exit_latency) {
  4995. /*
  4996. * We give priority to a CPU whose idle state
  4997. * has the smallest exit latency irrespective
  4998. * of any idle timestamp.
  4999. */
  5000. min_exit_latency = idle->exit_latency;
  5001. latest_idle_timestamp = rq->idle_stamp;
  5002. shallowest_idle_cpu = i;
  5003. } else if ((!idle || idle->exit_latency == min_exit_latency) &&
  5004. rq->idle_stamp > latest_idle_timestamp) {
  5005. /*
  5006. * If equal or no active idle state, then
  5007. * the most recently idled CPU might have
  5008. * a warmer cache.
  5009. */
  5010. latest_idle_timestamp = rq->idle_stamp;
  5011. shallowest_idle_cpu = i;
  5012. }
  5013. } else if (shallowest_idle_cpu == -1) {
  5014. load = cpu_load(cpu_rq(i));
  5015. if (load < min_load) {
  5016. min_load = load;
  5017. least_loaded_cpu = i;
  5018. }
  5019. }
  5020. }
  5021. return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
  5022. }
  5023. static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
  5024. int cpu, int prev_cpu, int sd_flag)
  5025. {
  5026. int new_cpu = cpu;
  5027. if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
  5028. return prev_cpu;
  5029. /*
  5030. * We need task's util for cpu_util_without, sync it up to
  5031. * prev_cpu's last_update_time.
  5032. */
  5033. if (!(sd_flag & SD_BALANCE_FORK))
  5034. sync_entity_load_avg(&p->se);
  5035. while (sd) {
  5036. struct sched_group *group;
  5037. struct sched_domain *tmp;
  5038. int weight;
  5039. if (!(sd->flags & sd_flag)) {
  5040. sd = sd->child;
  5041. continue;
  5042. }
  5043. group = find_idlest_group(sd, p, cpu);
  5044. if (!group) {
  5045. sd = sd->child;
  5046. continue;
  5047. }
  5048. new_cpu = find_idlest_group_cpu(group, p, cpu);
  5049. if (new_cpu == cpu) {
  5050. /* Now try balancing at a lower domain level of 'cpu': */
  5051. sd = sd->child;
  5052. continue;
  5053. }
  5054. /* Now try balancing at a lower domain level of 'new_cpu': */
  5055. cpu = new_cpu;
  5056. weight = sd->span_weight;
  5057. sd = NULL;
  5058. for_each_domain(cpu, tmp) {
  5059. if (weight <= tmp->span_weight)
  5060. break;
  5061. if (tmp->flags & sd_flag)
  5062. sd = tmp;
  5063. }
  5064. }
  5065. return new_cpu;
  5066. }
  5067. #ifdef CONFIG_SCHED_SMT
  5068. DEFINE_STATIC_KEY_FALSE(sched_smt_present);
  5069. EXPORT_SYMBOL_GPL(sched_smt_present);
  5070. static inline void set_idle_cores(int cpu, int val)
  5071. {
  5072. struct sched_domain_shared *sds;
  5073. sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
  5074. if (sds)
  5075. WRITE_ONCE(sds->has_idle_cores, val);
  5076. }
  5077. static inline bool test_idle_cores(int cpu, bool def)
  5078. {
  5079. struct sched_domain_shared *sds;
  5080. sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
  5081. if (sds)
  5082. return READ_ONCE(sds->has_idle_cores);
  5083. return def;
  5084. }
  5085. /*
  5086. * Scans the local SMT mask to see if the entire core is idle, and records this
  5087. * information in sd_llc_shared->has_idle_cores.
  5088. *
  5089. * Since SMT siblings share all cache levels, inspecting this limited remote
  5090. * state should be fairly cheap.
  5091. */
  5092. void __update_idle_core(struct rq *rq)
  5093. {
  5094. int core = cpu_of(rq);
  5095. int cpu;
  5096. rcu_read_lock();
  5097. if (test_idle_cores(core, true))
  5098. goto unlock;
  5099. for_each_cpu(cpu, cpu_smt_mask(core)) {
  5100. if (cpu == core)
  5101. continue;
  5102. if (!available_idle_cpu(cpu))
  5103. goto unlock;
  5104. }
  5105. set_idle_cores(core, 1);
  5106. unlock:
  5107. rcu_read_unlock();
  5108. }
  5109. /*
  5110. * Scan the entire LLC domain for idle cores; this dynamically switches off if
  5111. * there are no idle cores left in the system; tracked through
  5112. * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
  5113. */
  5114. static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
  5115. {
  5116. struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
  5117. int core, cpu;
  5118. if (!static_branch_likely(&sched_smt_present))
  5119. return -1;
  5120. if (!test_idle_cores(target, false))
  5121. return -1;
  5122. cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
  5123. for_each_cpu_wrap(core, cpus, target) {
  5124. bool idle = true;
  5125. for_each_cpu(cpu, cpu_smt_mask(core)) {
  5126. if (!available_idle_cpu(cpu)) {
  5127. idle = false;
  5128. break;
  5129. }
  5130. }
  5131. cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
  5132. if (idle)
  5133. return core;
  5134. }
  5135. /*
  5136. * Failed to find an idle core; stop looking for one.
  5137. */
  5138. set_idle_cores(target, 0);
  5139. return -1;
  5140. }
  5141. /*
  5142. * Scan the local SMT mask for idle CPUs.
  5143. */
  5144. static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
  5145. {
  5146. int cpu;
  5147. if (!static_branch_likely(&sched_smt_present))
  5148. return -1;
  5149. for_each_cpu(cpu, cpu_smt_mask(target)) {
  5150. if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
  5151. !cpumask_test_cpu(cpu, sched_domain_span(sd)))
  5152. continue;
  5153. if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
  5154. return cpu;
  5155. }
  5156. return -1;
  5157. }
  5158. #else /* CONFIG_SCHED_SMT */
  5159. static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
  5160. {
  5161. return -1;
  5162. }
  5163. static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
  5164. {
  5165. return -1;
  5166. }
  5167. #endif /* CONFIG_SCHED_SMT */
  5168. /*
  5169. * Scan the LLC domain for idle CPUs; this is dynamically regulated by
  5170. * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
  5171. * average idle time for this rq (as found in rq->avg_idle).
  5172. */
  5173. static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
  5174. {
  5175. struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
  5176. struct sched_domain *this_sd;
  5177. u64 avg_cost, avg_idle;
  5178. u64 time;
  5179. int this = smp_processor_id();
  5180. int cpu, nr = INT_MAX;
  5181. this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
  5182. if (!this_sd)
  5183. return -1;
  5184. /*
  5185. * Due to large variance we need a large fuzz factor; hackbench in
  5186. * particularly is sensitive here.
  5187. */
  5188. avg_idle = this_rq()->avg_idle / 512;
  5189. avg_cost = this_sd->avg_scan_cost + 1;
  5190. if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
  5191. return -1;
  5192. if (sched_feat(SIS_PROP)) {
  5193. u64 span_avg = sd->span_weight * avg_idle;
  5194. if (span_avg > 4*avg_cost)
  5195. nr = div_u64(span_avg, avg_cost);
  5196. else
  5197. nr = 4;
  5198. }
  5199. time = cpu_clock(this);
  5200. cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
  5201. for_each_cpu_wrap(cpu, cpus, target) {
  5202. if (!--nr)
  5203. return -1;
  5204. if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
  5205. break;
  5206. }
  5207. time = cpu_clock(this) - time;
  5208. update_avg(&this_sd->avg_scan_cost, time);
  5209. return cpu;
  5210. }
  5211. /*
  5212. * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
  5213. * the task fits. If no CPU is big enough, but there are idle ones, try to
  5214. * maximize capacity.
  5215. */
  5216. static int
  5217. select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
  5218. {
  5219. unsigned long task_util, best_cap = 0;
  5220. int cpu, best_cpu = -1;
  5221. struct cpumask *cpus;
  5222. cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
  5223. cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
  5224. task_util = uclamp_task_util(p);
  5225. for_each_cpu_wrap(cpu, cpus, target) {
  5226. unsigned long cpu_cap = capacity_of(cpu);
  5227. if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
  5228. continue;
  5229. if (fits_capacity(task_util, cpu_cap))
  5230. return cpu;
  5231. if (cpu_cap > best_cap) {
  5232. best_cap = cpu_cap;
  5233. best_cpu = cpu;
  5234. }
  5235. }
  5236. return best_cpu;
  5237. }
  5238. static inline bool asym_fits_capacity(int task_util, int cpu)
  5239. {
  5240. if (static_branch_unlikely(&sched_asym_cpucapacity))
  5241. return fits_capacity(task_util, capacity_of(cpu));
  5242. return true;
  5243. }
  5244. /*
  5245. * Try and locate an idle core/thread in the LLC cache domain.
  5246. */
  5247. static int select_idle_sibling(struct task_struct *p, int prev, int target)
  5248. {
  5249. struct sched_domain *sd;
  5250. unsigned long task_util;
  5251. int i, recent_used_cpu;
  5252. /*
  5253. * On asymmetric system, update task utilization because we will check
  5254. * that the task fits with cpu's capacity.
  5255. */
  5256. if (static_branch_unlikely(&sched_asym_cpucapacity)) {
  5257. sync_entity_load_avg(&p->se);
  5258. task_util = uclamp_task_util(p);
  5259. }
  5260. if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
  5261. asym_fits_capacity(task_util, target))
  5262. return target;
  5263. /*
  5264. * If the previous CPU is cache affine and idle, don't be stupid:
  5265. */
  5266. if (prev != target && cpus_share_cache(prev, target) &&
  5267. (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
  5268. asym_fits_capacity(task_util, prev))
  5269. return prev;
  5270. /*
  5271. * Allow a per-cpu kthread to stack with the wakee if the
  5272. * kworker thread and the tasks previous CPUs are the same.
  5273. * The assumption is that the wakee queued work for the
  5274. * per-cpu kthread that is now complete and the wakeup is
  5275. * essentially a sync wakeup. An obvious example of this
  5276. * pattern is IO completions.
  5277. */
  5278. if (is_per_cpu_kthread(current) &&
  5279. in_task() &&
  5280. prev == smp_processor_id() &&
  5281. this_rq()->nr_running <= 1 &&
  5282. asym_fits_capacity(task_util, prev)) {
  5283. return prev;
  5284. }
  5285. /* Check a recently used CPU as a potential idle candidate: */
  5286. recent_used_cpu = p->recent_used_cpu;
  5287. if (recent_used_cpu != prev &&
  5288. recent_used_cpu != target &&
  5289. cpus_share_cache(recent_used_cpu, target) &&
  5290. (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
  5291. cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
  5292. asym_fits_capacity(task_util, recent_used_cpu)) {
  5293. /*
  5294. * Replace recent_used_cpu with prev as it is a potential
  5295. * candidate for the next wake:
  5296. */
  5297. p->recent_used_cpu = prev;
  5298. return recent_used_cpu;
  5299. }
  5300. /*
  5301. * For asymmetric CPU capacity systems, our domain of interest is
  5302. * sd_asym_cpucapacity rather than sd_llc.
  5303. */
  5304. if (static_branch_unlikely(&sched_asym_cpucapacity)) {
  5305. sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
  5306. /*
  5307. * On an asymmetric CPU capacity system where an exclusive
  5308. * cpuset defines a symmetric island (i.e. one unique
  5309. * capacity_orig value through the cpuset), the key will be set
  5310. * but the CPUs within that cpuset will not have a domain with
  5311. * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
  5312. * capacity path.
  5313. */
  5314. if (sd) {
  5315. i = select_idle_capacity(p, sd, target);
  5316. return ((unsigned)i < nr_cpumask_bits) ? i : target;
  5317. }
  5318. }
  5319. sd = rcu_dereference(per_cpu(sd_llc, target));
  5320. if (!sd)
  5321. return target;
  5322. i = select_idle_core(p, sd, target);
  5323. if ((unsigned)i < nr_cpumask_bits)
  5324. return i;
  5325. i = select_idle_cpu(p, sd, target);
  5326. if ((unsigned)i < nr_cpumask_bits)
  5327. return i;
  5328. i = select_idle_smt(p, sd, target);
  5329. if ((unsigned)i < nr_cpumask_bits)
  5330. return i;
  5331. return target;
  5332. }
  5333. /**
  5334. * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
  5335. * @cpu: the CPU to get the utilization of
  5336. *
  5337. * The unit of the return value must be the one of capacity so we can compare
  5338. * the utilization with the capacity of the CPU that is available for CFS task
  5339. * (ie cpu_capacity).
  5340. *
  5341. * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
  5342. * recent utilization of currently non-runnable tasks on a CPU. It represents
  5343. * the amount of utilization of a CPU in the range [0..capacity_orig] where
  5344. * capacity_orig is the cpu_capacity available at the highest frequency
  5345. * (arch_scale_freq_capacity()).
  5346. * The utilization of a CPU converges towards a sum equal to or less than the
  5347. * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
  5348. * the running time on this CPU scaled by capacity_curr.
  5349. *
  5350. * The estimated utilization of a CPU is defined to be the maximum between its
  5351. * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
  5352. * currently RUNNABLE on that CPU.
  5353. * This allows to properly represent the expected utilization of a CPU which
  5354. * has just got a big task running since a long sleep period. At the same time
  5355. * however it preserves the benefits of the "blocked utilization" in
  5356. * describing the potential for other tasks waking up on the same CPU.
  5357. *
  5358. * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
  5359. * higher than capacity_orig because of unfortunate rounding in
  5360. * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
  5361. * the average stabilizes with the new running time. We need to check that the
  5362. * utilization stays within the range of [0..capacity_orig] and cap it if
  5363. * necessary. Without utilization capping, a group could be seen as overloaded
  5364. * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
  5365. * available capacity. We allow utilization to overshoot capacity_curr (but not
  5366. * capacity_orig) as it useful for predicting the capacity required after task
  5367. * migrations (scheduler-driven DVFS).
  5368. *
  5369. * Return: the (estimated) utilization for the specified CPU
  5370. */
  5371. static inline unsigned long cpu_util(int cpu)
  5372. {
  5373. struct cfs_rq *cfs_rq;
  5374. unsigned int util;
  5375. cfs_rq = &cpu_rq(cpu)->cfs;
  5376. util = READ_ONCE(cfs_rq->avg.util_avg);
  5377. if (sched_feat(UTIL_EST))
  5378. util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
  5379. return min_t(unsigned long, util, capacity_orig_of(cpu));
  5380. }
  5381. /*
  5382. * cpu_util_without: compute cpu utilization without any contributions from *p
  5383. * @cpu: the CPU which utilization is requested
  5384. * @p: the task which utilization should be discounted
  5385. *
  5386. * The utilization of a CPU is defined by the utilization of tasks currently
  5387. * enqueued on that CPU as well as tasks which are currently sleeping after an
  5388. * execution on that CPU.
  5389. *
  5390. * This method returns the utilization of the specified CPU by discounting the
  5391. * utilization of the specified task, whenever the task is currently
  5392. * contributing to the CPU utilization.
  5393. */
  5394. static unsigned long cpu_util_without(int cpu, struct task_struct *p)
  5395. {
  5396. struct cfs_rq *cfs_rq;
  5397. unsigned int util;
  5398. /* Task has no contribution or is new */
  5399. if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
  5400. return cpu_util(cpu);
  5401. cfs_rq = &cpu_rq(cpu)->cfs;
  5402. util = READ_ONCE(cfs_rq->avg.util_avg);
  5403. /* Discount task's util from CPU's util */
  5404. lsub_positive(&util, task_util(p));
  5405. /*
  5406. * Covered cases:
  5407. *
  5408. * a) if *p is the only task sleeping on this CPU, then:
  5409. * cpu_util (== task_util) > util_est (== 0)
  5410. * and thus we return:
  5411. * cpu_util_without = (cpu_util - task_util) = 0
  5412. *
  5413. * b) if other tasks are SLEEPING on this CPU, which is now exiting
  5414. * IDLE, then:
  5415. * cpu_util >= task_util
  5416. * cpu_util > util_est (== 0)
  5417. * and thus we discount *p's blocked utilization to return:
  5418. * cpu_util_without = (cpu_util - task_util) >= 0
  5419. *
  5420. * c) if other tasks are RUNNABLE on that CPU and
  5421. * util_est > cpu_util
  5422. * then we use util_est since it returns a more restrictive
  5423. * estimation of the spare capacity on that CPU, by just
  5424. * considering the expected utilization of tasks already
  5425. * runnable on that CPU.
  5426. *
  5427. * Cases a) and b) are covered by the above code, while case c) is
  5428. * covered by the following code when estimated utilization is
  5429. * enabled.
  5430. */
  5431. if (sched_feat(UTIL_EST)) {
  5432. unsigned int estimated =
  5433. READ_ONCE(cfs_rq->avg.util_est.enqueued);
  5434. /*
  5435. * Despite the following checks we still have a small window
  5436. * for a possible race, when an execl's select_task_rq_fair()
  5437. * races with LB's detach_task():
  5438. *
  5439. * detach_task()
  5440. * p->on_rq = TASK_ON_RQ_MIGRATING;
  5441. * ---------------------------------- A
  5442. * deactivate_task() \
  5443. * dequeue_task() + RaceTime
  5444. * util_est_dequeue() /
  5445. * ---------------------------------- B
  5446. *
  5447. * The additional check on "current == p" it's required to
  5448. * properly fix the execl regression and it helps in further
  5449. * reducing the chances for the above race.
  5450. */
  5451. if (unlikely(task_on_rq_queued(p) || current == p))
  5452. lsub_positive(&estimated, _task_util_est(p));
  5453. util = max(util, estimated);
  5454. }
  5455. /*
  5456. * Utilization (estimated) can exceed the CPU capacity, thus let's
  5457. * clamp to the maximum CPU capacity to ensure consistency with
  5458. * the cpu_util call.
  5459. */
  5460. return min_t(unsigned long, util, capacity_orig_of(cpu));
  5461. }
  5462. /*
  5463. * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
  5464. * to @dst_cpu.
  5465. */
  5466. static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
  5467. {
  5468. struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
  5469. unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
  5470. /*
  5471. * If @p migrates from @cpu to another, remove its contribution. Or,
  5472. * if @p migrates from another CPU to @cpu, add its contribution. In
  5473. * the other cases, @cpu is not impacted by the migration, so the
  5474. * util_avg should already be correct.
  5475. */
  5476. if (task_cpu(p) == cpu && dst_cpu != cpu)
  5477. sub_positive(&util, task_util(p));
  5478. else if (task_cpu(p) != cpu && dst_cpu == cpu)
  5479. util += task_util(p);
  5480. if (sched_feat(UTIL_EST)) {
  5481. util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
  5482. /*
  5483. * During wake-up, the task isn't enqueued yet and doesn't
  5484. * appear in the cfs_rq->avg.util_est.enqueued of any rq,
  5485. * so just add it (if needed) to "simulate" what will be
  5486. * cpu_util() after the task has been enqueued.
  5487. */
  5488. if (dst_cpu == cpu)
  5489. util_est += _task_util_est(p);
  5490. util = max(util, util_est);
  5491. }
  5492. return min(util, capacity_orig_of(cpu));
  5493. }
  5494. /*
  5495. * compute_energy(): Estimates the energy that @pd would consume if @p was
  5496. * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
  5497. * landscape of @pd's CPUs after the task migration, and uses the Energy Model
  5498. * to compute what would be the energy if we decided to actually migrate that
  5499. * task.
  5500. */
  5501. static long
  5502. compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
  5503. {
  5504. struct cpumask *pd_mask = perf_domain_span(pd);
  5505. unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
  5506. unsigned long max_util = 0, sum_util = 0;
  5507. unsigned long energy = 0;
  5508. int cpu;
  5509. /*
  5510. * The capacity state of CPUs of the current rd can be driven by CPUs
  5511. * of another rd if they belong to the same pd. So, account for the
  5512. * utilization of these CPUs too by masking pd with cpu_online_mask
  5513. * instead of the rd span.
  5514. *
  5515. * If an entire pd is outside of the current rd, it will not appear in
  5516. * its pd list and will not be accounted by compute_energy().
  5517. */
  5518. for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
  5519. unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
  5520. struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
  5521. /*
  5522. * Busy time computation: utilization clamping is not
  5523. * required since the ratio (sum_util / cpu_capacity)
  5524. * is already enough to scale the EM reported power
  5525. * consumption at the (eventually clamped) cpu_capacity.
  5526. */
  5527. sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap,
  5528. ENERGY_UTIL, NULL);
  5529. /*
  5530. * Performance domain frequency: utilization clamping
  5531. * must be considered since it affects the selection
  5532. * of the performance domain frequency.
  5533. * NOTE: in case RT tasks are running, by default the
  5534. * FREQUENCY_UTIL's utilization can be max OPP.
  5535. */
  5536. cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap,
  5537. FREQUENCY_UTIL, tsk);
  5538. max_util = max(max_util, cpu_util);
  5539. }
  5540. trace_android_vh_em_cpu_energy(pd->em_pd, max_util, sum_util, &energy);
  5541. if (!energy)
  5542. energy = em_cpu_energy(pd->em_pd, max_util, sum_util);
  5543. return energy;
  5544. }
  5545. /*
  5546. * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
  5547. * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
  5548. * spare capacity in each performance domain and uses it as a potential
  5549. * candidate to execute the task. Then, it uses the Energy Model to figure
  5550. * out which of the CPU candidates is the most energy-efficient.
  5551. *
  5552. * The rationale for this heuristic is as follows. In a performance domain,
  5553. * all the most energy efficient CPU candidates (according to the Energy
  5554. * Model) are those for which we'll request a low frequency. When there are
  5555. * several CPUs for which the frequency request will be the same, we don't
  5556. * have enough data to break the tie between them, because the Energy Model
  5557. * only includes active power costs. With this model, if we assume that
  5558. * frequency requests follow utilization (e.g. using schedutil), the CPU with
  5559. * the maximum spare capacity in a performance domain is guaranteed to be among
  5560. * the best candidates of the performance domain.
  5561. *
  5562. * In practice, it could be preferable from an energy standpoint to pack
  5563. * small tasks on a CPU in order to let other CPUs go in deeper idle states,
  5564. * but that could also hurt our chances to go cluster idle, and we have no
  5565. * ways to tell with the current Energy Model if this is actually a good
  5566. * idea or not. So, find_energy_efficient_cpu() basically favors
  5567. * cluster-packing, and spreading inside a cluster. That should at least be
  5568. * a good thing for latency, and this is consistent with the idea that most
  5569. * of the energy savings of EAS come from the asymmetry of the system, and
  5570. * not so much from breaking the tie between identical CPUs. That's also the
  5571. * reason why EAS is enabled in the topology code only for systems where
  5572. * SD_ASYM_CPUCAPACITY is set.
  5573. *
  5574. * NOTE: Forkees are not accepted in the energy-aware wake-up path because
  5575. * they don't have any useful utilization data yet and it's not possible to
  5576. * forecast their impact on energy consumption. Consequently, they will be
  5577. * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
  5578. * to be energy-inefficient in some use-cases. The alternative would be to
  5579. * bias new tasks towards specific types of CPUs first, or to try to infer
  5580. * their util_avg from the parent task, but those heuristics could hurt
  5581. * other use-cases too. So, until someone finds a better way to solve this,
  5582. * let's keep things simple by re-using the existing slow path.
  5583. */
  5584. static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
  5585. {
  5586. unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
  5587. struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
  5588. int max_spare_cap_cpu_ls = prev_cpu, best_idle_cpu = -1;
  5589. unsigned long max_spare_cap_ls = 0, target_cap;
  5590. unsigned long cpu_cap, util, base_energy = 0;
  5591. bool boosted, latency_sensitive = false;
  5592. unsigned int min_exit_lat = UINT_MAX;
  5593. int cpu, best_energy_cpu = prev_cpu;
  5594. struct cpuidle_state *idle;
  5595. struct sched_domain *sd;
  5596. struct perf_domain *pd;
  5597. int new_cpu = INT_MAX;
  5598. sync_entity_load_avg(&p->se);
  5599. trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu);
  5600. if (new_cpu != INT_MAX)
  5601. return new_cpu;
  5602. rcu_read_lock();
  5603. pd = rcu_dereference(rd->pd);
  5604. if (!pd || READ_ONCE(rd->overutilized))
  5605. goto fail;
  5606. cpu = smp_processor_id();
  5607. if (sync && cpu_rq(cpu)->nr_running == 1 &&
  5608. cpumask_test_cpu(cpu, p->cpus_ptr) &&
  5609. task_fits_capacity(p, capacity_of(cpu))) {
  5610. rcu_read_unlock();
  5611. return cpu;
  5612. }
  5613. /*
  5614. * Energy-aware wake-up happens on the lowest sched_domain starting
  5615. * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
  5616. */
  5617. sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
  5618. while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
  5619. sd = sd->parent;
  5620. if (!sd)
  5621. goto fail;
  5622. if (!task_util_est(p))
  5623. goto unlock;
  5624. latency_sensitive = uclamp_latency_sensitive(p);
  5625. boosted = uclamp_boosted(p);
  5626. target_cap = boosted ? 0 : ULONG_MAX;
  5627. for (; pd; pd = pd->next) {
  5628. unsigned long cur_delta, spare_cap, max_spare_cap = 0;
  5629. unsigned long base_energy_pd;
  5630. int max_spare_cap_cpu = -1;
  5631. /* Compute the 'base' energy of the pd, without @p */
  5632. base_energy_pd = compute_energy(p, -1, pd);
  5633. base_energy += base_energy_pd;
  5634. for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
  5635. if (!cpumask_test_cpu(cpu, p->cpus_ptr))
  5636. continue;
  5637. util = cpu_util_next(cpu, p, cpu);
  5638. cpu_cap = capacity_of(cpu);
  5639. spare_cap = cpu_cap;
  5640. lsub_positive(&spare_cap, util);
  5641. /*
  5642. * Skip CPUs that cannot satisfy the capacity request.
  5643. * IOW, placing the task there would make the CPU
  5644. * overutilized. Take uclamp into account to see how
  5645. * much capacity we can get out of the CPU; this is
  5646. * aligned with schedutil_cpu_util().
  5647. */
  5648. util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
  5649. if (!fits_capacity(util, cpu_cap))
  5650. continue;
  5651. /* Always use prev_cpu as a candidate. */
  5652. if (!latency_sensitive && cpu == prev_cpu) {
  5653. prev_delta = compute_energy(p, prev_cpu, pd);
  5654. prev_delta -= base_energy_pd;
  5655. best_delta = min(best_delta, prev_delta);
  5656. }
  5657. /*
  5658. * Find the CPU with the maximum spare capacity in
  5659. * the performance domain
  5660. */
  5661. if (spare_cap > max_spare_cap) {
  5662. max_spare_cap = spare_cap;
  5663. max_spare_cap_cpu = cpu;
  5664. }
  5665. if (!latency_sensitive)
  5666. continue;
  5667. if (idle_cpu(cpu)) {
  5668. cpu_cap = capacity_orig_of(cpu);
  5669. if (boosted && cpu_cap < target_cap)
  5670. continue;
  5671. if (!boosted && cpu_cap > target_cap)
  5672. continue;
  5673. idle = idle_get_state(cpu_rq(cpu));
  5674. if (idle && idle->exit_latency > min_exit_lat &&
  5675. cpu_cap == target_cap)
  5676. continue;
  5677. if (idle)
  5678. min_exit_lat = idle->exit_latency;
  5679. target_cap = cpu_cap;
  5680. best_idle_cpu = cpu;
  5681. } else if (spare_cap > max_spare_cap_ls) {
  5682. max_spare_cap_ls = spare_cap;
  5683. max_spare_cap_cpu_ls = cpu;
  5684. }
  5685. }
  5686. /* Evaluate the energy impact of using this CPU. */
  5687. if (!latency_sensitive && max_spare_cap_cpu >= 0 &&
  5688. max_spare_cap_cpu != prev_cpu) {
  5689. cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
  5690. cur_delta -= base_energy_pd;
  5691. if (cur_delta < best_delta) {
  5692. best_delta = cur_delta;
  5693. best_energy_cpu = max_spare_cap_cpu;
  5694. }
  5695. }
  5696. }
  5697. unlock:
  5698. rcu_read_unlock();
  5699. if (latency_sensitive)
  5700. return best_idle_cpu >= 0 ? best_idle_cpu : max_spare_cap_cpu_ls;
  5701. /*
  5702. * Pick the best CPU if prev_cpu cannot be used, or if it saves at
  5703. * least 6% of the energy used by prev_cpu.
  5704. */
  5705. if (prev_delta == ULONG_MAX)
  5706. return best_energy_cpu;
  5707. if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 4))
  5708. return best_energy_cpu;
  5709. return prev_cpu;
  5710. fail:
  5711. rcu_read_unlock();
  5712. return -1;
  5713. }
  5714. /*
  5715. * select_task_rq_fair: Select target runqueue for the waking task in domains
  5716. * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
  5717. * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
  5718. *
  5719. * Balances load by selecting the idlest CPU in the idlest group, or under
  5720. * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
  5721. *
  5722. * Returns the target CPU number.
  5723. *
  5724. * preempt must be disabled.
  5725. */
  5726. static int
  5727. select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
  5728. {
  5729. struct sched_domain *tmp, *sd = NULL;
  5730. int cpu = smp_processor_id();
  5731. int new_cpu = prev_cpu;
  5732. int want_affine = 0;
  5733. int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
  5734. int target_cpu = -1;
  5735. if (trace_android_rvh_select_task_rq_fair_enabled() &&
  5736. !(sd_flag & SD_BALANCE_FORK))
  5737. sync_entity_load_avg(&p->se);
  5738. trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag,
  5739. wake_flags, &target_cpu);
  5740. if (target_cpu >= 0)
  5741. return target_cpu;
  5742. if (sd_flag & SD_BALANCE_WAKE) {
  5743. record_wakee(p);
  5744. if (sched_energy_enabled()) {
  5745. new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
  5746. if (new_cpu >= 0)
  5747. return new_cpu;
  5748. new_cpu = prev_cpu;
  5749. }
  5750. want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
  5751. }
  5752. rcu_read_lock();
  5753. for_each_domain(cpu, tmp) {
  5754. /*
  5755. * If both 'cpu' and 'prev_cpu' are part of this domain,
  5756. * cpu is a valid SD_WAKE_AFFINE target.
  5757. */
  5758. if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
  5759. cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
  5760. if (cpu != prev_cpu)
  5761. new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
  5762. sd = NULL; /* Prefer wake_affine over balance flags */
  5763. break;
  5764. }
  5765. if (tmp->flags & sd_flag)
  5766. sd = tmp;
  5767. else if (!want_affine)
  5768. break;
  5769. }
  5770. if (unlikely(sd)) {
  5771. /* Slow path */
  5772. new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
  5773. } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
  5774. /* Fast path */
  5775. new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
  5776. if (want_affine)
  5777. current->recent_used_cpu = cpu;
  5778. }
  5779. rcu_read_unlock();
  5780. return new_cpu;
  5781. }
  5782. static void detach_entity_cfs_rq(struct sched_entity *se);
  5783. /*
  5784. * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
  5785. * cfs_rq_of(p) references at time of call are still valid and identify the
  5786. * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
  5787. */
  5788. static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
  5789. {
  5790. /*
  5791. * As blocked tasks retain absolute vruntime the migration needs to
  5792. * deal with this by subtracting the old and adding the new
  5793. * min_vruntime -- the latter is done by enqueue_entity() when placing
  5794. * the task on the new runqueue.
  5795. */
  5796. if (p->state == TASK_WAKING) {
  5797. struct sched_entity *se = &p->se;
  5798. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  5799. u64 min_vruntime;
  5800. #ifndef CONFIG_64BIT
  5801. u64 min_vruntime_copy;
  5802. do {
  5803. min_vruntime_copy = cfs_rq->min_vruntime_copy;
  5804. smp_rmb();
  5805. min_vruntime = cfs_rq->min_vruntime;
  5806. } while (min_vruntime != min_vruntime_copy);
  5807. #else
  5808. min_vruntime = cfs_rq->min_vruntime;
  5809. #endif
  5810. se->vruntime -= min_vruntime;
  5811. }
  5812. if (p->on_rq == TASK_ON_RQ_MIGRATING) {
  5813. /*
  5814. * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
  5815. * rq->lock and can modify state directly.
  5816. */
  5817. lockdep_assert_held(&task_rq(p)->lock);
  5818. detach_entity_cfs_rq(&p->se);
  5819. } else {
  5820. /*
  5821. * We are supposed to update the task to "current" time, then
  5822. * its up to date and ready to go to new CPU/cfs_rq. But we
  5823. * have difficulty in getting what current time is, so simply
  5824. * throw away the out-of-date time. This will result in the
  5825. * wakee task is less decayed, but giving the wakee more load
  5826. * sounds not bad.
  5827. */
  5828. remove_entity_load_avg(&p->se);
  5829. }
  5830. /* Tell new CPU we are migrated */
  5831. p->se.avg.last_update_time = 0;
  5832. /* We have migrated, no longer consider this task hot */
  5833. p->se.exec_start = 0;
  5834. update_scan_period(p, new_cpu);
  5835. }
  5836. static void task_dead_fair(struct task_struct *p)
  5837. {
  5838. remove_entity_load_avg(&p->se);
  5839. }
  5840. static int
  5841. balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  5842. {
  5843. if (rq->nr_running)
  5844. return 1;
  5845. return newidle_balance(rq, rf) != 0;
  5846. }
  5847. #endif /* CONFIG_SMP */
  5848. static unsigned long wakeup_gran(struct sched_entity *se)
  5849. {
  5850. unsigned long gran = sysctl_sched_wakeup_granularity;
  5851. /*
  5852. * Since its curr running now, convert the gran from real-time
  5853. * to virtual-time in his units.
  5854. *
  5855. * By using 'se' instead of 'curr' we penalize light tasks, so
  5856. * they get preempted easier. That is, if 'se' < 'curr' then
  5857. * the resulting gran will be larger, therefore penalizing the
  5858. * lighter, if otoh 'se' > 'curr' then the resulting gran will
  5859. * be smaller, again penalizing the lighter task.
  5860. *
  5861. * This is especially important for buddies when the leftmost
  5862. * task is higher priority than the buddy.
  5863. */
  5864. return calc_delta_fair(gran, se);
  5865. }
  5866. /*
  5867. * Should 'se' preempt 'curr'.
  5868. *
  5869. * |s1
  5870. * |s2
  5871. * |s3
  5872. * g
  5873. * |<--->|c
  5874. *
  5875. * w(c, s1) = -1
  5876. * w(c, s2) = 0
  5877. * w(c, s3) = 1
  5878. *
  5879. */
  5880. static int
  5881. wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
  5882. {
  5883. s64 gran, vdiff = curr->vruntime - se->vruntime;
  5884. if (vdiff <= 0)
  5885. return -1;
  5886. gran = wakeup_gran(se);
  5887. if (vdiff > gran)
  5888. return 1;
  5889. return 0;
  5890. }
  5891. static void set_last_buddy(struct sched_entity *se)
  5892. {
  5893. if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
  5894. return;
  5895. for_each_sched_entity(se) {
  5896. if (SCHED_WARN_ON(!se->on_rq))
  5897. return;
  5898. cfs_rq_of(se)->last = se;
  5899. }
  5900. }
  5901. static void set_next_buddy(struct sched_entity *se)
  5902. {
  5903. if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
  5904. return;
  5905. for_each_sched_entity(se) {
  5906. if (SCHED_WARN_ON(!se->on_rq))
  5907. return;
  5908. cfs_rq_of(se)->next = se;
  5909. }
  5910. }
  5911. static void set_skip_buddy(struct sched_entity *se)
  5912. {
  5913. for_each_sched_entity(se)
  5914. cfs_rq_of(se)->skip = se;
  5915. }
  5916. /*
  5917. * Preempt the current task with a newly woken task if needed:
  5918. */
  5919. static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
  5920. {
  5921. struct task_struct *curr = rq->curr;
  5922. struct sched_entity *se = &curr->se, *pse = &p->se;
  5923. struct cfs_rq *cfs_rq = task_cfs_rq(curr);
  5924. int scale = cfs_rq->nr_running >= sched_nr_latency;
  5925. int next_buddy_marked = 0;
  5926. bool preempt = false, nopreempt = false;
  5927. if (unlikely(se == pse))
  5928. return;
  5929. /*
  5930. * This is possible from callers such as attach_tasks(), in which we
  5931. * unconditionally check_prempt_curr() after an enqueue (which may have
  5932. * lead to a throttle). This both saves work and prevents false
  5933. * next-buddy nomination below.
  5934. */
  5935. if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
  5936. return;
  5937. if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
  5938. set_next_buddy(pse);
  5939. next_buddy_marked = 1;
  5940. }
  5941. /*
  5942. * We can come here with TIF_NEED_RESCHED already set from new task
  5943. * wake up path.
  5944. *
  5945. * Note: this also catches the edge-case of curr being in a throttled
  5946. * group (e.g. via set_curr_task), since update_curr() (in the
  5947. * enqueue of curr) will have resulted in resched being set. This
  5948. * prevents us from potentially nominating it as a false LAST_BUDDY
  5949. * below.
  5950. */
  5951. if (test_tsk_need_resched(curr))
  5952. return;
  5953. /* Idle tasks are by definition preempted by non-idle tasks. */
  5954. if (unlikely(task_has_idle_policy(curr)) &&
  5955. likely(!task_has_idle_policy(p)))
  5956. goto preempt;
  5957. /*
  5958. * Batch and idle tasks do not preempt non-idle tasks (their preemption
  5959. * is driven by the tick):
  5960. */
  5961. if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
  5962. return;
  5963. find_matching_se(&se, &pse);
  5964. update_curr(cfs_rq_of(se));
  5965. trace_android_rvh_check_preempt_wakeup(rq, p, &preempt, &nopreempt,
  5966. wake_flags, se, pse, next_buddy_marked, sysctl_sched_wakeup_granularity);
  5967. if (preempt)
  5968. goto preempt;
  5969. if (nopreempt)
  5970. return;
  5971. BUG_ON(!pse);
  5972. if (wakeup_preempt_entity(se, pse) == 1) {
  5973. /*
  5974. * Bias pick_next to pick the sched entity that is
  5975. * triggering this preemption.
  5976. */
  5977. if (!next_buddy_marked)
  5978. set_next_buddy(pse);
  5979. goto preempt;
  5980. }
  5981. return;
  5982. preempt:
  5983. resched_curr(rq);
  5984. /*
  5985. * Only set the backward buddy when the current task is still
  5986. * on the rq. This can happen when a wakeup gets interleaved
  5987. * with schedule on the ->pre_schedule() or idle_balance()
  5988. * point, either of which can * drop the rq lock.
  5989. *
  5990. * Also, during early boot the idle thread is in the fair class,
  5991. * for obvious reasons its a bad idea to schedule back to it.
  5992. */
  5993. if (unlikely(!se->on_rq || curr == rq->idle))
  5994. return;
  5995. if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
  5996. set_last_buddy(se);
  5997. }
  5998. struct task_struct *
  5999. pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  6000. {
  6001. struct cfs_rq *cfs_rq = &rq->cfs;
  6002. struct sched_entity *se = NULL;
  6003. struct task_struct *p = NULL;
  6004. int new_tasks;
  6005. bool repick = false;
  6006. again:
  6007. if (!sched_fair_runnable(rq))
  6008. goto idle;
  6009. #ifdef CONFIG_FAIR_GROUP_SCHED
  6010. if (!prev || prev->sched_class != &fair_sched_class)
  6011. goto simple;
  6012. /*
  6013. * Because of the set_next_buddy() in dequeue_task_fair() it is rather
  6014. * likely that a next task is from the same cgroup as the current.
  6015. *
  6016. * Therefore attempt to avoid putting and setting the entire cgroup
  6017. * hierarchy, only change the part that actually changes.
  6018. */
  6019. do {
  6020. struct sched_entity *curr = cfs_rq->curr;
  6021. /*
  6022. * Since we got here without doing put_prev_entity() we also
  6023. * have to consider cfs_rq->curr. If it is still a runnable
  6024. * entity, update_curr() will update its vruntime, otherwise
  6025. * forget we've ever seen it.
  6026. */
  6027. if (curr) {
  6028. if (curr->on_rq)
  6029. update_curr(cfs_rq);
  6030. else
  6031. curr = NULL;
  6032. /*
  6033. * This call to check_cfs_rq_runtime() will do the
  6034. * throttle and dequeue its entity in the parent(s).
  6035. * Therefore the nr_running test will indeed
  6036. * be correct.
  6037. */
  6038. if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
  6039. cfs_rq = &rq->cfs;
  6040. if (!cfs_rq->nr_running)
  6041. goto idle;
  6042. goto simple;
  6043. }
  6044. }
  6045. se = pick_next_entity(cfs_rq, curr);
  6046. cfs_rq = group_cfs_rq(se);
  6047. } while (cfs_rq);
  6048. p = task_of(se);
  6049. trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, false, prev);
  6050. /*
  6051. * Since we haven't yet done put_prev_entity and if the selected task
  6052. * is a different task than we started out with, try and touch the
  6053. * least amount of cfs_rqs.
  6054. */
  6055. if (prev != p) {
  6056. struct sched_entity *pse = &prev->se;
  6057. while (!(cfs_rq = is_same_group(se, pse))) {
  6058. int se_depth = se->depth;
  6059. int pse_depth = pse->depth;
  6060. if (se_depth <= pse_depth) {
  6061. put_prev_entity(cfs_rq_of(pse), pse);
  6062. pse = parent_entity(pse);
  6063. }
  6064. if (se_depth >= pse_depth) {
  6065. set_next_entity(cfs_rq_of(se), se);
  6066. se = parent_entity(se);
  6067. }
  6068. }
  6069. put_prev_entity(cfs_rq, pse);
  6070. set_next_entity(cfs_rq, se);
  6071. }
  6072. goto done;
  6073. simple:
  6074. #endif
  6075. if (prev)
  6076. put_prev_task(rq, prev);
  6077. trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, true, prev);
  6078. if (repick) {
  6079. for_each_sched_entity(se)
  6080. set_next_entity(cfs_rq_of(se), se);
  6081. goto done;
  6082. }
  6083. do {
  6084. se = pick_next_entity(cfs_rq, NULL);
  6085. set_next_entity(cfs_rq, se);
  6086. cfs_rq = group_cfs_rq(se);
  6087. } while (cfs_rq);
  6088. p = task_of(se);
  6089. done: __maybe_unused;
  6090. #ifdef CONFIG_SMP
  6091. /*
  6092. * Move the next running task to the front of
  6093. * the list, so our cfs_tasks list becomes MRU
  6094. * one.
  6095. */
  6096. list_move(&p->se.group_node, &rq->cfs_tasks);
  6097. #endif
  6098. if (hrtick_enabled(rq))
  6099. hrtick_start_fair(rq, p);
  6100. update_misfit_status(p, rq);
  6101. return p;
  6102. idle:
  6103. if (!rf)
  6104. return NULL;
  6105. new_tasks = newidle_balance(rq, rf);
  6106. /*
  6107. * Because newidle_balance() releases (and re-acquires) rq->lock, it is
  6108. * possible for any higher priority task to appear. In that case we
  6109. * must re-start the pick_next_entity() loop.
  6110. */
  6111. if (new_tasks < 0)
  6112. return RETRY_TASK;
  6113. if (new_tasks > 0)
  6114. goto again;
  6115. /*
  6116. * rq is about to be idle, check if we need to update the
  6117. * lost_idle_time of clock_pelt
  6118. */
  6119. update_idle_rq_clock_pelt(rq);
  6120. return NULL;
  6121. }
  6122. static struct task_struct *__pick_next_task_fair(struct rq *rq)
  6123. {
  6124. return pick_next_task_fair(rq, NULL, NULL);
  6125. }
  6126. /*
  6127. * Account for a descheduled task:
  6128. */
  6129. static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  6130. {
  6131. struct sched_entity *se = &prev->se;
  6132. struct cfs_rq *cfs_rq;
  6133. for_each_sched_entity(se) {
  6134. cfs_rq = cfs_rq_of(se);
  6135. put_prev_entity(cfs_rq, se);
  6136. }
  6137. }
  6138. /*
  6139. * sched_yield() is very simple
  6140. *
  6141. * The magic of dealing with the ->skip buddy is in pick_next_entity.
  6142. */
  6143. static void yield_task_fair(struct rq *rq)
  6144. {
  6145. struct task_struct *curr = rq->curr;
  6146. struct cfs_rq *cfs_rq = task_cfs_rq(curr);
  6147. struct sched_entity *se = &curr->se;
  6148. /*
  6149. * Are we the only task in the tree?
  6150. */
  6151. if (unlikely(rq->nr_running == 1))
  6152. return;
  6153. clear_buddies(cfs_rq, se);
  6154. if (curr->policy != SCHED_BATCH) {
  6155. update_rq_clock(rq);
  6156. /*
  6157. * Update run-time statistics of the 'current'.
  6158. */
  6159. update_curr(cfs_rq);
  6160. /*
  6161. * Tell update_rq_clock() that we've just updated,
  6162. * so we don't do microscopic update in schedule()
  6163. * and double the fastpath cost.
  6164. */
  6165. rq_clock_skip_update(rq);
  6166. }
  6167. set_skip_buddy(se);
  6168. }
  6169. static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
  6170. {
  6171. struct sched_entity *se = &p->se;
  6172. /* throttled hierarchies are not runnable */
  6173. if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
  6174. return false;
  6175. /* Tell the scheduler that we'd really like pse to run next. */
  6176. set_next_buddy(se);
  6177. yield_task_fair(rq);
  6178. return true;
  6179. }
  6180. #ifdef CONFIG_SMP
  6181. /**************************************************
  6182. * Fair scheduling class load-balancing methods.
  6183. *
  6184. * BASICS
  6185. *
  6186. * The purpose of load-balancing is to achieve the same basic fairness the
  6187. * per-CPU scheduler provides, namely provide a proportional amount of compute
  6188. * time to each task. This is expressed in the following equation:
  6189. *
  6190. * W_i,n/P_i == W_j,n/P_j for all i,j (1)
  6191. *
  6192. * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
  6193. * W_i,0 is defined as:
  6194. *
  6195. * W_i,0 = \Sum_j w_i,j (2)
  6196. *
  6197. * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
  6198. * is derived from the nice value as per sched_prio_to_weight[].
  6199. *
  6200. * The weight average is an exponential decay average of the instantaneous
  6201. * weight:
  6202. *
  6203. * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
  6204. *
  6205. * C_i is the compute capacity of CPU i, typically it is the
  6206. * fraction of 'recent' time available for SCHED_OTHER task execution. But it
  6207. * can also include other factors [XXX].
  6208. *
  6209. * To achieve this balance we define a measure of imbalance which follows
  6210. * directly from (1):
  6211. *
  6212. * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
  6213. *
  6214. * We them move tasks around to minimize the imbalance. In the continuous
  6215. * function space it is obvious this converges, in the discrete case we get
  6216. * a few fun cases generally called infeasible weight scenarios.
  6217. *
  6218. * [XXX expand on:
  6219. * - infeasible weights;
  6220. * - local vs global optima in the discrete case. ]
  6221. *
  6222. *
  6223. * SCHED DOMAINS
  6224. *
  6225. * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
  6226. * for all i,j solution, we create a tree of CPUs that follows the hardware
  6227. * topology where each level pairs two lower groups (or better). This results
  6228. * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
  6229. * tree to only the first of the previous level and we decrease the frequency
  6230. * of load-balance at each level inv. proportional to the number of CPUs in
  6231. * the groups.
  6232. *
  6233. * This yields:
  6234. *
  6235. * log_2 n 1 n
  6236. * \Sum { --- * --- * 2^i } = O(n) (5)
  6237. * i = 0 2^i 2^i
  6238. * `- size of each group
  6239. * | | `- number of CPUs doing load-balance
  6240. * | `- freq
  6241. * `- sum over all levels
  6242. *
  6243. * Coupled with a limit on how many tasks we can migrate every balance pass,
  6244. * this makes (5) the runtime complexity of the balancer.
  6245. *
  6246. * An important property here is that each CPU is still (indirectly) connected
  6247. * to every other CPU in at most O(log n) steps:
  6248. *
  6249. * The adjacency matrix of the resulting graph is given by:
  6250. *
  6251. * log_2 n
  6252. * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
  6253. * k = 0
  6254. *
  6255. * And you'll find that:
  6256. *
  6257. * A^(log_2 n)_i,j != 0 for all i,j (7)
  6258. *
  6259. * Showing there's indeed a path between every CPU in at most O(log n) steps.
  6260. * The task movement gives a factor of O(m), giving a convergence complexity
  6261. * of:
  6262. *
  6263. * O(nm log n), n := nr_cpus, m := nr_tasks (8)
  6264. *
  6265. *
  6266. * WORK CONSERVING
  6267. *
  6268. * In order to avoid CPUs going idle while there's still work to do, new idle
  6269. * balancing is more aggressive and has the newly idle CPU iterate up the domain
  6270. * tree itself instead of relying on other CPUs to bring it work.
  6271. *
  6272. * This adds some complexity to both (5) and (8) but it reduces the total idle
  6273. * time.
  6274. *
  6275. * [XXX more?]
  6276. *
  6277. *
  6278. * CGROUPS
  6279. *
  6280. * Cgroups make a horror show out of (2), instead of a simple sum we get:
  6281. *
  6282. * s_k,i
  6283. * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
  6284. * S_k
  6285. *
  6286. * Where
  6287. *
  6288. * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
  6289. *
  6290. * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
  6291. *
  6292. * The big problem is S_k, its a global sum needed to compute a local (W_i)
  6293. * property.
  6294. *
  6295. * [XXX write more on how we solve this.. _after_ merging pjt's patches that
  6296. * rewrite all of this once again.]
  6297. */
  6298. unsigned long __read_mostly max_load_balance_interval = HZ/10;
  6299. EXPORT_SYMBOL_GPL(max_load_balance_interval);
  6300. enum fbq_type { regular, remote, all };
  6301. /*
  6302. * 'group_type' describes the group of CPUs at the moment of load balancing.
  6303. *
  6304. * The enum is ordered by pulling priority, with the group with lowest priority
  6305. * first so the group_type can simply be compared when selecting the busiest
  6306. * group. See update_sd_pick_busiest().
  6307. */
  6308. enum group_type {
  6309. /* The group has spare capacity that can be used to run more tasks. */
  6310. group_has_spare = 0,
  6311. /*
  6312. * The group is fully used and the tasks don't compete for more CPU
  6313. * cycles. Nevertheless, some tasks might wait before running.
  6314. */
  6315. group_fully_busy,
  6316. /*
  6317. * SD_ASYM_CPUCAPACITY only: One task doesn't fit with CPU's capacity
  6318. * and must be migrated to a more powerful CPU.
  6319. */
  6320. group_misfit_task,
  6321. /*
  6322. * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
  6323. * and the task should be migrated to it instead of running on the
  6324. * current CPU.
  6325. */
  6326. group_asym_packing,
  6327. /*
  6328. * The tasks' affinity constraints previously prevented the scheduler
  6329. * from balancing the load across the system.
  6330. */
  6331. group_imbalanced,
  6332. /*
  6333. * The CPU is overloaded and can't provide expected CPU cycles to all
  6334. * tasks.
  6335. */
  6336. group_overloaded
  6337. };
  6338. enum migration_type {
  6339. migrate_load = 0,
  6340. migrate_util,
  6341. migrate_task,
  6342. migrate_misfit
  6343. };
  6344. #define LBF_ALL_PINNED 0x01
  6345. #define LBF_NEED_BREAK 0x02
  6346. #define LBF_DST_PINNED 0x04
  6347. #define LBF_SOME_PINNED 0x08
  6348. #define LBF_NOHZ_STATS 0x10
  6349. #define LBF_NOHZ_AGAIN 0x20
  6350. struct lb_env {
  6351. struct sched_domain *sd;
  6352. struct rq *src_rq;
  6353. int src_cpu;
  6354. int dst_cpu;
  6355. struct rq *dst_rq;
  6356. struct cpumask *dst_grpmask;
  6357. int new_dst_cpu;
  6358. enum cpu_idle_type idle;
  6359. long imbalance;
  6360. /* The set of CPUs under consideration for load-balancing */
  6361. struct cpumask *cpus;
  6362. unsigned int flags;
  6363. unsigned int loop;
  6364. unsigned int loop_break;
  6365. unsigned int loop_max;
  6366. enum fbq_type fbq_type;
  6367. enum migration_type migration_type;
  6368. struct list_head tasks;
  6369. struct rq_flags *src_rq_rf;
  6370. };
  6371. /*
  6372. * Is this task likely cache-hot:
  6373. */
  6374. static int task_hot(struct task_struct *p, struct lb_env *env)
  6375. {
  6376. s64 delta;
  6377. lockdep_assert_held(&env->src_rq->lock);
  6378. if (p->sched_class != &fair_sched_class)
  6379. return 0;
  6380. if (unlikely(task_has_idle_policy(p)))
  6381. return 0;
  6382. /* SMT siblings share cache */
  6383. if (env->sd->flags & SD_SHARE_CPUCAPACITY)
  6384. return 0;
  6385. /*
  6386. * Buddy candidates are cache hot:
  6387. */
  6388. if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
  6389. (&p->se == cfs_rq_of(&p->se)->next ||
  6390. &p->se == cfs_rq_of(&p->se)->last))
  6391. return 1;
  6392. if (sysctl_sched_migration_cost == -1)
  6393. return 1;
  6394. if (sysctl_sched_migration_cost == 0)
  6395. return 0;
  6396. delta = rq_clock_task(env->src_rq) - p->se.exec_start;
  6397. return delta < (s64)sysctl_sched_migration_cost;
  6398. }
  6399. #ifdef CONFIG_NUMA_BALANCING
  6400. /*
  6401. * Returns 1, if task migration degrades locality
  6402. * Returns 0, if task migration improves locality i.e migration preferred.
  6403. * Returns -1, if task migration is not affected by locality.
  6404. */
  6405. static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
  6406. {
  6407. struct numa_group *numa_group = rcu_dereference(p->numa_group);
  6408. unsigned long src_weight, dst_weight;
  6409. int src_nid, dst_nid, dist;
  6410. if (!static_branch_likely(&sched_numa_balancing))
  6411. return -1;
  6412. if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
  6413. return -1;
  6414. src_nid = cpu_to_node(env->src_cpu);
  6415. dst_nid = cpu_to_node(env->dst_cpu);
  6416. if (src_nid == dst_nid)
  6417. return -1;
  6418. /* Migrating away from the preferred node is always bad. */
  6419. if (src_nid == p->numa_preferred_nid) {
  6420. if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
  6421. return 1;
  6422. else
  6423. return -1;
  6424. }
  6425. /* Encourage migration to the preferred node. */
  6426. if (dst_nid == p->numa_preferred_nid)
  6427. return 0;
  6428. /* Leaving a core idle is often worse than degrading locality. */
  6429. if (env->idle == CPU_IDLE)
  6430. return -1;
  6431. dist = node_distance(src_nid, dst_nid);
  6432. if (numa_group) {
  6433. src_weight = group_weight(p, src_nid, dist);
  6434. dst_weight = group_weight(p, dst_nid, dist);
  6435. } else {
  6436. src_weight = task_weight(p, src_nid, dist);
  6437. dst_weight = task_weight(p, dst_nid, dist);
  6438. }
  6439. return dst_weight < src_weight;
  6440. }
  6441. #else
  6442. static inline int migrate_degrades_locality(struct task_struct *p,
  6443. struct lb_env *env)
  6444. {
  6445. return -1;
  6446. }
  6447. #endif
  6448. /*
  6449. * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  6450. */
  6451. static
  6452. int can_migrate_task(struct task_struct *p, struct lb_env *env)
  6453. {
  6454. int tsk_cache_hot;
  6455. int can_migrate = 1;
  6456. lockdep_assert_held(&env->src_rq->lock);
  6457. trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate);
  6458. if (!can_migrate)
  6459. return 0;
  6460. /*
  6461. * We do not migrate tasks that are:
  6462. * 1) throttled_lb_pair, or
  6463. * 2) cannot be migrated to this CPU due to cpus_ptr, or
  6464. * 3) running (obviously), or
  6465. * 4) are cache-hot on their current CPU.
  6466. */
  6467. if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
  6468. return 0;
  6469. /* Disregard pcpu kthreads; they are where they need to be. */
  6470. if (kthread_is_per_cpu(p))
  6471. return 0;
  6472. if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
  6473. int cpu;
  6474. schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
  6475. env->flags |= LBF_SOME_PINNED;
  6476. /*
  6477. * Remember if this task can be migrated to any other CPU in
  6478. * our sched_group. We may want to revisit it if we couldn't
  6479. * meet load balance goals by pulling other tasks on src_cpu.
  6480. *
  6481. * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have
  6482. * already computed one in current iteration.
  6483. */
  6484. if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED))
  6485. return 0;
  6486. /* Prevent to re-select dst_cpu via env's CPUs: */
  6487. for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
  6488. if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
  6489. env->flags |= LBF_DST_PINNED;
  6490. env->new_dst_cpu = cpu;
  6491. break;
  6492. }
  6493. }
  6494. return 0;
  6495. }
  6496. /* Record that we found atleast one task that could run on dst_cpu */
  6497. env->flags &= ~LBF_ALL_PINNED;
  6498. if (task_running(env->src_rq, p)) {
  6499. schedstat_inc(p->se.statistics.nr_failed_migrations_running);
  6500. return 0;
  6501. }
  6502. /*
  6503. * Aggressive migration if:
  6504. * 1) destination numa is preferred
  6505. * 2) task is cache cold, or
  6506. * 3) too many balance attempts have failed.
  6507. */
  6508. tsk_cache_hot = migrate_degrades_locality(p, env);
  6509. if (tsk_cache_hot == -1)
  6510. tsk_cache_hot = task_hot(p, env);
  6511. if (tsk_cache_hot <= 0 ||
  6512. env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
  6513. if (tsk_cache_hot == 1) {
  6514. schedstat_inc(env->sd->lb_hot_gained[env->idle]);
  6515. schedstat_inc(p->se.statistics.nr_forced_migrations);
  6516. }
  6517. return 1;
  6518. }
  6519. schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
  6520. return 0;
  6521. }
  6522. /*
  6523. * detach_task() -- detach the task for the migration specified in env
  6524. */
  6525. static void detach_task(struct task_struct *p, struct lb_env *env)
  6526. {
  6527. int detached = 0;
  6528. lockdep_assert_held(&env->src_rq->lock);
  6529. /*
  6530. * The vendor hook may drop the lock temporarily, so
  6531. * pass the rq flags to unpin lock. We expect the
  6532. * rq lock to be held after return.
  6533. */
  6534. trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p,
  6535. env->dst_cpu, &detached);
  6536. if (detached)
  6537. return;
  6538. deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
  6539. set_task_cpu(p, env->dst_cpu);
  6540. }
  6541. /*
  6542. * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
  6543. * part of active balancing operations within "domain".
  6544. *
  6545. * Returns a task if successful and NULL otherwise.
  6546. */
  6547. static struct task_struct *detach_one_task(struct lb_env *env)
  6548. {
  6549. struct task_struct *p;
  6550. lockdep_assert_held(&env->src_rq->lock);
  6551. list_for_each_entry_reverse(p,
  6552. &env->src_rq->cfs_tasks, se.group_node) {
  6553. if (!can_migrate_task(p, env))
  6554. continue;
  6555. detach_task(p, env);
  6556. /*
  6557. * Right now, this is only the second place where
  6558. * lb_gained[env->idle] is updated (other is detach_tasks)
  6559. * so we can safely collect stats here rather than
  6560. * inside detach_tasks().
  6561. */
  6562. schedstat_inc(env->sd->lb_gained[env->idle]);
  6563. return p;
  6564. }
  6565. return NULL;
  6566. }
  6567. static const unsigned int sched_nr_migrate_break = 32;
  6568. /*
  6569. * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
  6570. * busiest_rq, as part of a balancing operation within domain "sd".
  6571. *
  6572. * Returns number of detached tasks if successful and 0 otherwise.
  6573. */
  6574. static int detach_tasks(struct lb_env *env)
  6575. {
  6576. struct list_head *tasks = &env->src_rq->cfs_tasks;
  6577. unsigned long util, load;
  6578. struct task_struct *p;
  6579. int detached = 0;
  6580. lockdep_assert_held(&env->src_rq->lock);
  6581. if (env->imbalance <= 0)
  6582. return 0;
  6583. while (!list_empty(tasks)) {
  6584. /*
  6585. * We don't want to steal all, otherwise we may be treated likewise,
  6586. * which could at worst lead to a livelock crash.
  6587. */
  6588. if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
  6589. break;
  6590. p = list_last_entry(tasks, struct task_struct, se.group_node);
  6591. env->loop++;
  6592. /* We've more or less seen every task there is, call it quits */
  6593. if (env->loop > env->loop_max)
  6594. break;
  6595. /* take a breather every nr_migrate tasks */
  6596. if (env->loop > env->loop_break) {
  6597. env->loop_break += sched_nr_migrate_break;
  6598. env->flags |= LBF_NEED_BREAK;
  6599. break;
  6600. }
  6601. if (!can_migrate_task(p, env))
  6602. goto next;
  6603. switch (env->migration_type) {
  6604. case migrate_load:
  6605. /*
  6606. * Depending of the number of CPUs and tasks and the
  6607. * cgroup hierarchy, task_h_load() can return a null
  6608. * value. Make sure that env->imbalance decreases
  6609. * otherwise detach_tasks() will stop only after
  6610. * detaching up to loop_max tasks.
  6611. */
  6612. load = max_t(unsigned long, task_h_load(p), 1);
  6613. if (sched_feat(LB_MIN) &&
  6614. load < 16 && !env->sd->nr_balance_failed)
  6615. goto next;
  6616. /*
  6617. * Make sure that we don't migrate too much load.
  6618. * Nevertheless, let relax the constraint if
  6619. * scheduler fails to find a good waiting task to
  6620. * migrate.
  6621. */
  6622. if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
  6623. goto next;
  6624. env->imbalance -= load;
  6625. break;
  6626. case migrate_util:
  6627. util = task_util_est(p);
  6628. if (util > env->imbalance)
  6629. goto next;
  6630. env->imbalance -= util;
  6631. break;
  6632. case migrate_task:
  6633. env->imbalance--;
  6634. break;
  6635. case migrate_misfit:
  6636. /* This is not a misfit task */
  6637. if (task_fits_capacity(p, capacity_of(env->src_cpu)))
  6638. goto next;
  6639. env->imbalance = 0;
  6640. break;
  6641. }
  6642. detach_task(p, env);
  6643. list_add(&p->se.group_node, &env->tasks);
  6644. detached++;
  6645. #ifdef CONFIG_PREEMPTION
  6646. /*
  6647. * NEWIDLE balancing is a source of latency, so preemptible
  6648. * kernels will stop after the first task is detached to minimize
  6649. * the critical section.
  6650. */
  6651. if (env->idle == CPU_NEWLY_IDLE)
  6652. break;
  6653. #endif
  6654. /*
  6655. * We only want to steal up to the prescribed amount of
  6656. * load/util/tasks.
  6657. */
  6658. if (env->imbalance <= 0)
  6659. break;
  6660. continue;
  6661. next:
  6662. list_move(&p->se.group_node, tasks);
  6663. }
  6664. /*
  6665. * Right now, this is one of only two places we collect this stat
  6666. * so we can safely collect detach_one_task() stats here rather
  6667. * than inside detach_one_task().
  6668. */
  6669. schedstat_add(env->sd->lb_gained[env->idle], detached);
  6670. return detached;
  6671. }
  6672. /*
  6673. * attach_task() -- attach the task detached by detach_task() to its new rq.
  6674. */
  6675. static void attach_task(struct rq *rq, struct task_struct *p)
  6676. {
  6677. lockdep_assert_held(&rq->lock);
  6678. BUG_ON(task_rq(p) != rq);
  6679. activate_task(rq, p, ENQUEUE_NOCLOCK);
  6680. check_preempt_curr(rq, p, 0);
  6681. }
  6682. /*
  6683. * attach_one_task() -- attaches the task returned from detach_one_task() to
  6684. * its new rq.
  6685. */
  6686. static void attach_one_task(struct rq *rq, struct task_struct *p)
  6687. {
  6688. struct rq_flags rf;
  6689. rq_lock(rq, &rf);
  6690. update_rq_clock(rq);
  6691. attach_task(rq, p);
  6692. rq_unlock(rq, &rf);
  6693. }
  6694. /*
  6695. * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
  6696. * new rq.
  6697. */
  6698. static void attach_tasks(struct lb_env *env)
  6699. {
  6700. struct list_head *tasks = &env->tasks;
  6701. struct task_struct *p;
  6702. struct rq_flags rf;
  6703. rq_lock(env->dst_rq, &rf);
  6704. update_rq_clock(env->dst_rq);
  6705. while (!list_empty(tasks)) {
  6706. p = list_first_entry(tasks, struct task_struct, se.group_node);
  6707. list_del_init(&p->se.group_node);
  6708. attach_task(env->dst_rq, p);
  6709. }
  6710. rq_unlock(env->dst_rq, &rf);
  6711. }
  6712. #ifdef CONFIG_NO_HZ_COMMON
  6713. static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
  6714. {
  6715. if (cfs_rq->avg.load_avg)
  6716. return true;
  6717. if (cfs_rq->avg.util_avg)
  6718. return true;
  6719. return false;
  6720. }
  6721. static inline bool others_have_blocked(struct rq *rq)
  6722. {
  6723. if (READ_ONCE(rq->avg_rt.util_avg))
  6724. return true;
  6725. if (READ_ONCE(rq->avg_dl.util_avg))
  6726. return true;
  6727. if (thermal_load_avg(rq))
  6728. return true;
  6729. #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
  6730. if (READ_ONCE(rq->avg_irq.util_avg))
  6731. return true;
  6732. #endif
  6733. return false;
  6734. }
  6735. static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
  6736. {
  6737. rq->last_blocked_load_update_tick = jiffies;
  6738. if (!has_blocked)
  6739. rq->has_blocked_load = 0;
  6740. }
  6741. #else
  6742. static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
  6743. static inline bool others_have_blocked(struct rq *rq) { return false; }
  6744. static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
  6745. #endif
  6746. static bool __update_blocked_others(struct rq *rq, bool *done)
  6747. {
  6748. const struct sched_class *curr_class;
  6749. u64 now = rq_clock_pelt(rq);
  6750. unsigned long thermal_pressure;
  6751. bool decayed;
  6752. /*
  6753. * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
  6754. * DL and IRQ signals have been updated before updating CFS.
  6755. */
  6756. curr_class = rq->curr->sched_class;
  6757. thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
  6758. decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
  6759. update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
  6760. update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
  6761. update_irq_load_avg(rq, 0);
  6762. if (others_have_blocked(rq))
  6763. *done = false;
  6764. return decayed;
  6765. }
  6766. #ifdef CONFIG_FAIR_GROUP_SCHED
  6767. static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
  6768. {
  6769. if (cfs_rq->load.weight)
  6770. return false;
  6771. if (cfs_rq->avg.load_sum)
  6772. return false;
  6773. if (cfs_rq->avg.util_sum)
  6774. return false;
  6775. if (cfs_rq->avg.runnable_sum)
  6776. return false;
  6777. return true;
  6778. }
  6779. static bool __update_blocked_fair(struct rq *rq, bool *done)
  6780. {
  6781. struct cfs_rq *cfs_rq, *pos;
  6782. bool decayed = false;
  6783. int cpu = cpu_of(rq);
  6784. /*
  6785. * Iterates the task_group tree in a bottom up fashion, see
  6786. * list_add_leaf_cfs_rq() for details.
  6787. */
  6788. for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
  6789. struct sched_entity *se;
  6790. if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
  6791. update_tg_load_avg(cfs_rq);
  6792. if (cfs_rq == &rq->cfs)
  6793. decayed = true;
  6794. }
  6795. /* Propagate pending load changes to the parent, if any: */
  6796. se = cfs_rq->tg->se[cpu];
  6797. if (se && !skip_blocked_update(se))
  6798. update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
  6799. /*
  6800. * There can be a lot of idle CPU cgroups. Don't let fully
  6801. * decayed cfs_rqs linger on the list.
  6802. */
  6803. if (cfs_rq_is_decayed(cfs_rq))
  6804. list_del_leaf_cfs_rq(cfs_rq);
  6805. /* Don't need periodic decay once load/util_avg are null */
  6806. if (cfs_rq_has_blocked(cfs_rq))
  6807. *done = false;
  6808. }
  6809. return decayed;
  6810. }
  6811. /*
  6812. * Compute the hierarchical load factor for cfs_rq and all its ascendants.
  6813. * This needs to be done in a top-down fashion because the load of a child
  6814. * group is a fraction of its parents load.
  6815. */
  6816. static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
  6817. {
  6818. struct rq *rq = rq_of(cfs_rq);
  6819. struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
  6820. unsigned long now = jiffies;
  6821. unsigned long load;
  6822. if (cfs_rq->last_h_load_update == now)
  6823. return;
  6824. WRITE_ONCE(cfs_rq->h_load_next, NULL);
  6825. for_each_sched_entity(se) {
  6826. cfs_rq = cfs_rq_of(se);
  6827. WRITE_ONCE(cfs_rq->h_load_next, se);
  6828. if (cfs_rq->last_h_load_update == now)
  6829. break;
  6830. }
  6831. if (!se) {
  6832. cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
  6833. cfs_rq->last_h_load_update = now;
  6834. }
  6835. while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
  6836. load = cfs_rq->h_load;
  6837. load = div64_ul(load * se->avg.load_avg,
  6838. cfs_rq_load_avg(cfs_rq) + 1);
  6839. cfs_rq = group_cfs_rq(se);
  6840. cfs_rq->h_load = load;
  6841. cfs_rq->last_h_load_update = now;
  6842. }
  6843. }
  6844. static unsigned long task_h_load(struct task_struct *p)
  6845. {
  6846. struct cfs_rq *cfs_rq = task_cfs_rq(p);
  6847. update_cfs_rq_h_load(cfs_rq);
  6848. return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
  6849. cfs_rq_load_avg(cfs_rq) + 1);
  6850. }
  6851. #else
  6852. static bool __update_blocked_fair(struct rq *rq, bool *done)
  6853. {
  6854. struct cfs_rq *cfs_rq = &rq->cfs;
  6855. bool decayed;
  6856. decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
  6857. if (cfs_rq_has_blocked(cfs_rq))
  6858. *done = false;
  6859. return decayed;
  6860. }
  6861. static unsigned long task_h_load(struct task_struct *p)
  6862. {
  6863. return p->se.avg.load_avg;
  6864. }
  6865. #endif
  6866. static void update_blocked_averages(int cpu)
  6867. {
  6868. bool decayed = false, done = true;
  6869. struct rq *rq = cpu_rq(cpu);
  6870. struct rq_flags rf;
  6871. rq_lock_irqsave(rq, &rf);
  6872. update_rq_clock(rq);
  6873. decayed |= __update_blocked_others(rq, &done);
  6874. decayed |= __update_blocked_fair(rq, &done);
  6875. update_blocked_load_status(rq, !done);
  6876. if (decayed)
  6877. cpufreq_update_util(rq, 0);
  6878. rq_unlock_irqrestore(rq, &rf);
  6879. }
  6880. /********** Helpers for find_busiest_group ************************/
  6881. /*
  6882. * sg_lb_stats - stats of a sched_group required for load_balancing
  6883. */
  6884. struct sg_lb_stats {
  6885. unsigned long avg_load; /*Avg load across the CPUs of the group */
  6886. unsigned long group_load; /* Total load over the CPUs of the group */
  6887. unsigned long group_capacity;
  6888. unsigned long group_util; /* Total utilization over the CPUs of the group */
  6889. unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
  6890. unsigned int sum_nr_running; /* Nr of tasks running in the group */
  6891. unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
  6892. unsigned int idle_cpus;
  6893. unsigned int group_weight;
  6894. enum group_type group_type;
  6895. unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
  6896. unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
  6897. #ifdef CONFIG_NUMA_BALANCING
  6898. unsigned int nr_numa_running;
  6899. unsigned int nr_preferred_running;
  6900. #endif
  6901. };
  6902. /*
  6903. * sd_lb_stats - Structure to store the statistics of a sched_domain
  6904. * during load balancing.
  6905. */
  6906. struct sd_lb_stats {
  6907. struct sched_group *busiest; /* Busiest group in this sd */
  6908. struct sched_group *local; /* Local group in this sd */
  6909. unsigned long total_load; /* Total load of all groups in sd */
  6910. unsigned long total_capacity; /* Total capacity of all groups in sd */
  6911. unsigned long avg_load; /* Average load across all groups in sd */
  6912. unsigned int prefer_sibling; /* tasks should go to sibling first */
  6913. struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
  6914. struct sg_lb_stats local_stat; /* Statistics of the local group */
  6915. };
  6916. static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
  6917. {
  6918. /*
  6919. * Skimp on the clearing to avoid duplicate work. We can avoid clearing
  6920. * local_stat because update_sg_lb_stats() does a full clear/assignment.
  6921. * We must however set busiest_stat::group_type and
  6922. * busiest_stat::idle_cpus to the worst busiest group because
  6923. * update_sd_pick_busiest() reads these before assignment.
  6924. */
  6925. *sds = (struct sd_lb_stats){
  6926. .busiest = NULL,
  6927. .local = NULL,
  6928. .total_load = 0UL,
  6929. .total_capacity = 0UL,
  6930. .busiest_stat = {
  6931. .idle_cpus = UINT_MAX,
  6932. .group_type = group_has_spare,
  6933. },
  6934. };
  6935. }
  6936. static unsigned long scale_rt_capacity(int cpu)
  6937. {
  6938. struct rq *rq = cpu_rq(cpu);
  6939. unsigned long max = arch_scale_cpu_capacity(cpu);
  6940. unsigned long used, free;
  6941. unsigned long irq;
  6942. irq = cpu_util_irq(rq);
  6943. if (unlikely(irq >= max))
  6944. return 1;
  6945. /*
  6946. * avg_rt.util_avg and avg_dl.util_avg track binary signals
  6947. * (running and not running) with weights 0 and 1024 respectively.
  6948. * avg_thermal.load_avg tracks thermal pressure and the weighted
  6949. * average uses the actual delta max capacity(load).
  6950. */
  6951. used = READ_ONCE(rq->avg_rt.util_avg);
  6952. used += READ_ONCE(rq->avg_dl.util_avg);
  6953. used += thermal_load_avg(rq);
  6954. if (unlikely(used >= max))
  6955. return 1;
  6956. free = max - used;
  6957. return scale_irq_capacity(free, irq, max);
  6958. }
  6959. static void update_cpu_capacity(struct sched_domain *sd, int cpu)
  6960. {
  6961. unsigned long capacity = scale_rt_capacity(cpu);
  6962. struct sched_group *sdg = sd->groups;
  6963. cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
  6964. if (!capacity)
  6965. capacity = 1;
  6966. trace_android_rvh_update_cpu_capacity(cpu, &capacity);
  6967. cpu_rq(cpu)->cpu_capacity = capacity;
  6968. trace_sched_cpu_capacity_tp(cpu_rq(cpu));
  6969. sdg->sgc->capacity = capacity;
  6970. sdg->sgc->min_capacity = capacity;
  6971. sdg->sgc->max_capacity = capacity;
  6972. }
  6973. void update_group_capacity(struct sched_domain *sd, int cpu)
  6974. {
  6975. struct sched_domain *child = sd->child;
  6976. struct sched_group *group, *sdg = sd->groups;
  6977. unsigned long capacity, min_capacity, max_capacity;
  6978. unsigned long interval;
  6979. interval = msecs_to_jiffies(sd->balance_interval);
  6980. interval = clamp(interval, 1UL, max_load_balance_interval);
  6981. sdg->sgc->next_update = jiffies + interval;
  6982. if (!child) {
  6983. update_cpu_capacity(sd, cpu);
  6984. return;
  6985. }
  6986. capacity = 0;
  6987. min_capacity = ULONG_MAX;
  6988. max_capacity = 0;
  6989. if (child->flags & SD_OVERLAP) {
  6990. /*
  6991. * SD_OVERLAP domains cannot assume that child groups
  6992. * span the current group.
  6993. */
  6994. for_each_cpu(cpu, sched_group_span(sdg)) {
  6995. unsigned long cpu_cap = capacity_of(cpu);
  6996. capacity += cpu_cap;
  6997. min_capacity = min(cpu_cap, min_capacity);
  6998. max_capacity = max(cpu_cap, max_capacity);
  6999. }
  7000. } else {
  7001. /*
  7002. * !SD_OVERLAP domains can assume that child groups
  7003. * span the current group.
  7004. */
  7005. group = child->groups;
  7006. do {
  7007. struct sched_group_capacity *sgc = group->sgc;
  7008. capacity += sgc->capacity;
  7009. min_capacity = min(sgc->min_capacity, min_capacity);
  7010. max_capacity = max(sgc->max_capacity, max_capacity);
  7011. group = group->next;
  7012. } while (group != child->groups);
  7013. }
  7014. sdg->sgc->capacity = capacity;
  7015. sdg->sgc->min_capacity = min_capacity;
  7016. sdg->sgc->max_capacity = max_capacity;
  7017. }
  7018. /*
  7019. * Check whether the capacity of the rq has been noticeably reduced by side
  7020. * activity. The imbalance_pct is used for the threshold.
  7021. * Return true is the capacity is reduced
  7022. */
  7023. static inline int
  7024. check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
  7025. {
  7026. return ((rq->cpu_capacity * sd->imbalance_pct) <
  7027. (rq->cpu_capacity_orig * 100));
  7028. }
  7029. /*
  7030. * Check whether a rq has a misfit task and if it looks like we can actually
  7031. * help that task: we can migrate the task to a CPU of higher capacity, or
  7032. * the task's current CPU is heavily pressured.
  7033. */
  7034. static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
  7035. {
  7036. return rq->misfit_task_load &&
  7037. (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
  7038. check_cpu_capacity(rq, sd));
  7039. }
  7040. /*
  7041. * Group imbalance indicates (and tries to solve) the problem where balancing
  7042. * groups is inadequate due to ->cpus_ptr constraints.
  7043. *
  7044. * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
  7045. * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
  7046. * Something like:
  7047. *
  7048. * { 0 1 2 3 } { 4 5 6 7 }
  7049. * * * * *
  7050. *
  7051. * If we were to balance group-wise we'd place two tasks in the first group and
  7052. * two tasks in the second group. Clearly this is undesired as it will overload
  7053. * cpu 3 and leave one of the CPUs in the second group unused.
  7054. *
  7055. * The current solution to this issue is detecting the skew in the first group
  7056. * by noticing the lower domain failed to reach balance and had difficulty
  7057. * moving tasks due to affinity constraints.
  7058. *
  7059. * When this is so detected; this group becomes a candidate for busiest; see
  7060. * update_sd_pick_busiest(). And calculate_imbalance() and
  7061. * find_busiest_group() avoid some of the usual balance conditions to allow it
  7062. * to create an effective group imbalance.
  7063. *
  7064. * This is a somewhat tricky proposition since the next run might not find the
  7065. * group imbalance and decide the groups need to be balanced again. A most
  7066. * subtle and fragile situation.
  7067. */
  7068. static inline int sg_imbalanced(struct sched_group *group)
  7069. {
  7070. return group->sgc->imbalance;
  7071. }
  7072. /*
  7073. * group_has_capacity returns true if the group has spare capacity that could
  7074. * be used by some tasks.
  7075. * We consider that a group has spare capacity if the * number of task is
  7076. * smaller than the number of CPUs or if the utilization is lower than the
  7077. * available capacity for CFS tasks.
  7078. * For the latter, we use a threshold to stabilize the state, to take into
  7079. * account the variance of the tasks' load and to return true if the available
  7080. * capacity in meaningful for the load balancer.
  7081. * As an example, an available capacity of 1% can appear but it doesn't make
  7082. * any benefit for the load balance.
  7083. */
  7084. static inline bool
  7085. group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
  7086. {
  7087. if (sgs->sum_nr_running < sgs->group_weight)
  7088. return true;
  7089. if ((sgs->group_capacity * imbalance_pct) <
  7090. (sgs->group_runnable * 100))
  7091. return false;
  7092. if ((sgs->group_capacity * 100) >
  7093. (sgs->group_util * imbalance_pct))
  7094. return true;
  7095. return false;
  7096. }
  7097. /*
  7098. * group_is_overloaded returns true if the group has more tasks than it can
  7099. * handle.
  7100. * group_is_overloaded is not equals to !group_has_capacity because a group
  7101. * with the exact right number of tasks, has no more spare capacity but is not
  7102. * overloaded so both group_has_capacity and group_is_overloaded return
  7103. * false.
  7104. */
  7105. static inline bool
  7106. group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
  7107. {
  7108. if (sgs->sum_nr_running <= sgs->group_weight)
  7109. return false;
  7110. if ((sgs->group_capacity * 100) <
  7111. (sgs->group_util * imbalance_pct))
  7112. return true;
  7113. if ((sgs->group_capacity * imbalance_pct) <
  7114. (sgs->group_runnable * 100))
  7115. return true;
  7116. return false;
  7117. }
  7118. /*
  7119. * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
  7120. * per-CPU capacity than sched_group ref.
  7121. */
  7122. static inline bool
  7123. group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
  7124. {
  7125. return fits_capacity(sg->sgc->min_capacity, ref->sgc->min_capacity);
  7126. }
  7127. /*
  7128. * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
  7129. * per-CPU capacity_orig than sched_group ref.
  7130. */
  7131. static inline bool
  7132. group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
  7133. {
  7134. return fits_capacity(sg->sgc->max_capacity, ref->sgc->max_capacity);
  7135. }
  7136. static inline enum
  7137. group_type group_classify(unsigned int imbalance_pct,
  7138. struct sched_group *group,
  7139. struct sg_lb_stats *sgs)
  7140. {
  7141. if (group_is_overloaded(imbalance_pct, sgs))
  7142. return group_overloaded;
  7143. if (sg_imbalanced(group))
  7144. return group_imbalanced;
  7145. if (sgs->group_asym_packing)
  7146. return group_asym_packing;
  7147. if (sgs->group_misfit_task_load)
  7148. return group_misfit_task;
  7149. if (!group_has_capacity(imbalance_pct, sgs))
  7150. return group_fully_busy;
  7151. return group_has_spare;
  7152. }
  7153. static bool update_nohz_stats(struct rq *rq, bool force)
  7154. {
  7155. #ifdef CONFIG_NO_HZ_COMMON
  7156. unsigned int cpu = rq->cpu;
  7157. if (!rq->has_blocked_load)
  7158. return false;
  7159. if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
  7160. return false;
  7161. if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
  7162. return true;
  7163. update_blocked_averages(cpu);
  7164. return rq->has_blocked_load;
  7165. #else
  7166. return false;
  7167. #endif
  7168. }
  7169. /**
  7170. * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  7171. * @env: The load balancing environment.
  7172. * @group: sched_group whose statistics are to be updated.
  7173. * @sgs: variable to hold the statistics for this group.
  7174. * @sg_status: Holds flag indicating the status of the sched_group
  7175. */
  7176. static inline void update_sg_lb_stats(struct lb_env *env,
  7177. struct sched_group *group,
  7178. struct sg_lb_stats *sgs,
  7179. int *sg_status)
  7180. {
  7181. int i, nr_running, local_group;
  7182. memset(sgs, 0, sizeof(*sgs));
  7183. local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
  7184. for_each_cpu_and(i, sched_group_span(group), env->cpus) {
  7185. struct rq *rq = cpu_rq(i);
  7186. if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
  7187. env->flags |= LBF_NOHZ_AGAIN;
  7188. sgs->group_load += cpu_load(rq);
  7189. sgs->group_util += cpu_util(i);
  7190. sgs->group_runnable += cpu_runnable(rq);
  7191. sgs->sum_h_nr_running += rq->cfs.h_nr_running;
  7192. nr_running = rq->nr_running;
  7193. sgs->sum_nr_running += nr_running;
  7194. if (nr_running > 1)
  7195. *sg_status |= SG_OVERLOAD;
  7196. if (cpu_overutilized(i))
  7197. *sg_status |= SG_OVERUTILIZED;
  7198. #ifdef CONFIG_NUMA_BALANCING
  7199. sgs->nr_numa_running += rq->nr_numa_running;
  7200. sgs->nr_preferred_running += rq->nr_preferred_running;
  7201. #endif
  7202. /*
  7203. * No need to call idle_cpu() if nr_running is not 0
  7204. */
  7205. if (!nr_running && idle_cpu(i)) {
  7206. sgs->idle_cpus++;
  7207. /* Idle cpu can't have misfit task */
  7208. continue;
  7209. }
  7210. if (local_group)
  7211. continue;
  7212. /* Check for a misfit task on the cpu */
  7213. if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
  7214. sgs->group_misfit_task_load < rq->misfit_task_load) {
  7215. sgs->group_misfit_task_load = rq->misfit_task_load;
  7216. *sg_status |= SG_OVERLOAD;
  7217. }
  7218. }
  7219. /* Check if dst CPU is idle and preferred to this group */
  7220. if (env->sd->flags & SD_ASYM_PACKING &&
  7221. env->idle != CPU_NOT_IDLE &&
  7222. sgs->sum_h_nr_running &&
  7223. sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
  7224. sgs->group_asym_packing = 1;
  7225. }
  7226. sgs->group_capacity = group->sgc->capacity;
  7227. sgs->group_weight = group->group_weight;
  7228. sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
  7229. /* Computing avg_load makes sense only when group is overloaded */
  7230. if (sgs->group_type == group_overloaded)
  7231. sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
  7232. sgs->group_capacity;
  7233. }
  7234. /**
  7235. * update_sd_pick_busiest - return 1 on busiest group
  7236. * @env: The load balancing environment.
  7237. * @sds: sched_domain statistics
  7238. * @sg: sched_group candidate to be checked for being the busiest
  7239. * @sgs: sched_group statistics
  7240. *
  7241. * Determine if @sg is a busier group than the previously selected
  7242. * busiest group.
  7243. *
  7244. * Return: %true if @sg is a busier group than the previously selected
  7245. * busiest group. %false otherwise.
  7246. */
  7247. static bool update_sd_pick_busiest(struct lb_env *env,
  7248. struct sd_lb_stats *sds,
  7249. struct sched_group *sg,
  7250. struct sg_lb_stats *sgs)
  7251. {
  7252. struct sg_lb_stats *busiest = &sds->busiest_stat;
  7253. /* Make sure that there is at least one task to pull */
  7254. if (!sgs->sum_h_nr_running)
  7255. return false;
  7256. /*
  7257. * Don't try to pull misfit tasks we can't help.
  7258. * We can use max_capacity here as reduction in capacity on some
  7259. * CPUs in the group should either be possible to resolve
  7260. * internally or be covered by avg_load imbalance (eventually).
  7261. */
  7262. if (sgs->group_type == group_misfit_task &&
  7263. (!group_smaller_max_cpu_capacity(sg, sds->local) ||
  7264. sds->local_stat.group_type != group_has_spare))
  7265. return false;
  7266. if (sgs->group_type > busiest->group_type)
  7267. return true;
  7268. if (sgs->group_type < busiest->group_type)
  7269. return false;
  7270. /*
  7271. * The candidate and the current busiest group are the same type of
  7272. * group. Let check which one is the busiest according to the type.
  7273. */
  7274. switch (sgs->group_type) {
  7275. case group_overloaded:
  7276. /* Select the overloaded group with highest avg_load. */
  7277. if (sgs->avg_load <= busiest->avg_load)
  7278. return false;
  7279. break;
  7280. case group_imbalanced:
  7281. /*
  7282. * Select the 1st imbalanced group as we don't have any way to
  7283. * choose one more than another.
  7284. */
  7285. return false;
  7286. case group_asym_packing:
  7287. /* Prefer to move from lowest priority CPU's work */
  7288. if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
  7289. return false;
  7290. break;
  7291. case group_misfit_task:
  7292. /*
  7293. * If we have more than one misfit sg go with the biggest
  7294. * misfit.
  7295. */
  7296. if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
  7297. return false;
  7298. break;
  7299. case group_fully_busy:
  7300. /*
  7301. * Select the fully busy group with highest avg_load. In
  7302. * theory, there is no need to pull task from such kind of
  7303. * group because tasks have all compute capacity that they need
  7304. * but we can still improve the overall throughput by reducing
  7305. * contention when accessing shared HW resources.
  7306. *
  7307. * XXX for now avg_load is not computed and always 0 so we
  7308. * select the 1st one.
  7309. */
  7310. if (sgs->avg_load <= busiest->avg_load)
  7311. return false;
  7312. break;
  7313. case group_has_spare:
  7314. /*
  7315. * Select not overloaded group with lowest number of idle cpus
  7316. * and highest number of running tasks. We could also compare
  7317. * the spare capacity which is more stable but it can end up
  7318. * that the group has less spare capacity but finally more idle
  7319. * CPUs which means less opportunity to pull tasks.
  7320. */
  7321. if (sgs->idle_cpus > busiest->idle_cpus)
  7322. return false;
  7323. else if ((sgs->idle_cpus == busiest->idle_cpus) &&
  7324. (sgs->sum_nr_running <= busiest->sum_nr_running))
  7325. return false;
  7326. break;
  7327. }
  7328. /*
  7329. * Candidate sg has no more than one task per CPU and has higher
  7330. * per-CPU capacity. Migrating tasks to less capable CPUs may harm
  7331. * throughput. Maximize throughput, power/energy consequences are not
  7332. * considered.
  7333. */
  7334. if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
  7335. (sgs->group_type <= group_fully_busy) &&
  7336. (group_smaller_min_cpu_capacity(sds->local, sg)))
  7337. return false;
  7338. return true;
  7339. }
  7340. #ifdef CONFIG_NUMA_BALANCING
  7341. static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
  7342. {
  7343. if (sgs->sum_h_nr_running > sgs->nr_numa_running)
  7344. return regular;
  7345. if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
  7346. return remote;
  7347. return all;
  7348. }
  7349. static inline enum fbq_type fbq_classify_rq(struct rq *rq)
  7350. {
  7351. if (rq->nr_running > rq->nr_numa_running)
  7352. return regular;
  7353. if (rq->nr_running > rq->nr_preferred_running)
  7354. return remote;
  7355. return all;
  7356. }
  7357. #else
  7358. static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
  7359. {
  7360. return all;
  7361. }
  7362. static inline enum fbq_type fbq_classify_rq(struct rq *rq)
  7363. {
  7364. return regular;
  7365. }
  7366. #endif /* CONFIG_NUMA_BALANCING */
  7367. struct sg_lb_stats;
  7368. /*
  7369. * task_running_on_cpu - return 1 if @p is running on @cpu.
  7370. */
  7371. static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
  7372. {
  7373. /* Task has no contribution or is new */
  7374. if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
  7375. return 0;
  7376. if (task_on_rq_queued(p))
  7377. return 1;
  7378. return 0;
  7379. }
  7380. /**
  7381. * idle_cpu_without - would a given CPU be idle without p ?
  7382. * @cpu: the processor on which idleness is tested.
  7383. * @p: task which should be ignored.
  7384. *
  7385. * Return: 1 if the CPU would be idle. 0 otherwise.
  7386. */
  7387. static int idle_cpu_without(int cpu, struct task_struct *p)
  7388. {
  7389. struct rq *rq = cpu_rq(cpu);
  7390. if (rq->curr != rq->idle && rq->curr != p)
  7391. return 0;
  7392. /*
  7393. * rq->nr_running can't be used but an updated version without the
  7394. * impact of p on cpu must be used instead. The updated nr_running
  7395. * be computed and tested before calling idle_cpu_without().
  7396. */
  7397. #ifdef CONFIG_SMP
  7398. if (rq->ttwu_pending)
  7399. return 0;
  7400. #endif
  7401. return 1;
  7402. }
  7403. /*
  7404. * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
  7405. * @sd: The sched_domain level to look for idlest group.
  7406. * @group: sched_group whose statistics are to be updated.
  7407. * @sgs: variable to hold the statistics for this group.
  7408. * @p: The task for which we look for the idlest group/CPU.
  7409. */
  7410. static inline void update_sg_wakeup_stats(struct sched_domain *sd,
  7411. struct sched_group *group,
  7412. struct sg_lb_stats *sgs,
  7413. struct task_struct *p)
  7414. {
  7415. int i, nr_running;
  7416. memset(sgs, 0, sizeof(*sgs));
  7417. for_each_cpu(i, sched_group_span(group)) {
  7418. struct rq *rq = cpu_rq(i);
  7419. unsigned int local;
  7420. sgs->group_load += cpu_load_without(rq, p);
  7421. sgs->group_util += cpu_util_without(i, p);
  7422. sgs->group_runnable += cpu_runnable_without(rq, p);
  7423. local = task_running_on_cpu(i, p);
  7424. sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
  7425. nr_running = rq->nr_running - local;
  7426. sgs->sum_nr_running += nr_running;
  7427. /*
  7428. * No need to call idle_cpu_without() if nr_running is not 0
  7429. */
  7430. if (!nr_running && idle_cpu_without(i, p))
  7431. sgs->idle_cpus++;
  7432. }
  7433. /* Check if task fits in the group */
  7434. if (sd->flags & SD_ASYM_CPUCAPACITY &&
  7435. !task_fits_capacity(p, group->sgc->max_capacity)) {
  7436. sgs->group_misfit_task_load = 1;
  7437. }
  7438. sgs->group_capacity = group->sgc->capacity;
  7439. sgs->group_weight = group->group_weight;
  7440. sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
  7441. /*
  7442. * Computing avg_load makes sense only when group is fully busy or
  7443. * overloaded
  7444. */
  7445. if (sgs->group_type == group_fully_busy ||
  7446. sgs->group_type == group_overloaded)
  7447. sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
  7448. sgs->group_capacity;
  7449. }
  7450. static bool update_pick_idlest(struct sched_group *idlest,
  7451. struct sg_lb_stats *idlest_sgs,
  7452. struct sched_group *group,
  7453. struct sg_lb_stats *sgs)
  7454. {
  7455. if (sgs->group_type < idlest_sgs->group_type)
  7456. return true;
  7457. if (sgs->group_type > idlest_sgs->group_type)
  7458. return false;
  7459. /*
  7460. * The candidate and the current idlest group are the same type of
  7461. * group. Let check which one is the idlest according to the type.
  7462. */
  7463. switch (sgs->group_type) {
  7464. case group_overloaded:
  7465. case group_fully_busy:
  7466. /* Select the group with lowest avg_load. */
  7467. if (idlest_sgs->avg_load <= sgs->avg_load)
  7468. return false;
  7469. break;
  7470. case group_imbalanced:
  7471. case group_asym_packing:
  7472. /* Those types are not used in the slow wakeup path */
  7473. return false;
  7474. case group_misfit_task:
  7475. /* Select group with the highest max capacity */
  7476. if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
  7477. return false;
  7478. break;
  7479. case group_has_spare:
  7480. /* Select group with most idle CPUs */
  7481. if (idlest_sgs->idle_cpus > sgs->idle_cpus)
  7482. return false;
  7483. /* Select group with lowest group_util */
  7484. if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
  7485. idlest_sgs->group_util <= sgs->group_util)
  7486. return false;
  7487. break;
  7488. }
  7489. return true;
  7490. }
  7491. /*
  7492. * find_idlest_group() finds and returns the least busy CPU group within the
  7493. * domain.
  7494. *
  7495. * Assumes p is allowed on at least one CPU in sd.
  7496. */
  7497. static struct sched_group *
  7498. find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  7499. {
  7500. struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
  7501. struct sg_lb_stats local_sgs, tmp_sgs;
  7502. struct sg_lb_stats *sgs;
  7503. unsigned long imbalance;
  7504. struct sg_lb_stats idlest_sgs = {
  7505. .avg_load = UINT_MAX,
  7506. .group_type = group_overloaded,
  7507. };
  7508. imbalance = scale_load_down(NICE_0_LOAD) *
  7509. (sd->imbalance_pct-100) / 100;
  7510. do {
  7511. int local_group;
  7512. /* Skip over this group if it has no CPUs allowed */
  7513. if (!cpumask_intersects(sched_group_span(group),
  7514. p->cpus_ptr))
  7515. continue;
  7516. local_group = cpumask_test_cpu(this_cpu,
  7517. sched_group_span(group));
  7518. if (local_group) {
  7519. sgs = &local_sgs;
  7520. local = group;
  7521. } else {
  7522. sgs = &tmp_sgs;
  7523. }
  7524. update_sg_wakeup_stats(sd, group, sgs, p);
  7525. if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
  7526. idlest = group;
  7527. idlest_sgs = *sgs;
  7528. }
  7529. } while (group = group->next, group != sd->groups);
  7530. /* There is no idlest group to push tasks to */
  7531. if (!idlest)
  7532. return NULL;
  7533. /* The local group has been skipped because of CPU affinity */
  7534. if (!local)
  7535. return idlest;
  7536. /*
  7537. * If the local group is idler than the selected idlest group
  7538. * don't try and push the task.
  7539. */
  7540. if (local_sgs.group_type < idlest_sgs.group_type)
  7541. return NULL;
  7542. /*
  7543. * If the local group is busier than the selected idlest group
  7544. * try and push the task.
  7545. */
  7546. if (local_sgs.group_type > idlest_sgs.group_type)
  7547. return idlest;
  7548. switch (local_sgs.group_type) {
  7549. case group_overloaded:
  7550. case group_fully_busy:
  7551. /*
  7552. * When comparing groups across NUMA domains, it's possible for
  7553. * the local domain to be very lightly loaded relative to the
  7554. * remote domains but "imbalance" skews the comparison making
  7555. * remote CPUs look much more favourable. When considering
  7556. * cross-domain, add imbalance to the load on the remote node
  7557. * and consider staying local.
  7558. */
  7559. if ((sd->flags & SD_NUMA) &&
  7560. ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
  7561. return NULL;
  7562. /*
  7563. * If the local group is less loaded than the selected
  7564. * idlest group don't try and push any tasks.
  7565. */
  7566. if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
  7567. return NULL;
  7568. if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
  7569. return NULL;
  7570. break;
  7571. case group_imbalanced:
  7572. case group_asym_packing:
  7573. /* Those type are not used in the slow wakeup path */
  7574. return NULL;
  7575. case group_misfit_task:
  7576. /* Select group with the highest max capacity */
  7577. if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
  7578. return NULL;
  7579. break;
  7580. case group_has_spare:
  7581. if (sd->flags & SD_NUMA) {
  7582. #ifdef CONFIG_NUMA_BALANCING
  7583. int idlest_cpu;
  7584. /*
  7585. * If there is spare capacity at NUMA, try to select
  7586. * the preferred node
  7587. */
  7588. if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
  7589. return NULL;
  7590. idlest_cpu = cpumask_first(sched_group_span(idlest));
  7591. if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
  7592. return idlest;
  7593. #endif
  7594. /*
  7595. * Otherwise, keep the task on this node to stay close
  7596. * its wakeup source and improve locality. If there is
  7597. * a real need of migration, periodic load balance will
  7598. * take care of it.
  7599. */
  7600. if (local_sgs.idle_cpus)
  7601. return NULL;
  7602. }
  7603. /*
  7604. * Select group with highest number of idle CPUs. We could also
  7605. * compare the utilization which is more stable but it can end
  7606. * up that the group has less spare capacity but finally more
  7607. * idle CPUs which means more opportunity to run task.
  7608. */
  7609. if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
  7610. return NULL;
  7611. break;
  7612. }
  7613. return idlest;
  7614. }
  7615. /**
  7616. * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  7617. * @env: The load balancing environment.
  7618. * @sds: variable to hold the statistics for this sched_domain.
  7619. */
  7620. static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
  7621. {
  7622. struct sched_domain *child = env->sd->child;
  7623. struct sched_group *sg = env->sd->groups;
  7624. struct sg_lb_stats *local = &sds->local_stat;
  7625. struct sg_lb_stats tmp_sgs;
  7626. int sg_status = 0;
  7627. #ifdef CONFIG_NO_HZ_COMMON
  7628. if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
  7629. env->flags |= LBF_NOHZ_STATS;
  7630. #endif
  7631. do {
  7632. struct sg_lb_stats *sgs = &tmp_sgs;
  7633. int local_group;
  7634. local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
  7635. if (local_group) {
  7636. sds->local = sg;
  7637. sgs = local;
  7638. if (env->idle != CPU_NEWLY_IDLE ||
  7639. time_after_eq(jiffies, sg->sgc->next_update))
  7640. update_group_capacity(env->sd, env->dst_cpu);
  7641. }
  7642. update_sg_lb_stats(env, sg, sgs, &sg_status);
  7643. if (local_group)
  7644. goto next_group;
  7645. if (update_sd_pick_busiest(env, sds, sg, sgs)) {
  7646. sds->busiest = sg;
  7647. sds->busiest_stat = *sgs;
  7648. }
  7649. next_group:
  7650. /* Now, start updating sd_lb_stats */
  7651. sds->total_load += sgs->group_load;
  7652. sds->total_capacity += sgs->group_capacity;
  7653. sg = sg->next;
  7654. } while (sg != env->sd->groups);
  7655. /* Tag domain that child domain prefers tasks go to siblings first */
  7656. sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
  7657. #ifdef CONFIG_NO_HZ_COMMON
  7658. if ((env->flags & LBF_NOHZ_AGAIN) &&
  7659. cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
  7660. WRITE_ONCE(nohz.next_blocked,
  7661. jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD));
  7662. }
  7663. #endif
  7664. if (env->sd->flags & SD_NUMA)
  7665. env->fbq_type = fbq_classify_group(&sds->busiest_stat);
  7666. if (!env->sd->parent) {
  7667. struct root_domain *rd = env->dst_rq->rd;
  7668. /* update overload indicator if we are at root domain */
  7669. WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
  7670. /* Update over-utilization (tipping point, U >= 0) indicator */
  7671. WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
  7672. trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
  7673. } else if (sg_status & SG_OVERUTILIZED) {
  7674. struct root_domain *rd = env->dst_rq->rd;
  7675. WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
  7676. trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
  7677. }
  7678. }
  7679. static inline long adjust_numa_imbalance(int imbalance, int nr_running)
  7680. {
  7681. unsigned int imbalance_min;
  7682. /*
  7683. * Allow a small imbalance based on a simple pair of communicating
  7684. * tasks that remain local when the source domain is almost idle.
  7685. */
  7686. imbalance_min = 2;
  7687. if (nr_running <= imbalance_min)
  7688. return 0;
  7689. return imbalance;
  7690. }
  7691. /**
  7692. * calculate_imbalance - Calculate the amount of imbalance present within the
  7693. * groups of a given sched_domain during load balance.
  7694. * @env: load balance environment
  7695. * @sds: statistics of the sched_domain whose imbalance is to be calculated.
  7696. */
  7697. static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
  7698. {
  7699. struct sg_lb_stats *local, *busiest;
  7700. local = &sds->local_stat;
  7701. busiest = &sds->busiest_stat;
  7702. if (busiest->group_type == group_misfit_task) {
  7703. /* Set imbalance to allow misfit tasks to be balanced. */
  7704. env->migration_type = migrate_misfit;
  7705. env->imbalance = 1;
  7706. return;
  7707. }
  7708. if (busiest->group_type == group_asym_packing) {
  7709. /*
  7710. * In case of asym capacity, we will try to migrate all load to
  7711. * the preferred CPU.
  7712. */
  7713. env->migration_type = migrate_task;
  7714. env->imbalance = busiest->sum_h_nr_running;
  7715. return;
  7716. }
  7717. if (busiest->group_type == group_imbalanced) {
  7718. /*
  7719. * In the group_imb case we cannot rely on group-wide averages
  7720. * to ensure CPU-load equilibrium, try to move any task to fix
  7721. * the imbalance. The next load balance will take care of
  7722. * balancing back the system.
  7723. */
  7724. env->migration_type = migrate_task;
  7725. env->imbalance = 1;
  7726. return;
  7727. }
  7728. /*
  7729. * Try to use spare capacity of local group without overloading it or
  7730. * emptying busiest.
  7731. */
  7732. if (local->group_type == group_has_spare) {
  7733. if ((busiest->group_type > group_fully_busy) &&
  7734. !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
  7735. /*
  7736. * If busiest is overloaded, try to fill spare
  7737. * capacity. This might end up creating spare capacity
  7738. * in busiest or busiest still being overloaded but
  7739. * there is no simple way to directly compute the
  7740. * amount of load to migrate in order to balance the
  7741. * system.
  7742. */
  7743. env->migration_type = migrate_util;
  7744. env->imbalance = max(local->group_capacity, local->group_util) -
  7745. local->group_util;
  7746. /*
  7747. * In some cases, the group's utilization is max or even
  7748. * higher than capacity because of migrations but the
  7749. * local CPU is (newly) idle. There is at least one
  7750. * waiting task in this overloaded busiest group. Let's
  7751. * try to pull it.
  7752. */
  7753. if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
  7754. env->migration_type = migrate_task;
  7755. env->imbalance = 1;
  7756. }
  7757. return;
  7758. }
  7759. if (busiest->group_weight == 1 || sds->prefer_sibling) {
  7760. unsigned int nr_diff = busiest->sum_nr_running;
  7761. /*
  7762. * When prefer sibling, evenly spread running tasks on
  7763. * groups.
  7764. */
  7765. env->migration_type = migrate_task;
  7766. lsub_positive(&nr_diff, local->sum_nr_running);
  7767. env->imbalance = nr_diff >> 1;
  7768. } else {
  7769. /*
  7770. * If there is no overload, we just want to even the number of
  7771. * idle cpus.
  7772. */
  7773. env->migration_type = migrate_task;
  7774. env->imbalance = max_t(long, 0, (local->idle_cpus -
  7775. busiest->idle_cpus) >> 1);
  7776. }
  7777. /* Consider allowing a small imbalance between NUMA groups */
  7778. if (env->sd->flags & SD_NUMA)
  7779. env->imbalance = adjust_numa_imbalance(env->imbalance,
  7780. busiest->sum_nr_running);
  7781. return;
  7782. }
  7783. /*
  7784. * Local is fully busy but has to take more load to relieve the
  7785. * busiest group
  7786. */
  7787. if (local->group_type < group_overloaded) {
  7788. /*
  7789. * Local will become overloaded so the avg_load metrics are
  7790. * finally needed.
  7791. */
  7792. local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
  7793. local->group_capacity;
  7794. sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
  7795. sds->total_capacity;
  7796. /*
  7797. * If the local group is more loaded than the selected
  7798. * busiest group don't try to pull any tasks.
  7799. */
  7800. if (local->avg_load >= busiest->avg_load) {
  7801. env->imbalance = 0;
  7802. return;
  7803. }
  7804. }
  7805. /*
  7806. * Both group are or will become overloaded and we're trying to get all
  7807. * the CPUs to the average_load, so we don't want to push ourselves
  7808. * above the average load, nor do we wish to reduce the max loaded CPU
  7809. * below the average load. At the same time, we also don't want to
  7810. * reduce the group load below the group capacity. Thus we look for
  7811. * the minimum possible imbalance.
  7812. */
  7813. env->migration_type = migrate_load;
  7814. env->imbalance = min(
  7815. (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
  7816. (sds->avg_load - local->avg_load) * local->group_capacity
  7817. ) / SCHED_CAPACITY_SCALE;
  7818. }
  7819. /******* find_busiest_group() helpers end here *********************/
  7820. /*
  7821. * Decision matrix according to the local and busiest group type:
  7822. *
  7823. * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
  7824. * has_spare nr_idle balanced N/A N/A balanced balanced
  7825. * fully_busy nr_idle nr_idle N/A N/A balanced balanced
  7826. * misfit_task force N/A N/A N/A force force
  7827. * asym_packing force force N/A N/A force force
  7828. * imbalanced force force N/A N/A force force
  7829. * overloaded force force N/A N/A force avg_load
  7830. *
  7831. * N/A : Not Applicable because already filtered while updating
  7832. * statistics.
  7833. * balanced : The system is balanced for these 2 groups.
  7834. * force : Calculate the imbalance as load migration is probably needed.
  7835. * avg_load : Only if imbalance is significant enough.
  7836. * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite
  7837. * different in groups.
  7838. */
  7839. /**
  7840. * find_busiest_group - Returns the busiest group within the sched_domain
  7841. * if there is an imbalance.
  7842. *
  7843. * Also calculates the amount of runnable load which should be moved
  7844. * to restore balance.
  7845. *
  7846. * @env: The load balancing environment.
  7847. *
  7848. * Return: - The busiest group if imbalance exists.
  7849. */
  7850. static struct sched_group *find_busiest_group(struct lb_env *env)
  7851. {
  7852. struct sg_lb_stats *local, *busiest;
  7853. struct sd_lb_stats sds;
  7854. init_sd_lb_stats(&sds);
  7855. /*
  7856. * Compute the various statistics relevant for load balancing at
  7857. * this level.
  7858. */
  7859. update_sd_lb_stats(env, &sds);
  7860. if (sched_energy_enabled()) {
  7861. struct root_domain *rd = env->dst_rq->rd;
  7862. int out_balance = 1;
  7863. trace_android_rvh_find_busiest_group(sds.busiest, env->dst_rq,
  7864. &out_balance);
  7865. if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)
  7866. && out_balance)
  7867. goto out_balanced;
  7868. }
  7869. local = &sds.local_stat;
  7870. busiest = &sds.busiest_stat;
  7871. /* There is no busy sibling group to pull tasks from */
  7872. if (!sds.busiest)
  7873. goto out_balanced;
  7874. /* Misfit tasks should be dealt with regardless of the avg load */
  7875. if (busiest->group_type == group_misfit_task)
  7876. goto force_balance;
  7877. /* ASYM feature bypasses nice load balance check */
  7878. if (busiest->group_type == group_asym_packing)
  7879. goto force_balance;
  7880. /*
  7881. * If the busiest group is imbalanced the below checks don't
  7882. * work because they assume all things are equal, which typically
  7883. * isn't true due to cpus_ptr constraints and the like.
  7884. */
  7885. if (busiest->group_type == group_imbalanced)
  7886. goto force_balance;
  7887. /*
  7888. * If the local group is busier than the selected busiest group
  7889. * don't try and pull any tasks.
  7890. */
  7891. if (local->group_type > busiest->group_type)
  7892. goto out_balanced;
  7893. /*
  7894. * When groups are overloaded, use the avg_load to ensure fairness
  7895. * between tasks.
  7896. */
  7897. if (local->group_type == group_overloaded) {
  7898. /*
  7899. * If the local group is more loaded than the selected
  7900. * busiest group don't try to pull any tasks.
  7901. */
  7902. if (local->avg_load >= busiest->avg_load)
  7903. goto out_balanced;
  7904. /* XXX broken for overlapping NUMA groups */
  7905. sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
  7906. sds.total_capacity;
  7907. /*
  7908. * Don't pull any tasks if this group is already above the
  7909. * domain average load.
  7910. */
  7911. if (local->avg_load >= sds.avg_load)
  7912. goto out_balanced;
  7913. /*
  7914. * If the busiest group is more loaded, use imbalance_pct to be
  7915. * conservative.
  7916. */
  7917. if (100 * busiest->avg_load <=
  7918. env->sd->imbalance_pct * local->avg_load)
  7919. goto out_balanced;
  7920. }
  7921. /* Try to move all excess tasks to child's sibling domain */
  7922. if (sds.prefer_sibling && local->group_type == group_has_spare &&
  7923. busiest->sum_nr_running > local->sum_nr_running + 1)
  7924. goto force_balance;
  7925. if (busiest->group_type != group_overloaded) {
  7926. if (env->idle == CPU_NOT_IDLE)
  7927. /*
  7928. * If the busiest group is not overloaded (and as a
  7929. * result the local one too) but this CPU is already
  7930. * busy, let another idle CPU try to pull task.
  7931. */
  7932. goto out_balanced;
  7933. if (busiest->group_weight > 1 &&
  7934. local->idle_cpus <= (busiest->idle_cpus + 1))
  7935. /*
  7936. * If the busiest group is not overloaded
  7937. * and there is no imbalance between this and busiest
  7938. * group wrt idle CPUs, it is balanced. The imbalance
  7939. * becomes significant if the diff is greater than 1
  7940. * otherwise we might end up to just move the imbalance
  7941. * on another group. Of course this applies only if
  7942. * there is more than 1 CPU per group.
  7943. */
  7944. goto out_balanced;
  7945. if (busiest->sum_h_nr_running == 1)
  7946. /*
  7947. * busiest doesn't have any tasks waiting to run
  7948. */
  7949. goto out_balanced;
  7950. }
  7951. force_balance:
  7952. /* Looks like there is an imbalance. Compute it */
  7953. calculate_imbalance(env, &sds);
  7954. return env->imbalance ? sds.busiest : NULL;
  7955. out_balanced:
  7956. env->imbalance = 0;
  7957. return NULL;
  7958. }
  7959. /*
  7960. * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
  7961. */
  7962. static struct rq *find_busiest_queue(struct lb_env *env,
  7963. struct sched_group *group)
  7964. {
  7965. struct rq *busiest = NULL, *rq;
  7966. unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
  7967. unsigned int busiest_nr = 0;
  7968. int i, done = 0;
  7969. trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus,
  7970. &busiest, &done);
  7971. if (done)
  7972. return busiest;
  7973. for_each_cpu_and(i, sched_group_span(group), env->cpus) {
  7974. unsigned long capacity, load, util;
  7975. unsigned int nr_running;
  7976. enum fbq_type rt;
  7977. rq = cpu_rq(i);
  7978. rt = fbq_classify_rq(rq);
  7979. /*
  7980. * We classify groups/runqueues into three groups:
  7981. * - regular: there are !numa tasks
  7982. * - remote: there are numa tasks that run on the 'wrong' node
  7983. * - all: there is no distinction
  7984. *
  7985. * In order to avoid migrating ideally placed numa tasks,
  7986. * ignore those when there's better options.
  7987. *
  7988. * If we ignore the actual busiest queue to migrate another
  7989. * task, the next balance pass can still reduce the busiest
  7990. * queue by moving tasks around inside the node.
  7991. *
  7992. * If we cannot move enough load due to this classification
  7993. * the next pass will adjust the group classification and
  7994. * allow migration of more tasks.
  7995. *
  7996. * Both cases only affect the total convergence complexity.
  7997. */
  7998. if (rt > env->fbq_type)
  7999. continue;
  8000. capacity = capacity_of(i);
  8001. nr_running = rq->cfs.h_nr_running;
  8002. /*
  8003. * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
  8004. * eventually lead to active_balancing high->low capacity.
  8005. * Higher per-CPU capacity is considered better than balancing
  8006. * average load.
  8007. */
  8008. if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
  8009. capacity_of(env->dst_cpu) < capacity &&
  8010. nr_running == 1)
  8011. continue;
  8012. switch (env->migration_type) {
  8013. case migrate_load:
  8014. /*
  8015. * When comparing with load imbalance, use cpu_load()
  8016. * which is not scaled with the CPU capacity.
  8017. */
  8018. load = cpu_load(rq);
  8019. if (nr_running == 1 && load > env->imbalance &&
  8020. !check_cpu_capacity(rq, env->sd))
  8021. break;
  8022. /*
  8023. * For the load comparisons with the other CPUs,
  8024. * consider the cpu_load() scaled with the CPU
  8025. * capacity, so that the load can be moved away
  8026. * from the CPU that is potentially running at a
  8027. * lower capacity.
  8028. *
  8029. * Thus we're looking for max(load_i / capacity_i),
  8030. * crosswise multiplication to rid ourselves of the
  8031. * division works out to:
  8032. * load_i * capacity_j > load_j * capacity_i;
  8033. * where j is our previous maximum.
  8034. */
  8035. if (load * busiest_capacity > busiest_load * capacity) {
  8036. busiest_load = load;
  8037. busiest_capacity = capacity;
  8038. busiest = rq;
  8039. }
  8040. break;
  8041. case migrate_util:
  8042. util = cpu_util(cpu_of(rq));
  8043. /*
  8044. * Don't try to pull utilization from a CPU with one
  8045. * running task. Whatever its utilization, we will fail
  8046. * detach the task.
  8047. */
  8048. if (nr_running <= 1)
  8049. continue;
  8050. if (busiest_util < util) {
  8051. busiest_util = util;
  8052. busiest = rq;
  8053. }
  8054. break;
  8055. case migrate_task:
  8056. if (busiest_nr < nr_running) {
  8057. busiest_nr = nr_running;
  8058. busiest = rq;
  8059. }
  8060. break;
  8061. case migrate_misfit:
  8062. /*
  8063. * For ASYM_CPUCAPACITY domains with misfit tasks we
  8064. * simply seek the "biggest" misfit task.
  8065. */
  8066. if (rq->misfit_task_load > busiest_load) {
  8067. busiest_load = rq->misfit_task_load;
  8068. busiest = rq;
  8069. }
  8070. break;
  8071. }
  8072. }
  8073. return busiest;
  8074. }
  8075. /*
  8076. * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
  8077. * so long as it is large enough.
  8078. */
  8079. #define MAX_PINNED_INTERVAL 512
  8080. static inline bool
  8081. asym_active_balance(struct lb_env *env)
  8082. {
  8083. /*
  8084. * ASYM_PACKING needs to force migrate tasks from busy but
  8085. * lower priority CPUs in order to pack all tasks in the
  8086. * highest priority CPUs.
  8087. */
  8088. return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
  8089. sched_asym_prefer(env->dst_cpu, env->src_cpu);
  8090. }
  8091. static inline bool
  8092. voluntary_active_balance(struct lb_env *env)
  8093. {
  8094. struct sched_domain *sd = env->sd;
  8095. if (asym_active_balance(env))
  8096. return 1;
  8097. /*
  8098. * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
  8099. * It's worth migrating the task if the src_cpu's capacity is reduced
  8100. * because of other sched_class or IRQs if more capacity stays
  8101. * available on dst_cpu.
  8102. */
  8103. if ((env->idle != CPU_NOT_IDLE) &&
  8104. (env->src_rq->cfs.h_nr_running == 1)) {
  8105. if ((check_cpu_capacity(env->src_rq, sd)) &&
  8106. (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
  8107. return 1;
  8108. }
  8109. if (env->migration_type == migrate_misfit)
  8110. return 1;
  8111. return 0;
  8112. }
  8113. static int need_active_balance(struct lb_env *env)
  8114. {
  8115. struct sched_domain *sd = env->sd;
  8116. if (voluntary_active_balance(env))
  8117. return 1;
  8118. return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
  8119. }
  8120. static int active_load_balance_cpu_stop(void *data);
  8121. static int should_we_balance(struct lb_env *env)
  8122. {
  8123. struct sched_group *sg = env->sd->groups;
  8124. int cpu;
  8125. /*
  8126. * Ensure the balancing environment is consistent; can happen
  8127. * when the softirq triggers 'during' hotplug.
  8128. */
  8129. if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
  8130. return 0;
  8131. /*
  8132. * In the newly idle case, we will allow all the CPUs
  8133. * to do the newly idle load balance.
  8134. */
  8135. if (env->idle == CPU_NEWLY_IDLE)
  8136. return 1;
  8137. /* Try to find first idle CPU */
  8138. for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
  8139. if (!idle_cpu(cpu))
  8140. continue;
  8141. /* Are we the first idle CPU? */
  8142. return cpu == env->dst_cpu;
  8143. }
  8144. /* Are we the first CPU of this group ? */
  8145. return group_balance_cpu(sg) == env->dst_cpu;
  8146. }
  8147. /*
  8148. * Check this_cpu to ensure it is balanced within domain. Attempt to move
  8149. * tasks if there is an imbalance.
  8150. */
  8151. static int load_balance(int this_cpu, struct rq *this_rq,
  8152. struct sched_domain *sd, enum cpu_idle_type idle,
  8153. int *continue_balancing)
  8154. {
  8155. int ld_moved, cur_ld_moved, active_balance = 0;
  8156. struct sched_domain *sd_parent = sd->parent;
  8157. struct sched_group *group;
  8158. struct rq *busiest;
  8159. struct rq_flags rf;
  8160. struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
  8161. struct lb_env env = {
  8162. .sd = sd,
  8163. .dst_cpu = this_cpu,
  8164. .dst_rq = this_rq,
  8165. .dst_grpmask = sched_group_span(sd->groups),
  8166. .idle = idle,
  8167. .loop_break = sched_nr_migrate_break,
  8168. .cpus = cpus,
  8169. .fbq_type = all,
  8170. .tasks = LIST_HEAD_INIT(env.tasks),
  8171. };
  8172. cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
  8173. schedstat_inc(sd->lb_count[idle]);
  8174. redo:
  8175. if (!should_we_balance(&env)) {
  8176. *continue_balancing = 0;
  8177. goto out_balanced;
  8178. }
  8179. group = find_busiest_group(&env);
  8180. if (!group) {
  8181. schedstat_inc(sd->lb_nobusyg[idle]);
  8182. goto out_balanced;
  8183. }
  8184. busiest = find_busiest_queue(&env, group);
  8185. if (!busiest) {
  8186. schedstat_inc(sd->lb_nobusyq[idle]);
  8187. goto out_balanced;
  8188. }
  8189. BUG_ON(busiest == env.dst_rq);
  8190. schedstat_add(sd->lb_imbalance[idle], env.imbalance);
  8191. env.src_cpu = busiest->cpu;
  8192. env.src_rq = busiest;
  8193. ld_moved = 0;
  8194. if (busiest->nr_running > 1) {
  8195. /*
  8196. * Attempt to move tasks. If find_busiest_group has found
  8197. * an imbalance but busiest->nr_running <= 1, the group is
  8198. * still unbalanced. ld_moved simply stays zero, so it is
  8199. * correctly treated as an imbalance.
  8200. */
  8201. env.flags |= LBF_ALL_PINNED;
  8202. env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
  8203. more_balance:
  8204. rq_lock_irqsave(busiest, &rf);
  8205. env.src_rq_rf = &rf;
  8206. update_rq_clock(busiest);
  8207. /*
  8208. * cur_ld_moved - load moved in current iteration
  8209. * ld_moved - cumulative load moved across iterations
  8210. */
  8211. cur_ld_moved = detach_tasks(&env);
  8212. /*
  8213. * We've detached some tasks from busiest_rq. Every
  8214. * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
  8215. * unlock busiest->lock, and we are able to be sure
  8216. * that nobody can manipulate the tasks in parallel.
  8217. * See task_rq_lock() family for the details.
  8218. */
  8219. rq_unlock(busiest, &rf);
  8220. if (cur_ld_moved) {
  8221. attach_tasks(&env);
  8222. ld_moved += cur_ld_moved;
  8223. }
  8224. local_irq_restore(rf.flags);
  8225. if (env.flags & LBF_NEED_BREAK) {
  8226. env.flags &= ~LBF_NEED_BREAK;
  8227. goto more_balance;
  8228. }
  8229. /*
  8230. * Revisit (affine) tasks on src_cpu that couldn't be moved to
  8231. * us and move them to an alternate dst_cpu in our sched_group
  8232. * where they can run. The upper limit on how many times we
  8233. * iterate on same src_cpu is dependent on number of CPUs in our
  8234. * sched_group.
  8235. *
  8236. * This changes load balance semantics a bit on who can move
  8237. * load to a given_cpu. In addition to the given_cpu itself
  8238. * (or a ilb_cpu acting on its behalf where given_cpu is
  8239. * nohz-idle), we now have balance_cpu in a position to move
  8240. * load to given_cpu. In rare situations, this may cause
  8241. * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
  8242. * _independently_ and at _same_ time to move some load to
  8243. * given_cpu) causing exceess load to be moved to given_cpu.
  8244. * This however should not happen so much in practice and
  8245. * moreover subsequent load balance cycles should correct the
  8246. * excess load moved.
  8247. */
  8248. if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
  8249. /* Prevent to re-select dst_cpu via env's CPUs */
  8250. __cpumask_clear_cpu(env.dst_cpu, env.cpus);
  8251. env.dst_rq = cpu_rq(env.new_dst_cpu);
  8252. env.dst_cpu = env.new_dst_cpu;
  8253. env.flags &= ~LBF_DST_PINNED;
  8254. env.loop = 0;
  8255. env.loop_break = sched_nr_migrate_break;
  8256. /*
  8257. * Go back to "more_balance" rather than "redo" since we
  8258. * need to continue with same src_cpu.
  8259. */
  8260. goto more_balance;
  8261. }
  8262. /*
  8263. * We failed to reach balance because of affinity.
  8264. */
  8265. if (sd_parent) {
  8266. int *group_imbalance = &sd_parent->groups->sgc->imbalance;
  8267. if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
  8268. *group_imbalance = 1;
  8269. }
  8270. /* All tasks on this runqueue were pinned by CPU affinity */
  8271. if (unlikely(env.flags & LBF_ALL_PINNED)) {
  8272. __cpumask_clear_cpu(cpu_of(busiest), cpus);
  8273. /*
  8274. * Attempting to continue load balancing at the current
  8275. * sched_domain level only makes sense if there are
  8276. * active CPUs remaining as possible busiest CPUs to
  8277. * pull load from which are not contained within the
  8278. * destination group that is receiving any migrated
  8279. * load.
  8280. */
  8281. if (!cpumask_subset(cpus, env.dst_grpmask)) {
  8282. env.loop = 0;
  8283. env.loop_break = sched_nr_migrate_break;
  8284. goto redo;
  8285. }
  8286. goto out_all_pinned;
  8287. }
  8288. }
  8289. if (!ld_moved) {
  8290. schedstat_inc(sd->lb_failed[idle]);
  8291. /*
  8292. * Increment the failure counter only on periodic balance.
  8293. * We do not want newidle balance, which can be very
  8294. * frequent, pollute the failure counter causing
  8295. * excessive cache_hot migrations and active balances.
  8296. */
  8297. if (idle != CPU_NEWLY_IDLE)
  8298. sd->nr_balance_failed++;
  8299. if (need_active_balance(&env)) {
  8300. unsigned long flags;
  8301. raw_spin_lock_irqsave(&busiest->lock, flags);
  8302. /*
  8303. * Don't kick the active_load_balance_cpu_stop,
  8304. * if the curr task on busiest CPU can't be
  8305. * moved to this_cpu:
  8306. */
  8307. if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
  8308. raw_spin_unlock_irqrestore(&busiest->lock,
  8309. flags);
  8310. env.flags |= LBF_ALL_PINNED;
  8311. goto out_one_pinned;
  8312. }
  8313. /*
  8314. * ->active_balance synchronizes accesses to
  8315. * ->active_balance_work. Once set, it's cleared
  8316. * only after active load balance is finished.
  8317. */
  8318. if (!busiest->active_balance) {
  8319. busiest->active_balance = 1;
  8320. busiest->push_cpu = this_cpu;
  8321. active_balance = 1;
  8322. }
  8323. raw_spin_unlock_irqrestore(&busiest->lock, flags);
  8324. if (active_balance) {
  8325. stop_one_cpu_nowait(cpu_of(busiest),
  8326. active_load_balance_cpu_stop, busiest,
  8327. &busiest->active_balance_work);
  8328. }
  8329. /* We've kicked active balancing, force task migration. */
  8330. sd->nr_balance_failed = sd->cache_nice_tries+1;
  8331. }
  8332. } else
  8333. sd->nr_balance_failed = 0;
  8334. if (likely(!active_balance) || voluntary_active_balance(&env)) {
  8335. /* We were unbalanced, so reset the balancing interval */
  8336. sd->balance_interval = sd->min_interval;
  8337. } else {
  8338. /*
  8339. * If we've begun active balancing, start to back off. This
  8340. * case may not be covered by the all_pinned logic if there
  8341. * is only 1 task on the busy runqueue (because we don't call
  8342. * detach_tasks).
  8343. */
  8344. if (sd->balance_interval < sd->max_interval)
  8345. sd->balance_interval *= 2;
  8346. }
  8347. goto out;
  8348. out_balanced:
  8349. /*
  8350. * We reach balance although we may have faced some affinity
  8351. * constraints. Clear the imbalance flag only if other tasks got
  8352. * a chance to move and fix the imbalance.
  8353. */
  8354. if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
  8355. int *group_imbalance = &sd_parent->groups->sgc->imbalance;
  8356. if (*group_imbalance)
  8357. *group_imbalance = 0;
  8358. }
  8359. out_all_pinned:
  8360. /*
  8361. * We reach balance because all tasks are pinned at this level so
  8362. * we can't migrate them. Let the imbalance flag set so parent level
  8363. * can try to migrate them.
  8364. */
  8365. schedstat_inc(sd->lb_balanced[idle]);
  8366. sd->nr_balance_failed = 0;
  8367. out_one_pinned:
  8368. ld_moved = 0;
  8369. /*
  8370. * newidle_balance() disregards balance intervals, so we could
  8371. * repeatedly reach this code, which would lead to balance_interval
  8372. * skyrocketting in a short amount of time. Skip the balance_interval
  8373. * increase logic to avoid that.
  8374. */
  8375. if (env.idle == CPU_NEWLY_IDLE)
  8376. goto out;
  8377. /* tune up the balancing interval */
  8378. if ((env.flags & LBF_ALL_PINNED &&
  8379. sd->balance_interval < MAX_PINNED_INTERVAL) ||
  8380. sd->balance_interval < sd->max_interval)
  8381. sd->balance_interval *= 2;
  8382. out:
  8383. return ld_moved;
  8384. }
  8385. static inline unsigned long
  8386. get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
  8387. {
  8388. unsigned long interval = sd->balance_interval;
  8389. if (cpu_busy)
  8390. interval *= sd->busy_factor;
  8391. /* scale ms to jiffies */
  8392. interval = msecs_to_jiffies(interval);
  8393. /*
  8394. * Reduce likelihood of busy balancing at higher domains racing with
  8395. * balancing at lower domains by preventing their balancing periods
  8396. * from being multiples of each other.
  8397. */
  8398. if (cpu_busy)
  8399. interval -= 1;
  8400. interval = clamp(interval, 1UL, max_load_balance_interval);
  8401. return interval;
  8402. }
  8403. static inline void
  8404. update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
  8405. {
  8406. unsigned long interval, next;
  8407. /* used by idle balance, so cpu_busy = 0 */
  8408. interval = get_sd_balance_interval(sd, 0);
  8409. next = sd->last_balance + interval;
  8410. if (time_after(*next_balance, next))
  8411. *next_balance = next;
  8412. }
  8413. /*
  8414. * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
  8415. * running tasks off the busiest CPU onto idle CPUs. It requires at
  8416. * least 1 task to be running on each physical CPU where possible, and
  8417. * avoids physical / logical imbalances.
  8418. */
  8419. static int active_load_balance_cpu_stop(void *data)
  8420. {
  8421. struct rq *busiest_rq = data;
  8422. int busiest_cpu = cpu_of(busiest_rq);
  8423. int target_cpu = busiest_rq->push_cpu;
  8424. struct rq *target_rq = cpu_rq(target_cpu);
  8425. struct sched_domain *sd;
  8426. struct task_struct *p = NULL;
  8427. struct rq_flags rf;
  8428. rq_lock_irq(busiest_rq, &rf);
  8429. /*
  8430. * Between queueing the stop-work and running it is a hole in which
  8431. * CPUs can become inactive. We should not move tasks from or to
  8432. * inactive CPUs.
  8433. */
  8434. if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
  8435. goto out_unlock;
  8436. /* Make sure the requested CPU hasn't gone down in the meantime: */
  8437. if (unlikely(busiest_cpu != smp_processor_id() ||
  8438. !busiest_rq->active_balance))
  8439. goto out_unlock;
  8440. /* Is there any task to move? */
  8441. if (busiest_rq->nr_running <= 1)
  8442. goto out_unlock;
  8443. /*
  8444. * This condition is "impossible", if it occurs
  8445. * we need to fix it. Originally reported by
  8446. * Bjorn Helgaas on a 128-CPU setup.
  8447. */
  8448. BUG_ON(busiest_rq == target_rq);
  8449. /* Search for an sd spanning us and the target CPU. */
  8450. rcu_read_lock();
  8451. for_each_domain(target_cpu, sd) {
  8452. if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
  8453. break;
  8454. }
  8455. if (likely(sd)) {
  8456. struct lb_env env = {
  8457. .sd = sd,
  8458. .dst_cpu = target_cpu,
  8459. .dst_rq = target_rq,
  8460. .src_cpu = busiest_rq->cpu,
  8461. .src_rq = busiest_rq,
  8462. .idle = CPU_IDLE,
  8463. /*
  8464. * can_migrate_task() doesn't need to compute new_dst_cpu
  8465. * for active balancing. Since we have CPU_IDLE, but no
  8466. * @dst_grpmask we need to make that test go away with lying
  8467. * about DST_PINNED.
  8468. */
  8469. .flags = LBF_DST_PINNED,
  8470. .src_rq_rf = &rf,
  8471. };
  8472. schedstat_inc(sd->alb_count);
  8473. update_rq_clock(busiest_rq);
  8474. p = detach_one_task(&env);
  8475. if (p) {
  8476. schedstat_inc(sd->alb_pushed);
  8477. /* Active balancing done, reset the failure counter. */
  8478. sd->nr_balance_failed = 0;
  8479. } else {
  8480. schedstat_inc(sd->alb_failed);
  8481. }
  8482. }
  8483. rcu_read_unlock();
  8484. out_unlock:
  8485. busiest_rq->active_balance = 0;
  8486. rq_unlock(busiest_rq, &rf);
  8487. if (p)
  8488. attach_one_task(target_rq, p);
  8489. local_irq_enable();
  8490. return 0;
  8491. }
  8492. static DEFINE_SPINLOCK(balancing);
  8493. /*
  8494. * Scale the max load_balance interval with the number of CPUs in the system.
  8495. * This trades load-balance latency on larger machines for less cross talk.
  8496. */
  8497. void update_max_interval(void)
  8498. {
  8499. max_load_balance_interval = HZ*num_active_cpus()/10;
  8500. }
  8501. /*
  8502. * It checks each scheduling domain to see if it is due to be balanced,
  8503. * and initiates a balancing operation if so.
  8504. *
  8505. * Balancing parameters are set up in init_sched_domains.
  8506. */
  8507. static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
  8508. {
  8509. int continue_balancing = 1;
  8510. int cpu = rq->cpu;
  8511. int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
  8512. unsigned long interval;
  8513. struct sched_domain *sd;
  8514. /* Earliest time when we have to do rebalance again */
  8515. unsigned long next_balance = jiffies + 60*HZ;
  8516. int update_next_balance = 0;
  8517. int need_serialize, need_decay = 0;
  8518. u64 max_cost = 0;
  8519. trace_android_rvh_sched_rebalance_domains(rq, &continue_balancing);
  8520. if (!continue_balancing)
  8521. return;
  8522. rcu_read_lock();
  8523. for_each_domain(cpu, sd) {
  8524. /*
  8525. * Decay the newidle max times here because this is a regular
  8526. * visit to all the domains. Decay ~1% per second.
  8527. */
  8528. if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
  8529. sd->max_newidle_lb_cost =
  8530. (sd->max_newidle_lb_cost * 253) / 256;
  8531. sd->next_decay_max_lb_cost = jiffies + HZ;
  8532. need_decay = 1;
  8533. }
  8534. max_cost += sd->max_newidle_lb_cost;
  8535. /*
  8536. * Stop the load balance at this level. There is another
  8537. * CPU in our sched group which is doing load balancing more
  8538. * actively.
  8539. */
  8540. if (!continue_balancing) {
  8541. if (need_decay)
  8542. continue;
  8543. break;
  8544. }
  8545. interval = get_sd_balance_interval(sd, busy);
  8546. need_serialize = sd->flags & SD_SERIALIZE;
  8547. if (need_serialize) {
  8548. if (!spin_trylock(&balancing))
  8549. goto out;
  8550. }
  8551. if (time_after_eq(jiffies, sd->last_balance + interval)) {
  8552. if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
  8553. /*
  8554. * The LBF_DST_PINNED logic could have changed
  8555. * env->dst_cpu, so we can't know our idle
  8556. * state even if we migrated tasks. Update it.
  8557. */
  8558. idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
  8559. busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
  8560. }
  8561. sd->last_balance = jiffies;
  8562. interval = get_sd_balance_interval(sd, busy);
  8563. }
  8564. if (need_serialize)
  8565. spin_unlock(&balancing);
  8566. out:
  8567. if (time_after(next_balance, sd->last_balance + interval)) {
  8568. next_balance = sd->last_balance + interval;
  8569. update_next_balance = 1;
  8570. }
  8571. }
  8572. if (need_decay) {
  8573. /*
  8574. * Ensure the rq-wide value also decays but keep it at a
  8575. * reasonable floor to avoid funnies with rq->avg_idle.
  8576. */
  8577. rq->max_idle_balance_cost =
  8578. max((u64)sysctl_sched_migration_cost, max_cost);
  8579. }
  8580. rcu_read_unlock();
  8581. /*
  8582. * next_balance will be updated only when there is a need.
  8583. * When the cpu is attached to null domain for ex, it will not be
  8584. * updated.
  8585. */
  8586. if (likely(update_next_balance)) {
  8587. rq->next_balance = next_balance;
  8588. #ifdef CONFIG_NO_HZ_COMMON
  8589. /*
  8590. * If this CPU has been elected to perform the nohz idle
  8591. * balance. Other idle CPUs have already rebalanced with
  8592. * nohz_idle_balance() and nohz.next_balance has been
  8593. * updated accordingly. This CPU is now running the idle load
  8594. * balance for itself and we need to update the
  8595. * nohz.next_balance accordingly.
  8596. */
  8597. if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
  8598. nohz.next_balance = rq->next_balance;
  8599. #endif
  8600. }
  8601. }
  8602. static inline int on_null_domain(struct rq *rq)
  8603. {
  8604. return unlikely(!rcu_dereference_sched(rq->sd));
  8605. }
  8606. #ifdef CONFIG_NO_HZ_COMMON
  8607. /*
  8608. * idle load balancing details
  8609. * - When one of the busy CPUs notice that there may be an idle rebalancing
  8610. * needed, they will kick the idle load balancer, which then does idle
  8611. * load balancing for all the idle CPUs.
  8612. * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED not set
  8613. * anywhere yet.
  8614. */
  8615. static inline int find_new_ilb(void)
  8616. {
  8617. int ilb = -1;
  8618. trace_android_rvh_find_new_ilb(nohz.idle_cpus_mask, &ilb);
  8619. if (ilb >= 0)
  8620. return ilb;
  8621. for_each_cpu_and(ilb, nohz.idle_cpus_mask,
  8622. housekeeping_cpumask(HK_FLAG_MISC)) {
  8623. if (idle_cpu(ilb))
  8624. return ilb;
  8625. }
  8626. return nr_cpu_ids;
  8627. }
  8628. /*
  8629. * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
  8630. * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
  8631. */
  8632. static void kick_ilb(unsigned int flags)
  8633. {
  8634. int ilb_cpu;
  8635. /*
  8636. * Increase nohz.next_balance only when if full ilb is triggered but
  8637. * not if we only update stats.
  8638. */
  8639. if (flags & NOHZ_BALANCE_KICK)
  8640. nohz.next_balance = jiffies+1;
  8641. ilb_cpu = find_new_ilb();
  8642. if (ilb_cpu >= nr_cpu_ids)
  8643. return;
  8644. /*
  8645. * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
  8646. * the first flag owns it; cleared by nohz_csd_func().
  8647. */
  8648. flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
  8649. if (flags & NOHZ_KICK_MASK)
  8650. return;
  8651. /*
  8652. * This way we generate an IPI on the target CPU which
  8653. * is idle. And the softirq performing nohz idle load balance
  8654. * will be run before returning from the IPI.
  8655. */
  8656. smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
  8657. }
  8658. /*
  8659. * Current decision point for kicking the idle load balancer in the presence
  8660. * of idle CPUs in the system.
  8661. */
  8662. static void nohz_balancer_kick(struct rq *rq)
  8663. {
  8664. unsigned long now = jiffies;
  8665. struct sched_domain_shared *sds;
  8666. struct sched_domain *sd;
  8667. int nr_busy, i, cpu = rq->cpu;
  8668. unsigned int flags = 0;
  8669. int done = 0;
  8670. if (unlikely(rq->idle_balance))
  8671. return;
  8672. /*
  8673. * We may be recently in ticked or tickless idle mode. At the first
  8674. * busy tick after returning from idle, we will update the busy stats.
  8675. */
  8676. nohz_balance_exit_idle(rq);
  8677. /*
  8678. * None are in tickless mode and hence no need for NOHZ idle load
  8679. * balancing.
  8680. */
  8681. if (likely(!atomic_read(&nohz.nr_cpus)))
  8682. return;
  8683. if (READ_ONCE(nohz.has_blocked) &&
  8684. time_after(now, READ_ONCE(nohz.next_blocked)))
  8685. flags = NOHZ_STATS_KICK;
  8686. if (time_before(now, nohz.next_balance))
  8687. goto out;
  8688. trace_android_rvh_sched_nohz_balancer_kick(rq, &flags, &done);
  8689. if (done)
  8690. goto out;
  8691. if (rq->nr_running >= 2) {
  8692. flags = NOHZ_KICK_MASK;
  8693. goto out;
  8694. }
  8695. rcu_read_lock();
  8696. sd = rcu_dereference(rq->sd);
  8697. if (sd) {
  8698. /*
  8699. * If there's a CFS task and the current CPU has reduced
  8700. * capacity; kick the ILB to see if there's a better CPU to run
  8701. * on.
  8702. */
  8703. if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
  8704. flags = NOHZ_KICK_MASK;
  8705. goto unlock;
  8706. }
  8707. }
  8708. sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
  8709. if (sd) {
  8710. /*
  8711. * When ASYM_PACKING; see if there's a more preferred CPU
  8712. * currently idle; in which case, kick the ILB to move tasks
  8713. * around.
  8714. */
  8715. for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
  8716. if (sched_asym_prefer(i, cpu)) {
  8717. flags = NOHZ_KICK_MASK;
  8718. goto unlock;
  8719. }
  8720. }
  8721. }
  8722. sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
  8723. if (sd) {
  8724. /*
  8725. * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
  8726. * to run the misfit task on.
  8727. */
  8728. if (check_misfit_status(rq, sd)) {
  8729. flags = NOHZ_KICK_MASK;
  8730. goto unlock;
  8731. }
  8732. /*
  8733. * For asymmetric systems, we do not want to nicely balance
  8734. * cache use, instead we want to embrace asymmetry and only
  8735. * ensure tasks have enough CPU capacity.
  8736. *
  8737. * Skip the LLC logic because it's not relevant in that case.
  8738. */
  8739. goto unlock;
  8740. }
  8741. sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
  8742. if (sds) {
  8743. /*
  8744. * If there is an imbalance between LLC domains (IOW we could
  8745. * increase the overall cache use), we need some less-loaded LLC
  8746. * domain to pull some load. Likewise, we may need to spread
  8747. * load within the current LLC domain (e.g. packed SMT cores but
  8748. * other CPUs are idle). We can't really know from here how busy
  8749. * the others are - so just get a nohz balance going if it looks
  8750. * like this LLC domain has tasks we could move.
  8751. */
  8752. nr_busy = atomic_read(&sds->nr_busy_cpus);
  8753. if (nr_busy > 1) {
  8754. flags = NOHZ_KICK_MASK;
  8755. goto unlock;
  8756. }
  8757. }
  8758. unlock:
  8759. rcu_read_unlock();
  8760. out:
  8761. if (flags)
  8762. kick_ilb(flags);
  8763. }
  8764. static void set_cpu_sd_state_busy(int cpu)
  8765. {
  8766. struct sched_domain *sd;
  8767. rcu_read_lock();
  8768. sd = rcu_dereference(per_cpu(sd_llc, cpu));
  8769. if (!sd || !sd->nohz_idle)
  8770. goto unlock;
  8771. sd->nohz_idle = 0;
  8772. atomic_inc(&sd->shared->nr_busy_cpus);
  8773. unlock:
  8774. rcu_read_unlock();
  8775. }
  8776. void nohz_balance_exit_idle(struct rq *rq)
  8777. {
  8778. SCHED_WARN_ON(rq != this_rq());
  8779. if (likely(!rq->nohz_tick_stopped))
  8780. return;
  8781. rq->nohz_tick_stopped = 0;
  8782. cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
  8783. atomic_dec(&nohz.nr_cpus);
  8784. set_cpu_sd_state_busy(rq->cpu);
  8785. }
  8786. static void set_cpu_sd_state_idle(int cpu)
  8787. {
  8788. struct sched_domain *sd;
  8789. rcu_read_lock();
  8790. sd = rcu_dereference(per_cpu(sd_llc, cpu));
  8791. if (!sd || sd->nohz_idle)
  8792. goto unlock;
  8793. sd->nohz_idle = 1;
  8794. atomic_dec(&sd->shared->nr_busy_cpus);
  8795. unlock:
  8796. rcu_read_unlock();
  8797. }
  8798. /*
  8799. * This routine will record that the CPU is going idle with tick stopped.
  8800. * This info will be used in performing idle load balancing in the future.
  8801. */
  8802. void nohz_balance_enter_idle(int cpu)
  8803. {
  8804. struct rq *rq = cpu_rq(cpu);
  8805. SCHED_WARN_ON(cpu != smp_processor_id());
  8806. if (!cpu_active(cpu)) {
  8807. /*
  8808. * A CPU can be paused while it is idle with it's tick
  8809. * stopped. nohz_balance_exit_idle() should be called
  8810. * from the local CPU, so it can't be called during
  8811. * pause. This results in paused CPU participating in
  8812. * the nohz idle balance, which should be avoided.
  8813. *
  8814. * When the paused CPU exits idle and enters again,
  8815. * exempt the paused CPU from nohz_balance_exit_idle.
  8816. */
  8817. nohz_balance_exit_idle(rq);
  8818. return;
  8819. }
  8820. /* Spare idle load balancing on CPUs that don't want to be disturbed: */
  8821. if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
  8822. return;
  8823. /*
  8824. * Can be set safely without rq->lock held
  8825. * If a clear happens, it will have evaluated last additions because
  8826. * rq->lock is held during the check and the clear
  8827. */
  8828. rq->has_blocked_load = 1;
  8829. /*
  8830. * The tick is still stopped but load could have been added in the
  8831. * meantime. We set the nohz.has_blocked flag to trig a check of the
  8832. * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
  8833. * of nohz.has_blocked can only happen after checking the new load
  8834. */
  8835. if (rq->nohz_tick_stopped)
  8836. goto out;
  8837. /* If we're a completely isolated CPU, we don't play: */
  8838. if (on_null_domain(rq))
  8839. return;
  8840. rq->nohz_tick_stopped = 1;
  8841. cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
  8842. atomic_inc(&nohz.nr_cpus);
  8843. /*
  8844. * Ensures that if nohz_idle_balance() fails to observe our
  8845. * @idle_cpus_mask store, it must observe the @has_blocked
  8846. * store.
  8847. */
  8848. smp_mb__after_atomic();
  8849. set_cpu_sd_state_idle(cpu);
  8850. out:
  8851. /*
  8852. * Each time a cpu enter idle, we assume that it has blocked load and
  8853. * enable the periodic update of the load of idle cpus
  8854. */
  8855. WRITE_ONCE(nohz.has_blocked, 1);
  8856. }
  8857. /*
  8858. * Internal function that runs load balance for all idle cpus. The load balance
  8859. * can be a simple update of blocked load or a complete load balance with
  8860. * tasks movement depending of flags.
  8861. * The function returns false if the loop has stopped before running
  8862. * through all idle CPUs.
  8863. */
  8864. static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
  8865. enum cpu_idle_type idle)
  8866. {
  8867. /* Earliest time when we have to do rebalance again */
  8868. unsigned long now = jiffies;
  8869. unsigned long next_balance = now + 60*HZ;
  8870. bool has_blocked_load = false;
  8871. int update_next_balance = 0;
  8872. int this_cpu = this_rq->cpu;
  8873. int balance_cpu;
  8874. int ret = false;
  8875. struct rq *rq;
  8876. SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
  8877. /*
  8878. * We assume there will be no idle load after this update and clear
  8879. * the has_blocked flag. If a cpu enters idle in the mean time, it will
  8880. * set the has_blocked flag and trig another update of idle load.
  8881. * Because a cpu that becomes idle, is added to idle_cpus_mask before
  8882. * setting the flag, we are sure to not clear the state and not
  8883. * check the load of an idle cpu.
  8884. */
  8885. WRITE_ONCE(nohz.has_blocked, 0);
  8886. /*
  8887. * Ensures that if we miss the CPU, we must see the has_blocked
  8888. * store from nohz_balance_enter_idle().
  8889. */
  8890. smp_mb();
  8891. for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
  8892. if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
  8893. continue;
  8894. /*
  8895. * If this CPU gets work to do, stop the load balancing
  8896. * work being done for other CPUs. Next load
  8897. * balancing owner will pick it up.
  8898. */
  8899. if (need_resched()) {
  8900. has_blocked_load = true;
  8901. goto abort;
  8902. }
  8903. rq = cpu_rq(balance_cpu);
  8904. has_blocked_load |= update_nohz_stats(rq, true);
  8905. /*
  8906. * If time for next balance is due,
  8907. * do the balance.
  8908. */
  8909. if (time_after_eq(jiffies, rq->next_balance)) {
  8910. struct rq_flags rf;
  8911. rq_lock_irqsave(rq, &rf);
  8912. update_rq_clock(rq);
  8913. rq_unlock_irqrestore(rq, &rf);
  8914. if (flags & NOHZ_BALANCE_KICK)
  8915. rebalance_domains(rq, CPU_IDLE);
  8916. }
  8917. if (time_after(next_balance, rq->next_balance)) {
  8918. next_balance = rq->next_balance;
  8919. update_next_balance = 1;
  8920. }
  8921. }
  8922. /*
  8923. * next_balance will be updated only when there is a need.
  8924. * When the CPU is attached to null domain for ex, it will not be
  8925. * updated.
  8926. */
  8927. if (likely(update_next_balance))
  8928. nohz.next_balance = next_balance;
  8929. /* Newly idle CPU doesn't need an update */
  8930. if (idle != CPU_NEWLY_IDLE) {
  8931. update_blocked_averages(this_cpu);
  8932. has_blocked_load |= this_rq->has_blocked_load;
  8933. }
  8934. if (flags & NOHZ_BALANCE_KICK)
  8935. rebalance_domains(this_rq, CPU_IDLE);
  8936. WRITE_ONCE(nohz.next_blocked,
  8937. now + msecs_to_jiffies(LOAD_AVG_PERIOD));
  8938. /* The full idle balance loop has been done */
  8939. ret = true;
  8940. abort:
  8941. /* There is still blocked load, enable periodic update */
  8942. if (has_blocked_load)
  8943. WRITE_ONCE(nohz.has_blocked, 1);
  8944. return ret;
  8945. }
  8946. /*
  8947. * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
  8948. * rebalancing for all the cpus for whom scheduler ticks are stopped.
  8949. */
  8950. static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
  8951. {
  8952. unsigned int flags = this_rq->nohz_idle_balance;
  8953. if (!flags)
  8954. return false;
  8955. this_rq->nohz_idle_balance = 0;
  8956. if (idle != CPU_IDLE)
  8957. return false;
  8958. _nohz_idle_balance(this_rq, flags, idle);
  8959. return true;
  8960. }
  8961. static void nohz_newidle_balance(struct rq *this_rq)
  8962. {
  8963. int this_cpu = this_rq->cpu;
  8964. /*
  8965. * This CPU doesn't want to be disturbed by scheduler
  8966. * housekeeping
  8967. */
  8968. if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
  8969. return;
  8970. /* Will wake up very soon. No time for doing anything else*/
  8971. if (this_rq->avg_idle < sysctl_sched_migration_cost)
  8972. return;
  8973. /* Don't need to update blocked load of idle CPUs*/
  8974. if (!READ_ONCE(nohz.has_blocked) ||
  8975. time_before(jiffies, READ_ONCE(nohz.next_blocked)))
  8976. return;
  8977. raw_spin_unlock(&this_rq->lock);
  8978. /*
  8979. * This CPU is going to be idle and blocked load of idle CPUs
  8980. * need to be updated. Run the ilb locally as it is a good
  8981. * candidate for ilb instead of waking up another idle CPU.
  8982. * Kick an normal ilb if we failed to do the update.
  8983. */
  8984. if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
  8985. kick_ilb(NOHZ_STATS_KICK);
  8986. raw_spin_lock(&this_rq->lock);
  8987. }
  8988. #else /* !CONFIG_NO_HZ_COMMON */
  8989. static inline void nohz_balancer_kick(struct rq *rq) { }
  8990. static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
  8991. {
  8992. return false;
  8993. }
  8994. static inline void nohz_newidle_balance(struct rq *this_rq) { }
  8995. #endif /* CONFIG_NO_HZ_COMMON */
  8996. /*
  8997. * idle_balance is called by schedule() if this_cpu is about to become
  8998. * idle. Attempts to pull tasks from other CPUs.
  8999. *
  9000. * Returns:
  9001. * < 0 - we released the lock and there are !fair tasks present
  9002. * 0 - failed, no new tasks
  9003. * > 0 - success, new (fair) tasks present
  9004. */
  9005. static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
  9006. {
  9007. unsigned long next_balance = jiffies + HZ;
  9008. int this_cpu = this_rq->cpu;
  9009. struct sched_domain *sd;
  9010. int pulled_task = 0;
  9011. u64 curr_cost = 0;
  9012. int done = 0;
  9013. trace_android_rvh_sched_newidle_balance(this_rq, rf, &pulled_task, &done);
  9014. if (done)
  9015. return pulled_task;
  9016. update_misfit_status(NULL, this_rq);
  9017. /*
  9018. * We must set idle_stamp _before_ calling idle_balance(), such that we
  9019. * measure the duration of idle_balance() as idle time.
  9020. */
  9021. this_rq->idle_stamp = rq_clock(this_rq);
  9022. /*
  9023. * Do not pull tasks towards !active CPUs...
  9024. */
  9025. if (!cpu_active(this_cpu))
  9026. return 0;
  9027. /*
  9028. * This is OK, because current is on_cpu, which avoids it being picked
  9029. * for load-balance and preemption/IRQs are still disabled avoiding
  9030. * further scheduler activity on it and we're being very careful to
  9031. * re-start the picking loop.
  9032. */
  9033. rq_unpin_lock(this_rq, rf);
  9034. if (this_rq->avg_idle < sysctl_sched_migration_cost ||
  9035. !READ_ONCE(this_rq->rd->overload)) {
  9036. rcu_read_lock();
  9037. sd = rcu_dereference_check_sched_domain(this_rq->sd);
  9038. if (sd)
  9039. update_next_balance(sd, &next_balance);
  9040. rcu_read_unlock();
  9041. nohz_newidle_balance(this_rq);
  9042. goto out;
  9043. }
  9044. raw_spin_unlock(&this_rq->lock);
  9045. update_blocked_averages(this_cpu);
  9046. rcu_read_lock();
  9047. for_each_domain(this_cpu, sd) {
  9048. int continue_balancing = 1;
  9049. u64 t0, domain_cost;
  9050. if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
  9051. update_next_balance(sd, &next_balance);
  9052. break;
  9053. }
  9054. if (sd->flags & SD_BALANCE_NEWIDLE) {
  9055. t0 = sched_clock_cpu(this_cpu);
  9056. pulled_task = load_balance(this_cpu, this_rq,
  9057. sd, CPU_NEWLY_IDLE,
  9058. &continue_balancing);
  9059. domain_cost = sched_clock_cpu(this_cpu) - t0;
  9060. if (domain_cost > sd->max_newidle_lb_cost)
  9061. sd->max_newidle_lb_cost = domain_cost;
  9062. curr_cost += domain_cost;
  9063. }
  9064. update_next_balance(sd, &next_balance);
  9065. /*
  9066. * Stop searching for tasks to pull if there are
  9067. * now runnable tasks on this rq.
  9068. */
  9069. if (pulled_task || this_rq->nr_running > 0)
  9070. break;
  9071. }
  9072. rcu_read_unlock();
  9073. raw_spin_lock(&this_rq->lock);
  9074. if (curr_cost > this_rq->max_idle_balance_cost)
  9075. this_rq->max_idle_balance_cost = curr_cost;
  9076. out:
  9077. /*
  9078. * While browsing the domains, we released the rq lock, a task could
  9079. * have been enqueued in the meantime. Since we're not going idle,
  9080. * pretend we pulled a task.
  9081. */
  9082. if (this_rq->cfs.h_nr_running && !pulled_task)
  9083. pulled_task = 1;
  9084. /* Move the next balance forward */
  9085. if (time_after(this_rq->next_balance, next_balance))
  9086. this_rq->next_balance = next_balance;
  9087. /* Is there a task of a high priority class? */
  9088. if (this_rq->nr_running != this_rq->cfs.h_nr_running)
  9089. pulled_task = -1;
  9090. if (pulled_task)
  9091. this_rq->idle_stamp = 0;
  9092. rq_repin_lock(this_rq, rf);
  9093. return pulled_task;
  9094. }
  9095. /*
  9096. * run_rebalance_domains is triggered when needed from the scheduler tick.
  9097. * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
  9098. */
  9099. static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
  9100. {
  9101. struct rq *this_rq = this_rq();
  9102. enum cpu_idle_type idle = this_rq->idle_balance ?
  9103. CPU_IDLE : CPU_NOT_IDLE;
  9104. /*
  9105. * If this CPU has a pending nohz_balance_kick, then do the
  9106. * balancing on behalf of the other idle CPUs whose ticks are
  9107. * stopped. Do nohz_idle_balance *before* rebalance_domains to
  9108. * give the idle CPUs a chance to load balance. Else we may
  9109. * load balance only within the local sched_domain hierarchy
  9110. * and abort nohz_idle_balance altogether if we pull some load.
  9111. */
  9112. if (nohz_idle_balance(this_rq, idle))
  9113. return;
  9114. /* normal load balance */
  9115. update_blocked_averages(this_rq->cpu);
  9116. rebalance_domains(this_rq, idle);
  9117. }
  9118. /*
  9119. * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
  9120. */
  9121. void trigger_load_balance(struct rq *rq)
  9122. {
  9123. /* Don't need to rebalance while attached to NULL domain */
  9124. if (unlikely(on_null_domain(rq)))
  9125. return;
  9126. if (time_after_eq(jiffies, rq->next_balance))
  9127. raise_softirq(SCHED_SOFTIRQ);
  9128. nohz_balancer_kick(rq);
  9129. }
  9130. static void rq_online_fair(struct rq *rq)
  9131. {
  9132. update_sysctl();
  9133. update_runtime_enabled(rq);
  9134. }
  9135. static void rq_offline_fair(struct rq *rq)
  9136. {
  9137. update_sysctl();
  9138. /* Ensure any throttled groups are reachable by pick_next_task */
  9139. unthrottle_offline_cfs_rqs(rq);
  9140. }
  9141. #endif /* CONFIG_SMP */
  9142. /*
  9143. * scheduler tick hitting a task of our scheduling class.
  9144. *
  9145. * NOTE: This function can be called remotely by the tick offload that
  9146. * goes along full dynticks. Therefore no local assumption can be made
  9147. * and everything must be accessed through the @rq and @curr passed in
  9148. * parameters.
  9149. */
  9150. static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
  9151. {
  9152. struct cfs_rq *cfs_rq;
  9153. struct sched_entity *se = &curr->se;
  9154. for_each_sched_entity(se) {
  9155. cfs_rq = cfs_rq_of(se);
  9156. entity_tick(cfs_rq, se, queued);
  9157. }
  9158. if (static_branch_unlikely(&sched_numa_balancing))
  9159. task_tick_numa(rq, curr);
  9160. update_misfit_status(curr, rq);
  9161. update_overutilized_status(task_rq(curr));
  9162. }
  9163. /*
  9164. * called on fork with the child task as argument from the parent's context
  9165. * - child not yet on the tasklist
  9166. * - preemption disabled
  9167. */
  9168. static void task_fork_fair(struct task_struct *p)
  9169. {
  9170. struct cfs_rq *cfs_rq;
  9171. struct sched_entity *se = &p->se, *curr;
  9172. struct rq *rq = this_rq();
  9173. struct rq_flags rf;
  9174. rq_lock(rq, &rf);
  9175. update_rq_clock(rq);
  9176. cfs_rq = task_cfs_rq(current);
  9177. curr = cfs_rq->curr;
  9178. if (curr) {
  9179. update_curr(cfs_rq);
  9180. se->vruntime = curr->vruntime;
  9181. }
  9182. place_entity(cfs_rq, se, 1);
  9183. if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
  9184. /*
  9185. * Upon rescheduling, sched_class::put_prev_task() will place
  9186. * 'current' within the tree based on its new key value.
  9187. */
  9188. swap(curr->vruntime, se->vruntime);
  9189. resched_curr(rq);
  9190. }
  9191. se->vruntime -= cfs_rq->min_vruntime;
  9192. rq_unlock(rq, &rf);
  9193. }
  9194. /*
  9195. * Priority of the task has changed. Check to see if we preempt
  9196. * the current task.
  9197. */
  9198. static void
  9199. prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
  9200. {
  9201. if (!task_on_rq_queued(p))
  9202. return;
  9203. if (rq->cfs.nr_running == 1)
  9204. return;
  9205. /*
  9206. * Reschedule if we are currently running on this runqueue and
  9207. * our priority decreased, or if we are not currently running on
  9208. * this runqueue and our priority is higher than the current's
  9209. */
  9210. if (rq->curr == p) {
  9211. if (p->prio > oldprio)
  9212. resched_curr(rq);
  9213. } else
  9214. check_preempt_curr(rq, p, 0);
  9215. }
  9216. static inline bool vruntime_normalized(struct task_struct *p)
  9217. {
  9218. struct sched_entity *se = &p->se;
  9219. /*
  9220. * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
  9221. * the dequeue_entity(.flags=0) will already have normalized the
  9222. * vruntime.
  9223. */
  9224. if (p->on_rq)
  9225. return true;
  9226. /*
  9227. * When !on_rq, vruntime of the task has usually NOT been normalized.
  9228. * But there are some cases where it has already been normalized:
  9229. *
  9230. * - A forked child which is waiting for being woken up by
  9231. * wake_up_new_task().
  9232. * - A task which has been woken up by try_to_wake_up() and
  9233. * waiting for actually being woken up by sched_ttwu_pending().
  9234. */
  9235. if (!se->sum_exec_runtime ||
  9236. (p->state == TASK_WAKING && p->sched_remote_wakeup))
  9237. return true;
  9238. return false;
  9239. }
  9240. #ifdef CONFIG_FAIR_GROUP_SCHED
  9241. /*
  9242. * Propagate the changes of the sched_entity across the tg tree to make it
  9243. * visible to the root
  9244. */
  9245. static void propagate_entity_cfs_rq(struct sched_entity *se)
  9246. {
  9247. struct cfs_rq *cfs_rq;
  9248. list_add_leaf_cfs_rq(cfs_rq_of(se));
  9249. /* Start to propagate at parent */
  9250. se = se->parent;
  9251. for_each_sched_entity(se) {
  9252. cfs_rq = cfs_rq_of(se);
  9253. if (!cfs_rq_throttled(cfs_rq)){
  9254. update_load_avg(cfs_rq, se, UPDATE_TG);
  9255. list_add_leaf_cfs_rq(cfs_rq);
  9256. continue;
  9257. }
  9258. if (list_add_leaf_cfs_rq(cfs_rq))
  9259. break;
  9260. }
  9261. }
  9262. #else
  9263. static void propagate_entity_cfs_rq(struct sched_entity *se) { }
  9264. #endif
  9265. static void detach_entity_cfs_rq(struct sched_entity *se)
  9266. {
  9267. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  9268. /* Catch up with the cfs_rq and remove our load when we leave */
  9269. update_load_avg(cfs_rq, se, 0);
  9270. detach_entity_load_avg(cfs_rq, se);
  9271. update_tg_load_avg(cfs_rq);
  9272. propagate_entity_cfs_rq(se);
  9273. }
  9274. static void attach_entity_cfs_rq(struct sched_entity *se)
  9275. {
  9276. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  9277. #ifdef CONFIG_FAIR_GROUP_SCHED
  9278. /*
  9279. * Since the real-depth could have been changed (only FAIR
  9280. * class maintain depth value), reset depth properly.
  9281. */
  9282. se->depth = se->parent ? se->parent->depth + 1 : 0;
  9283. #endif
  9284. /* Synchronize entity with its cfs_rq */
  9285. update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
  9286. attach_entity_load_avg(cfs_rq, se);
  9287. update_tg_load_avg(cfs_rq);
  9288. propagate_entity_cfs_rq(se);
  9289. }
  9290. static void detach_task_cfs_rq(struct task_struct *p)
  9291. {
  9292. struct sched_entity *se = &p->se;
  9293. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  9294. if (!vruntime_normalized(p)) {
  9295. /*
  9296. * Fix up our vruntime so that the current sleep doesn't
  9297. * cause 'unlimited' sleep bonus.
  9298. */
  9299. place_entity(cfs_rq, se, 0);
  9300. se->vruntime -= cfs_rq->min_vruntime;
  9301. }
  9302. detach_entity_cfs_rq(se);
  9303. }
  9304. static void attach_task_cfs_rq(struct task_struct *p)
  9305. {
  9306. struct sched_entity *se = &p->se;
  9307. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  9308. attach_entity_cfs_rq(se);
  9309. if (!vruntime_normalized(p))
  9310. se->vruntime += cfs_rq->min_vruntime;
  9311. }
  9312. static void switched_from_fair(struct rq *rq, struct task_struct *p)
  9313. {
  9314. detach_task_cfs_rq(p);
  9315. }
  9316. static void switched_to_fair(struct rq *rq, struct task_struct *p)
  9317. {
  9318. attach_task_cfs_rq(p);
  9319. if (task_on_rq_queued(p)) {
  9320. /*
  9321. * We were most likely switched from sched_rt, so
  9322. * kick off the schedule if running, otherwise just see
  9323. * if we can still preempt the current task.
  9324. */
  9325. if (rq->curr == p)
  9326. resched_curr(rq);
  9327. else
  9328. check_preempt_curr(rq, p, 0);
  9329. }
  9330. }
  9331. /* Account for a task changing its policy or group.
  9332. *
  9333. * This routine is mostly called to set cfs_rq->curr field when a task
  9334. * migrates between groups/classes.
  9335. */
  9336. static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
  9337. {
  9338. struct sched_entity *se = &p->se;
  9339. #ifdef CONFIG_SMP
  9340. if (task_on_rq_queued(p)) {
  9341. /*
  9342. * Move the next running task to the front of the list, so our
  9343. * cfs_tasks list becomes MRU one.
  9344. */
  9345. list_move(&se->group_node, &rq->cfs_tasks);
  9346. }
  9347. #endif
  9348. for_each_sched_entity(se) {
  9349. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  9350. set_next_entity(cfs_rq, se);
  9351. /* ensure bandwidth has been allocated on our new cfs_rq */
  9352. account_cfs_rq_runtime(cfs_rq, 0);
  9353. }
  9354. }
  9355. void init_cfs_rq(struct cfs_rq *cfs_rq)
  9356. {
  9357. cfs_rq->tasks_timeline = RB_ROOT_CACHED;
  9358. cfs_rq->min_vruntime = (u64)(-(1LL << 20));
  9359. #ifndef CONFIG_64BIT
  9360. cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
  9361. #endif
  9362. #ifdef CONFIG_SMP
  9363. raw_spin_lock_init(&cfs_rq->removed.lock);
  9364. #endif
  9365. }
  9366. #ifdef CONFIG_FAIR_GROUP_SCHED
  9367. static void task_set_group_fair(struct task_struct *p)
  9368. {
  9369. struct sched_entity *se = &p->se;
  9370. set_task_rq(p, task_cpu(p));
  9371. se->depth = se->parent ? se->parent->depth + 1 : 0;
  9372. }
  9373. static void task_move_group_fair(struct task_struct *p)
  9374. {
  9375. detach_task_cfs_rq(p);
  9376. set_task_rq(p, task_cpu(p));
  9377. #ifdef CONFIG_SMP
  9378. /* Tell se's cfs_rq has been changed -- migrated */
  9379. p->se.avg.last_update_time = 0;
  9380. #endif
  9381. attach_task_cfs_rq(p);
  9382. }
  9383. static void task_change_group_fair(struct task_struct *p, int type)
  9384. {
  9385. switch (type) {
  9386. case TASK_SET_GROUP:
  9387. task_set_group_fair(p);
  9388. break;
  9389. case TASK_MOVE_GROUP:
  9390. task_move_group_fair(p);
  9391. break;
  9392. }
  9393. }
  9394. void free_fair_sched_group(struct task_group *tg)
  9395. {
  9396. int i;
  9397. destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
  9398. for_each_possible_cpu(i) {
  9399. if (tg->cfs_rq)
  9400. kfree(tg->cfs_rq[i]);
  9401. if (tg->se)
  9402. kfree(tg->se[i]);
  9403. }
  9404. kfree(tg->cfs_rq);
  9405. kfree(tg->se);
  9406. }
  9407. int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
  9408. {
  9409. struct sched_entity *se;
  9410. struct cfs_rq *cfs_rq;
  9411. int i;
  9412. tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
  9413. if (!tg->cfs_rq)
  9414. goto err;
  9415. tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
  9416. if (!tg->se)
  9417. goto err;
  9418. tg->shares = NICE_0_LOAD;
  9419. init_cfs_bandwidth(tg_cfs_bandwidth(tg));
  9420. for_each_possible_cpu(i) {
  9421. cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
  9422. GFP_KERNEL, cpu_to_node(i));
  9423. if (!cfs_rq)
  9424. goto err;
  9425. se = kzalloc_node(sizeof(struct sched_entity),
  9426. GFP_KERNEL, cpu_to_node(i));
  9427. if (!se)
  9428. goto err_free_rq;
  9429. init_cfs_rq(cfs_rq);
  9430. init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
  9431. init_entity_runnable_average(se);
  9432. }
  9433. return 1;
  9434. err_free_rq:
  9435. kfree(cfs_rq);
  9436. err:
  9437. return 0;
  9438. }
  9439. void online_fair_sched_group(struct task_group *tg)
  9440. {
  9441. struct sched_entity *se;
  9442. struct rq_flags rf;
  9443. struct rq *rq;
  9444. int i;
  9445. for_each_possible_cpu(i) {
  9446. rq = cpu_rq(i);
  9447. se = tg->se[i];
  9448. rq_lock_irq(rq, &rf);
  9449. update_rq_clock(rq);
  9450. attach_entity_cfs_rq(se);
  9451. sync_throttle(tg, i);
  9452. rq_unlock_irq(rq, &rf);
  9453. }
  9454. }
  9455. void unregister_fair_sched_group(struct task_group *tg)
  9456. {
  9457. unsigned long flags;
  9458. struct rq *rq;
  9459. int cpu;
  9460. for_each_possible_cpu(cpu) {
  9461. if (tg->se[cpu])
  9462. remove_entity_load_avg(tg->se[cpu]);
  9463. /*
  9464. * Only empty task groups can be destroyed; so we can speculatively
  9465. * check on_list without danger of it being re-added.
  9466. */
  9467. if (!tg->cfs_rq[cpu]->on_list)
  9468. continue;
  9469. rq = cpu_rq(cpu);
  9470. raw_spin_lock_irqsave(&rq->lock, flags);
  9471. list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
  9472. raw_spin_unlock_irqrestore(&rq->lock, flags);
  9473. }
  9474. }
  9475. void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
  9476. struct sched_entity *se, int cpu,
  9477. struct sched_entity *parent)
  9478. {
  9479. struct rq *rq = cpu_rq(cpu);
  9480. cfs_rq->tg = tg;
  9481. cfs_rq->rq = rq;
  9482. init_cfs_rq_runtime(cfs_rq);
  9483. tg->cfs_rq[cpu] = cfs_rq;
  9484. tg->se[cpu] = se;
  9485. /* se could be NULL for root_task_group */
  9486. if (!se)
  9487. return;
  9488. if (!parent) {
  9489. se->cfs_rq = &rq->cfs;
  9490. se->depth = 0;
  9491. } else {
  9492. se->cfs_rq = parent->my_q;
  9493. se->depth = parent->depth + 1;
  9494. }
  9495. se->my_q = cfs_rq;
  9496. /* guarantee group entities always have weight */
  9497. update_load_set(&se->load, NICE_0_LOAD);
  9498. se->parent = parent;
  9499. }
  9500. static DEFINE_MUTEX(shares_mutex);
  9501. int sched_group_set_shares(struct task_group *tg, unsigned long shares)
  9502. {
  9503. int i;
  9504. /*
  9505. * We can't change the weight of the root cgroup.
  9506. */
  9507. if (!tg->se[0])
  9508. return -EINVAL;
  9509. shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
  9510. mutex_lock(&shares_mutex);
  9511. if (tg->shares == shares)
  9512. goto done;
  9513. tg->shares = shares;
  9514. for_each_possible_cpu(i) {
  9515. struct rq *rq = cpu_rq(i);
  9516. struct sched_entity *se = tg->se[i];
  9517. struct rq_flags rf;
  9518. /* Propagate contribution to hierarchy */
  9519. rq_lock_irqsave(rq, &rf);
  9520. update_rq_clock(rq);
  9521. for_each_sched_entity(se) {
  9522. update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
  9523. update_cfs_group(se);
  9524. }
  9525. rq_unlock_irqrestore(rq, &rf);
  9526. }
  9527. done:
  9528. mutex_unlock(&shares_mutex);
  9529. return 0;
  9530. }
  9531. #else /* CONFIG_FAIR_GROUP_SCHED */
  9532. void free_fair_sched_group(struct task_group *tg) { }
  9533. int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
  9534. {
  9535. return 1;
  9536. }
  9537. void online_fair_sched_group(struct task_group *tg) { }
  9538. void unregister_fair_sched_group(struct task_group *tg) { }
  9539. #endif /* CONFIG_FAIR_GROUP_SCHED */
  9540. static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
  9541. {
  9542. struct sched_entity *se = &task->se;
  9543. unsigned int rr_interval = 0;
  9544. /*
  9545. * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
  9546. * idle runqueue:
  9547. */
  9548. if (rq->cfs.load.weight)
  9549. rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
  9550. return rr_interval;
  9551. }
  9552. /*
  9553. * All the scheduling class methods:
  9554. */
  9555. const struct sched_class fair_sched_class
  9556. __section("__fair_sched_class") = {
  9557. .enqueue_task = enqueue_task_fair,
  9558. .dequeue_task = dequeue_task_fair,
  9559. .yield_task = yield_task_fair,
  9560. .yield_to_task = yield_to_task_fair,
  9561. .check_preempt_curr = check_preempt_wakeup,
  9562. .pick_next_task = __pick_next_task_fair,
  9563. .put_prev_task = put_prev_task_fair,
  9564. .set_next_task = set_next_task_fair,
  9565. #ifdef CONFIG_SMP
  9566. .balance = balance_fair,
  9567. .select_task_rq = select_task_rq_fair,
  9568. .migrate_task_rq = migrate_task_rq_fair,
  9569. .rq_online = rq_online_fair,
  9570. .rq_offline = rq_offline_fair,
  9571. .task_dead = task_dead_fair,
  9572. .set_cpus_allowed = set_cpus_allowed_common,
  9573. #endif
  9574. .task_tick = task_tick_fair,
  9575. .task_fork = task_fork_fair,
  9576. .prio_changed = prio_changed_fair,
  9577. .switched_from = switched_from_fair,
  9578. .switched_to = switched_to_fair,
  9579. .get_rr_interval = get_rr_interval_fair,
  9580. .update_curr = update_curr_fair,
  9581. #ifdef CONFIG_FAIR_GROUP_SCHED
  9582. .task_change_group = task_change_group_fair,
  9583. #endif
  9584. #ifdef CONFIG_UCLAMP_TASK
  9585. .uclamp_enabled = 1,
  9586. #endif
  9587. };
  9588. #ifdef CONFIG_SCHED_DEBUG
  9589. void print_cfs_stats(struct seq_file *m, int cpu)
  9590. {
  9591. struct cfs_rq *cfs_rq, *pos;
  9592. rcu_read_lock();
  9593. for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
  9594. print_cfs_rq(m, cpu, cfs_rq);
  9595. rcu_read_unlock();
  9596. }
  9597. #ifdef CONFIG_NUMA_BALANCING
  9598. void show_numa_stats(struct task_struct *p, struct seq_file *m)
  9599. {
  9600. int node;
  9601. unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
  9602. struct numa_group *ng;
  9603. rcu_read_lock();
  9604. ng = rcu_dereference(p->numa_group);
  9605. for_each_online_node(node) {
  9606. if (p->numa_faults) {
  9607. tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
  9608. tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
  9609. }
  9610. if (ng) {
  9611. gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
  9612. gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
  9613. }
  9614. print_numa_stats(m, node, tsf, tpf, gsf, gpf);
  9615. }
  9616. rcu_read_unlock();
  9617. }
  9618. #endif /* CONFIG_NUMA_BALANCING */
  9619. #endif /* CONFIG_SCHED_DEBUG */
  9620. __init void init_sched_fair_class(void)
  9621. {
  9622. #ifdef CONFIG_SMP
  9623. open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
  9624. #ifdef CONFIG_NO_HZ_COMMON
  9625. nohz.next_balance = jiffies;
  9626. nohz.next_blocked = jiffies;
  9627. zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
  9628. #endif
  9629. #endif /* SMP */
  9630. }
  9631. /*
  9632. * Helper functions to facilitate extracting info from tracepoints.
  9633. */
  9634. const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq)
  9635. {
  9636. #ifdef CONFIG_SMP
  9637. return cfs_rq ? &cfs_rq->avg : NULL;
  9638. #else
  9639. return NULL;
  9640. #endif
  9641. }
  9642. EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg);
  9643. char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)
  9644. {
  9645. if (!cfs_rq) {
  9646. if (str)
  9647. strlcpy(str, "(null)", len);
  9648. else
  9649. return NULL;
  9650. }
  9651. cfs_rq_tg_path(cfs_rq, str, len);
  9652. return str;
  9653. }
  9654. EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path);
  9655. int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq)
  9656. {
  9657. return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
  9658. }
  9659. EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu);
  9660. const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
  9661. {
  9662. #ifdef CONFIG_SMP
  9663. return rq ? &rq->avg_rt : NULL;
  9664. #else
  9665. return NULL;
  9666. #endif
  9667. }
  9668. EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt);
  9669. const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
  9670. {
  9671. #ifdef CONFIG_SMP
  9672. return rq ? &rq->avg_dl : NULL;
  9673. #else
  9674. return NULL;
  9675. #endif
  9676. }
  9677. EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl);
  9678. const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
  9679. {
  9680. #if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ)
  9681. return rq ? &rq->avg_irq : NULL;
  9682. #else
  9683. return NULL;
  9684. #endif
  9685. }
  9686. EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq);
  9687. int sched_trace_rq_cpu(struct rq *rq)
  9688. {
  9689. return rq ? cpu_of(rq) : -1;
  9690. }
  9691. EXPORT_SYMBOL_GPL(sched_trace_rq_cpu);
  9692. int sched_trace_rq_cpu_capacity(struct rq *rq)
  9693. {
  9694. return rq ?
  9695. #ifdef CONFIG_SMP
  9696. rq->cpu_capacity
  9697. #else
  9698. SCHED_CAPACITY_SCALE
  9699. #endif
  9700. : -1;
  9701. }
  9702. EXPORT_SYMBOL_GPL(sched_trace_rq_cpu_capacity);
  9703. const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
  9704. {
  9705. #ifdef CONFIG_SMP
  9706. return rd ? rd->span : NULL;
  9707. #else
  9708. return NULL;
  9709. #endif
  9710. }
  9711. EXPORT_SYMBOL_GPL(sched_trace_rd_span);
  9712. int sched_trace_rq_nr_running(struct rq *rq)
  9713. {
  9714. return rq ? rq->nr_running : -1;
  9715. }
  9716. EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);