rt.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"
#include "pelt.h"

#include <trace/hooks/sched.h>

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;
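/*
 * Handler for the per-rt_bandwidth period timer: forward the timer by whole
 * periods and replenish runtime via do_sched_rt_period_timer(), stopping the
 * timer once every rt_rq covered by this bandwidth is idle.
 */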
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_HARD);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task with a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	do_start_rt_bandwidth(rt_b);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}
#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
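/*
 * Keep the root domain's RT-overload state in sync: a runqueue counts as
 * overloaded when it has more than one runnable RT task and at least one
 * of them can migrate to another CPU.
 */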
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}
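/*
 * rq->rt.pushable_tasks is a priority-sorted plist of runnable RT tasks that
 * are allowed to run on more than one CPU; the push logic scans it to find
 * candidates to migrate to lower-priority runqueues.
 */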
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where uclamp_min value
 * is higher than the capacity of a @cpu. For non-heterogeneous system this
 * function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif
#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	}
	else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}
#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}
#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}
#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}
static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */
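/*
 * Periodic replenishment, called from the rt_bandwidth period timer: for
 * every rt_rq in the period mask, pay back up to @overrun periods worth of
 * runtime against rt_time and unthrottle (re-enqueue) runqueues whose debt
 * has been cleared. Returns 1 when everything is idle and the timer can stop.
 */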
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled. If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway. Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
			rt_rq->rt_runtime = rt_b->rt_runtime;
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		raw_spin_lock(&rq->lock);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}
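/*
 * Effective priority of an RT entity: for a task entity this is the task's
 * prio; for a group entity it is the highest priority queued in that group.
 */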
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}
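/*
 * Check whether this rt_rq has used up its runtime for the current period;
 * if so, mark it throttled and dequeue it so lower classes can run until the
 * period timer replenishes the budget. Returns 1 when the rt_rq got throttled.
 */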
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");

			trace_android_vh_dump_throttled_rt_tasks(
				raw_smp_processor_id(),
				rq_clock(rq_of_rt_rq(rt_rq)),
				sched_rt_period(rt_rq),
				runtime,
				hrtimer_get_expires_ns(&rt_b->rt_period_timer));
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;
	u64 now;

	if (curr->sched_class != &rt_sched_class)
		return;

	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	trace_android_vh_sched_stat_runtime_rt(curr, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
		int exceeded;

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			exceeded = sched_rt_runtime_exceeded(rt_rq);
			if (exceeded)
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
			if (exceeded)
				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
		}
	}
}
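/*
 * The "top" rt_rq is the one attached directly to the CPU runqueue; these
 * two helpers remove and add its rt_nr_running contribution to rq->nr_running
 * when the whole RT hierarchy becomes empty/throttled or runnable again.
 */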
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;

	if (rt_rq_throttled(rt_rq))
		return;

	if (rt_rq->rt_nr_running) {
		add_nr_running(rq, rt_rq->rt_nr_running);
		rt_rq->rt_queued = 1;
	}

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}

static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct task_struct *tsk;

	if (group_rq)
		return group_rq->rr_nr_running;

	tsk = rt_task_of(rt_se);

	return (tsk->policy == SCHED_RR) ? 1 : 0;
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

/*
 * Change rt_se->run_list location unless SAVE && !MOVE
 *
 * assumes ENQUEUE/DEQUEUE flags match
 */
static inline bool move_entity(unsigned int flags)
{
	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
		return false;

	return true;
}
static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
	list_del_init(&rt_se->run_list);

	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	rt_se->on_list = 0;
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
		if (rt_se->on_list)
			__delist_rt_entity(rt_se, array);
		return;
	}

	if (move_entity(flags)) {
		WARN_ON_ONCE(rt_se->on_list);
		if (flags & ENQUEUE_HEAD)
			list_add(&rt_se->run_list, queue);
		else
			list_add_tail(&rt_se->run_list, queue);

		__set_bit(rt_se_prio(rt_se), array->bitmap);
		rt_se->on_list = 1;
	}
	rt_se->on_rq = 1;

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	if (move_entity(flags)) {
		WARN_ON_ONCE(!rt_se->on_list);
		__delist_rt_entity(rt_se, array);
	}
	rt_se->on_rq = 0;

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	dequeue_top_rt_rq(rt_rq_of_se(back));

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se, flags);
	}
}
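/*
 * Enqueue/dequeue walk the group hierarchy: first drop the whole stack of
 * entities top-down via dequeue_rt_stack(), then re-add every level that is
 * still runnable bottom-up, and finally refresh the top-level rt_rq's
 * contribution to rq->nr_running.
 */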
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se, flags);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, flags);
	enqueue_top_rt_rq(&rq->rt);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se, flags);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, flags);
	}
	enqueue_top_rt_rq(&rq->rt);
}

#ifdef CONFIG_SMP
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
					bool sync)
{
	/*
	 * If the waker is CFS, then an RT sync wakeup would preempt the waker
	 * and force it to run for a likely small time after the RT wakee is
	 * done. So, only honor RT sync wakeups from RT wakers.
	 */
	return sync && task_has_rt_policy(rq->curr) &&
		p->prio <= rq->rt.highest_prio.next &&
		rq->rt.rt_nr_running <= 2;
}
#else
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
					bool sync)
{
	return 0;
}
#endif

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;
	bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
	    !should_honor_rt_sync(rq, p, sync))
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se, flags);

	dequeue_pushable_task(rq, p);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}
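/*
 * sched_yield() for the RT class: move the current task to the tail of its
 * priority list so same-priority tasks get a turn.
 */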
static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
/*
 * Return whether the task on the given cpu is currently non-preemptible
 * while handling a potentially long softint, or if the task is likely
 * to block preemptions soon because it is a ksoftirq thread that is
 * handling slow softints.
 */
bool
task_may_not_preempt(struct task_struct *task, int cpu)
{
	__u32 softirqs = per_cpu(active_softirqs, cpu) |
			 __IRQ_STAT(cpu, __softirq_pending);
	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);

	return ((softirqs & LONG_SOFTIRQ_MASK) &&
		(task == cpu_ksoftirqd ||
		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
}
EXPORT_SYMBOL_GPL(task_may_not_preempt);
#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */

static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	struct rq *this_cpu_rq;
	bool test;
	int target_cpu = -1;
	bool may_not_preempt;
	bool sync = !!(flags & WF_SYNC);
	int this_cpu;

	trace_android_rvh_select_task_rq_rt(p, cpu, sd_flag,
					    flags, &target_cpu);
	if (target_cpu >= 0)
		return target_cpu;

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */
	this_cpu = smp_processor_id();
	this_cpu_rq = cpu_rq(this_cpu);

	/*
	 * If the current task on @p's runqueue is a softirq task,
	 * it may run without preemption for a time that is
	 * ill-suited for a waiting RT task. Therefore, try to
	 * wake this RT task on another runqueue.
	 *
	 * Also, if the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 *
	 * We take into account the capacity of the CPU to ensure it fits the
	 * requirement of the task - which is only important on heterogeneous
	 * systems like big.LITTLE.
	 */
	may_not_preempt = task_may_not_preempt(curr, cpu);
	test = (curr && (may_not_preempt ||
			 (unlikely(rt_task(curr)) &&
			  (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));

	/*
	 * Respect the sync flag as long as the task can run on this CPU.
	 */
	if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
	    cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
		cpu = this_cpu;
		goto out_unlock;
	}

	if (test || !rt_task_fits_capacity(p, cpu)) {
		int target = find_lowest_rq(p);

		/*
		 * Bail out if we were forcing a migration to find a better
		 * fitting CPU but our search failed.
		 */
		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
			goto out_unlock;

		/*
		 * If cpu is non-preemptible, prefer remote cpu
		 * even if it's running a higher-prio task.
		 * Otherwise: Don't bother moving it if the destination CPU is
		 * not running a lower priority task.
		 */
		if (target != -1 &&
		    (may_not_preempt ||
		     p->prio < cpu_rq(target)->rt.highest_prio.curr))
			cpu = target;
	}

out_unlock:
	rcu_read_unlock();

out:
	return cpu;
}
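/*
 * Equal-priority wakeup on SMP: if current is pinned to this CPU while the
 * woken task could run elsewhere, reschedule so the push/pull logic gets a
 * chance to spread the two tasks across CPUs.
 */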
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * the current task but none can run 'p', so let's reschedule
	 * to try and push the current task away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_curr(rq);
}
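
/*
 * Balance callback for the RT class, invoked before the next task is
 * picked: when @p is not on the RT runqueue and a pull looks worthwhile,
 * try to pull RT tasks from overloaded runqueues (an Android vendor hook
 * may perform or skip the pull on the kernel's behalf).
 */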
static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
		int done = 0;

		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet started the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		trace_android_rvh_sched_balance_rt(rq, p, &done);
		if (!done)
			pull_rt_task(rq);
		rq_repin_lock(rq, rf);
	}

	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}
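
/* Called when @p becomes the running task of this runqueue. */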
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
	p->se.exec_start = rq_clock_task(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);

	if (!first)
		return;

	/*
	 * If the prev task was rt, put_prev_task() has already updated the
	 * utilization. We only care about the case where we start to
	 * schedule an RT task.
	 */
	if (rq->curr->sched_class != &rt_sched_class)
		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);

	rt_queue_push_tasks(rq);
}
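
/*
 * Pick the first entity on the highest-priority (lowest index) queue of
 * @rt_rq's priority array.
 */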
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct rt_rq *rt_rq = &rq->rt;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	return rt_task_of(rt_se);
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p;

	if (!sched_rt_runnable(rq))
		return NULL;

	p = _pick_next_task_rt(rq);
	set_next_task_rt(rq, p, true);
	return p;
}
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3
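
/* Return 1 if @p is allowed to run on @cpu and is not running right now. */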
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, p->cpus_ptr))
		return 1;

	return 0;
}

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise
 */
struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
	struct plist_head *head = &rq->rt.pushable_tasks;
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	plist_for_each_entry(p, head, pushable_tasks) {
		if (pick_rt_task(rq, p, cpu))
			return p;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(pick_highest_pushable_task);

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
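
/*
 * Find a CPU whose runqueue runs at a lower priority than @task (and, on
 * asymmetric-capacity systems, one that fits the task), preferring the
 * task's previous CPU and CPUs topologically close to it. Returns -1 if
 * no suitable CPU is found.
 */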
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = -1;
	int ret;

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	/*
	 * If we're on an asymmetric-capacity system, ensure we consider the
	 * different capacities of the CPUs when searching for the lowest_mask.
	 */
	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
					  task, lowest_mask,
					  rt_task_fits_capacity);
	} else {
		ret = cpupri_find(&task_rq(task)->rd->cpupri,
				  task, lowest_mask);
	}

	trace_android_rvh_find_lowest_rq(task, lowest_mask, ret, &cpu);
	if (cpu >= 0)
		return cpu;

	if (!ret)
		return -1; /* No targets found */

	cpu = task_cpu(task);

	/*
	 * At this point we have built a mask of CPUs representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last CPU that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which CPU is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
			/*
			 * Target rq has tasks of equal or higher priority,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			lowest_rq = NULL;
			break;
		}

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
				     task_running(rq, task) ||
				     !rt_task(task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, lowest_rq);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}
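
/* Return the highest-priority pushable task on @rq, or NULL if none. */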
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);
	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!rt_task(p));

	return p;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (WARN_ON(next_task == rq->curr))
		return 0;

	/*
	 * It's possible that next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to. Do not retry in this case, since
			 * other CPUs will pull from us when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);
	ret = 1;

	resched_curr(lowest_rq);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task() will return true if it moved an RT task */
	while (push_rt_task(rq))
		;
}

#ifdef HAVE_RT_PUSH_IPI

/*
 * When a high priority task schedules out from a CPU and a lower priority
 * task is scheduled in, a check is made to see if there are any RT tasks
 * on other CPUs that are waiting to run because a higher priority RT task
 * is currently running on its CPU. In this case, the CPU with multiple RT
 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
 * up that may be able to run one of its non-running queued RT tasks.
 *
 * All CPUs with overloaded RT tasks need to be notified as there is currently
 * no way to know which of these CPUs have the highest priority task waiting
 * to run. Instead of trying to take a spinlock on each of these CPUs,
 * which has been shown to cause large latency on machines with many
 * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
 * RT tasks waiting to run.
 *
 * Just sending an IPI to each of the CPUs is also an issue, as on large
 * count CPU machines, this can cause an IPI storm on a CPU, especially
 * if it's the only CPU with multiple RT tasks queued, and a large number
 * of CPUs scheduling a lower priority task at the same time.
 *
 * Each root domain has its own irq work function that can iterate over
 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
 * tasks must be checked if there's one or many CPUs that are lowering
 * their priority, there's a single irq work iterator that will try to
 * push off RT tasks that are waiting to run.
 *
 * When a CPU schedules a lower priority task, it will kick off the
 * irq work iterator that will jump to each CPU with overloaded RT tasks.
 * As it only takes the first CPU that schedules a lower priority task
 * to start the process, the rto_start variable is incremented and if
 * the atomic result is one, then that CPU will try to take the rto_lock.
 * This prevents high contention on the lock as the process handles all
 * CPUs scheduling lower priority tasks.
 *
 * All CPUs that are scheduling a lower priority task will increment the
 * rt_loop_next variable. This will make sure that the irq work iterator
 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
 * priority task, even if the iterator is in the middle of a scan. Incrementing
 * the rt_loop_next will cause the iterator to perform another scan.
 */
static int rto_next_cpu(struct root_domain *rd)
{
	int next;
	int cpu;

	/*
	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
	 * and rto_next_cpu() will simply return the first CPU found in
	 * the rto_mask.
	 *
	 * If rto_next_cpu() is called with a valid rto_cpu, it
	 * will return the next CPU found in the rto_mask.
	 *
	 * If there are no more CPUs left in the rto_mask, then a check is made
	 * against rto_loop and rto_loop_next. rto_loop is only updated with
	 * the rto_lock held, but any CPU may increment the rto_loop_next
	 * without any locking.
	 */
	for (;;) {
		/* When rto_cpu is -1 this acts like cpumask_first() */
		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);

		rd->rto_cpu = cpu;

		if (cpu < nr_cpu_ids)
			return cpu;

		rd->rto_cpu = -1;

		/*
		 * ACQUIRE ensures we see the @rto_mask changes
		 * made prior to the @next value observed.
		 *
		 * Matches WMB in rt_set_overload().
		 */
		next = atomic_read_acquire(&rd->rto_loop_next);

		if (rd->rto_loop == next)
			break;

		rd->rto_loop = next;
	}

	return -1;
}

static inline bool rto_start_trylock(atomic_t *v)
{
	return !atomic_cmpxchg_acquire(v, 0, 1);
}

static inline void rto_start_unlock(atomic_t *v)
{
	atomic_set_release(v, 0);
}
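
/*
 * Kick off (or extend) the IPI push loop described above: bump
 * rto_loop_next, and if no iterator is currently running, pick the first
 * overloaded CPU and queue the irq work there.
 */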
static void tell_cpu_to_push(struct rq *rq)
{
	int cpu = -1;

	/* Keep the loop going if the IPI is currently active */
	atomic_inc(&rq->rd->rto_loop_next);

	/* Only one CPU can initiate a loop at a time */
	if (!rto_start_trylock(&rq->rd->rto_loop_start))
		return;

	raw_spin_lock(&rq->rd->rto_lock);

	/*
	 * The rto_cpu is updated under the lock, if it has a valid CPU
	 * then the IPI is still running and will continue due to the
	 * update to loop_next, and nothing needs to be done here.
	 * Otherwise it is finishing up and an IPI needs to be sent.
	 */
	if (rq->rd->rto_cpu < 0)
		cpu = rto_next_cpu(rq->rd);

	raw_spin_unlock(&rq->rd->rto_lock);

	rto_start_unlock(&rq->rd->rto_loop_start);

	if (cpu >= 0) {
		/* Make sure the rd does not get freed while pushing */
		sched_get_rd(rq->rd);
		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
	}
}

/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
	struct root_domain *rd =
		container_of(work, struct root_domain, rto_push_work);
	struct rq *rq;
	int cpu;

	rq = this_rq();

	/*
	 * We do not need to grab the lock to check for has_pushable_tasks.
	 * When it gets updated, a check is made if a push is possible.
	 */
	if (has_pushable_tasks(rq)) {
		raw_spin_lock(&rq->lock);
		push_rt_tasks(rq);
		raw_spin_unlock(&rq->lock);
	}

	raw_spin_lock(&rd->rto_lock);

	/* Pass the IPI to the next rt overloaded queue */
	cpu = rto_next_cpu(rd);

	raw_spin_unlock(&rd->rto_lock);

	if (cpu < 0) {
		sched_put_rd(rd);
		return;
	}

	/* Try the next RT overloaded CPU */
	irq_work_queue_on(&rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */

static void pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	bool resched = false;
	struct task_struct *p;
	struct rq *src_rq;
	int rt_overload_count = rt_overloaded(this_rq);

	if (likely(!rt_overload_count))
		return;

	/*
	 * Match the barrier from rt_set_overload(); this guarantees that if we
	 * see overloaded we must also see the rto_mask bit.
	 */
	smp_rmb();

	/* If we are the only overloaded CPU do nothing */
	if (rt_overload_count == 1 &&
	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
		return;

#ifdef HAVE_RT_PUSH_IPI
	if (sched_feat(RT_PUSH_IPI)) {
		tell_cpu_to_push(this_rq);
		return;
	}
#endif

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * We can only pull a task that is pushable on its own
		 * runqueue, and no others.
		 */
		p = pick_highest_pushable_task(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its CPU.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			resched = true;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	if (resched)
		resched_curr(this_rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	bool need_to_push = !task_running(rq, p) &&
			    !test_tsk_need_resched(rq->curr) &&
			    p->nr_cpus_allowed > 1 &&
			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
			    (rq->curr->nr_cpus_allowed < 2 ||
			     rq->curr->prio <= p->prio);

	if (need_to_push)
		push_rt_tasks(rq);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When we switch away from the RT queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
		return;

	rt_queue_pull_task(rq);
}

void __init init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
	}
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If we are running, update the avg_rt tracking, as the running time
	 * will from now on be accounted into the latter.
	 */
	if (task_current(rq, p)) {
		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
		return;
	}

	/*
	 * If we are not running we may need to preempt the current
	 * running task. If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (task_on_rq_queued(p)) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
			rt_queue_push_tasks(rq);
#endif /* CONFIG_SMP */
		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
			resched_curr(rq);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!task_on_rq_queued(p))
		return;

	if (rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			rt_queue_pull_task(rq);

		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule.
		 */
		if (p->prio > rq->rt.highest_prio.curr)
			resched_curr(rq);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_curr(rq);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if its priority is
		 * higher than that of the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_curr(rq);
	}
}
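
/*
 * RLIMIT_RTTIME enforcement (only when CONFIG_POSIX_TIMERS is enabled):
 * bump a per-jiffy timeout counter for the running RT task and, once it
 * exceeds the soft limit converted to ticks, notify the posix-cputimers
 * watchdog.
 */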
#ifdef CONFIG_POSIX_TIMERS
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		if (p->rt.watchdog_stamp != jiffies) {
			p->rt.timeout++;
			p->rt.watchdog_stamp = jiffies;
		}

		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next) {
			posix_cputimers_rt_watchdog(&p->posix_cputimers,
						    p->se.sum_exec_runtime);
		}
	}
}
#else
static inline void watchdog(struct rq *rq, struct task_struct *p) { }
#endif

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Requeue to the end of queue if we (and all of our ancestors) are not
	 * the only element on the queue
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			resched_curr(rq);
			return;
		}
	}
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return sched_rr_timeslice;
	else
		return 0;
}
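
/*
 * Method table wiring the RT policies (SCHED_FIFO/SCHED_RR) into the
 * scheduler core.
 */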
const struct sched_class rt_sched_class
	__section("__rt_sched_class") = {
	.enqueue_task = enqueue_task_rt,
	.dequeue_task = dequeue_task_rt,
	.yield_task = yield_task_rt,

	.check_preempt_curr = check_preempt_curr_rt,

	.pick_next_task = pick_next_task_rt,
	.put_prev_task = put_prev_task_rt,
	.set_next_task = set_next_task_rt,

#ifdef CONFIG_SMP
	.balance = balance_rt,
	.select_task_rq = select_task_rq_rt,
	.set_cpus_allowed = set_cpus_allowed_common,
	.rq_online = rq_online_rt,
	.rq_offline = rq_offline_rt,
	.task_woken = task_woken_rt,
	.switched_from = switched_from_rt,
#endif

	.task_tick = task_tick_rt,

	.get_rr_interval = get_rr_interval_rt,

	.prio_changed = prio_changed_rt,
	.switched_to = switched_to_rt,

	.update_curr = update_curr_rt,

#ifdef CONFIG_UCLAMP_TASK
	.uclamp_enabled = 1,
#endif
};

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *task;
	struct css_task_iter it;
	int ret = 0;

	/*
	 * Autogroups do not have RT tasks; see autogroup_create().
	 */
	if (task_group_is_autogroup(tg))
		return 0;

	css_task_iter_start(&tg->css, 0, &it);
	while (!ret && (task = css_task_iter_next(&it)))
		ret |= rt_task(task);
	css_task_iter_end(&it);

	return ret;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};
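
/*
 * Verify, for one node of the task_group tree, that the proposed
 * period/runtime (carried in @data for the group being changed) stays
 * within the global limit and that the children's combined bandwidth does
 * not exceed the parent's.
 */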
static int tg_rt_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks if runtime turns zero.
	 */
	if (rt_bandwidth_enabled() && !runtime &&
	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	int ret;

	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	rcu_read_lock();
	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}
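
/*
 * Apply a new RT period/runtime pair to @tg after validating it against
 * the group hierarchy, then propagate the runtime to every per-CPU rt_rq
 * of the group.
 */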
static int tg_set_rt_bandwidth(struct task_group *tg,
			       u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	/*
	 * Disallowing the root group RT runtime is BAD, it would disallow the
	 * kernel creating (and/or operating) RT threads.
	 */
	if (tg == &root_task_group && rt_runtime == 0)
		return -EINVAL;

	/* No period doesn't make any sense. */
	if (rt_period == 0)
		return -EINVAL;

	/*
	 * Bound quota to defend quota against overflow during bandwidth shift.
	 */
	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	mutex_unlock(&rt_constraints_mutex);

	return err;
}

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;
	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
	u64 rt_runtime, rt_period;

	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	rt_period = rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}

static int sched_rt_global_constraints(void)
{
	int ret = 0;

	mutex_lock(&rt_constraints_mutex);
	ret = __rt_schedulable(NULL, 0, 0);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
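
/*
 * Sanity-check the sysctl values: the period must be positive and a
 * finite runtime may neither exceed the period nor overflow
 * max_rt_runtime once converted to nanoseconds.
 */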
static int sched_rt_global_validate(void)
{
	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
	    ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
	     ((u64)sysctl_sched_rt_runtime *
			NSEC_PER_USEC > max_rt_runtime)))
		return -EINVAL;

	return 0;
}

static void sched_rt_do_global(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	def_rt_bandwidth.rt_runtime = global_rt_runtime();
	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
}
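
/*
 * sysctl handler for sched_rt_period_us/sched_rt_runtime_us: validate the
 * new values for both the RT and deadline classes, apply them, and roll
 * them back on failure.
 */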
int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
		     size_t *lenp, loff_t *ppos)
{
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_validate();
		if (ret)
			goto undo;

		ret = sched_dl_global_validate();
		if (ret)
			goto undo;

		ret = sched_rt_global_constraints();
		if (ret)
			goto undo;

		sched_rt_do_global();
		sched_dl_do_global();
	}
	if (0) {
undo:
		sysctl_sched_rt_period = old_period;
		sysctl_sched_rt_runtime = old_runtime;
	}
	mutex_unlock(&mutex);

	return ret;
}

int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
		     size_t *lenp, loff_t *ppos)
{
	int ret;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/*
	 * Make sure that internally we keep jiffies.
	 * Also, writing zero resets the timeslice to default:
	 */
	if (!ret && write) {
		sched_rr_timeslice =
			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
			msecs_to_jiffies(sysctl_sched_rr_timeslice);
	}
	mutex_unlock(&mutex);

	return ret;
}

#ifdef CONFIG_SCHED_DEBUG
void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */