pelt.h

#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 thermal_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

#define PELT_MIN_DIVIDER	(LOAD_AVG_MAX - 1024)

static inline u32 get_pelt_divider(struct sched_avg *avg)
{
	return PELT_MIN_DIVIDER + avg->period_contrib;
}
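
/*
 * A rough worked example (assuming the LOAD_AVG_MAX of 47742 normally
 * generated into sched-pelt.h for the 32ms PELT half-life): PELT_MIN_DIVIDER
 * is then 46718, and the divider returned above moves between 46718 and 47741
 * as period_contrib walks through the current 1024us window.  pelt.c uses it
 * roughly as
 *
 *	u32 divider = get_pelt_divider(sa);
 *	sa->util_avg = sa->util_sum / divider;
 *
 * so a permanently running entity, whose util_sum saturates close to
 * divider << SCHED_CAPACITY_SHIFT, ends up with util_avg close to
 * SCHED_CAPACITY_SCALE (1024).
 */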

static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid store if the flag has been already reset */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Reset flag to report util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}

/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time but then syncs back to
 * clock_task when rq is idle.
 *
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
		return;
	}

	/*
	 * When a rq runs at a lower compute capacity, it will need
	 * more time to do the same amount of work than it would at
	 * max capacity. In order to be invariant, we scale the delta
	 * to reflect how much work has really been done.
	 * Running longer results in stealing idle time that will
	 * disturb the load signal compared to max capacity. This
	 * stolen idle time will be reflected automatically when the
	 * rq becomes idle and the clock is synced with
	 * rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}
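
/*
 * A rough numerical sketch of the scaling above, assuming cap_scale() divides
 * by SCHED_CAPACITY_SCALE (1024): on a CPU with arch_scale_cpu_capacity() of
 * 512 (half compute capacity) running at full frequency
 * (arch_scale_freq_capacity() == 1024), a 2ms running delta advances
 * clock_pelt by
 *
 *	2ms * 512/1024 * 1024/1024 = 1ms
 *
 * The "missing" 1ms is the stolen idle time mentioned above: it is given back
 * when the rq goes idle and clock_pelt is synced to clock_task, or accounted
 * as lost_idle_time below if the rq never becomes idle.
 */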

/*
 * When rq becomes idle, we have to check if it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and rounding purposes, we don't take into account
 * the position in the current window (period_contrib) and we use the upper
 * bound of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;

	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle
	 * phase would be present at max capacity. As soon as the
	 * utilization of a rq has reached the maximum value, it is
	 * considered an always running rq without idle time to
	 * steal. This potential idle time is considered as lost in
	 * this case. We keep track of this lost idle time compared to
	 * rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}
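
/*
 * Rough numbers, again assuming LOAD_AVG_MAX == 47742: the divider above is
 * (47742 - 1024) * 1024 - 47742 = 47791490, just below the saturation value
 * (LOAD_AVG_MAX - 1024 + period_contrib) << SCHED_CAPACITY_SHIFT that the
 * summed util_sum of a permanently busy rq converges towards (at least
 * 46718 * 1024 = 47839232).  The subtracted LOAD_AVG_MAX is only a ~0.1%
 * allowance, so lost_idle_time is accrued only once the rq's utilization has
 * essentially saturated, i.e. when there really was no idle time to give back.
 */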

static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
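
/*
 * Sketch of the effect, assuming fair.c stamps throttled_clock_task when the
 * hierarchy is throttled and folds the throttled span into
 * throttled_clock_task_time on unthrottle: while a throttle is in progress,
 * the value returned above does not advance at all, so the group's PELT
 * signals see no decay for that window; afterwards the clock simply lags
 * rq_clock_pelt() by the total time ever spent throttled, e.g. by 5ms for a
 * cfs_rq that has accumulated 5ms of throttling.
 */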
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif