cpudeadline.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/cpudl.c
 *
 *  Global CPU deadline management
 *
 *  Author: Juri Lelli <j.lelli@sssup.it>
 */
#include "sched.h"
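
/*
 * Overview (descriptive note, not from the original file): cpudl keeps a
 * max-heap of CPUs keyed by the earliest deadline currently queued on each
 * of them, so the CPU whose earliest deadline is the latest (the least
 * urgent CPU) sits at the root.  The elements[] array (struct cpudl_item,
 * see cpudeadline.h) is used in two ways at once: slots [0..size-1] hold
 * the heap entries {cpu, dl}, while elements[cpu].idx records where in the
 * heap that CPU currently lives (IDX_INVALID if it is not in the heap).
 * free_cpus tracks CPUs with no queued -deadline tasks.
 */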

static inline int parent(int i)
{
	return (i - 1) >> 1;
}

static inline int left_child(int i)
{
	return (i << 1) + 1;
}

static inline int right_child(int i)
{
	return (i << 1) + 2;
}
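
/*
 * With the heap stored in an array and the root at index 0, the arithmetic
 * above gives, for example, node 3 the children 7 and 8 and the parent 1.
 * The two heapify helpers below avoid full element swaps: they remember the
 * original {cpu, dl} of the starting slot, shift parents or children into
 * the hole as they walk the heap, and write the saved values back exactly
 * once at the final position, fixing up elements[cpu].idx along the way.
 */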

static void cpudl_heapify_down(struct cpudl *cp, int idx)
{
	int l, r, largest;

	int orig_cpu = cp->elements[idx].cpu;
	u64 orig_dl = cp->elements[idx].dl;

	if (left_child(idx) >= cp->size)
		return;

	/* adapted from lib/prio_heap.c */
	while (1) {
		u64 largest_dl;

		l = left_child(idx);
		r = right_child(idx);
		largest = idx;
		largest_dl = orig_dl;

		if ((l < cp->size) && dl_time_before(orig_dl,
						cp->elements[l].dl)) {
			largest = l;
			largest_dl = cp->elements[l].dl;
		}
		if ((r < cp->size) && dl_time_before(largest_dl,
						cp->elements[r].dl))
			largest = r;

		if (largest == idx)
			break;

		/* pull largest child onto idx */
		cp->elements[idx].cpu = cp->elements[largest].cpu;
		cp->elements[idx].dl = cp->elements[largest].dl;
		cp->elements[cp->elements[idx].cpu].idx = idx;
		idx = largest;
	}
	/* actual push down of saved original values orig_* */
	cp->elements[idx].cpu = orig_cpu;
	cp->elements[idx].dl = orig_dl;
	cp->elements[cp->elements[idx].cpu].idx = idx;
}

static void cpudl_heapify_up(struct cpudl *cp, int idx)
{
	int p;

	int orig_cpu = cp->elements[idx].cpu;
	u64 orig_dl = cp->elements[idx].dl;

	if (idx == 0)
		return;

	do {
		p = parent(idx);
		if (dl_time_before(orig_dl, cp->elements[p].dl))
			break;
		/* pull parent onto idx */
		cp->elements[idx].cpu = cp->elements[p].cpu;
		cp->elements[idx].dl = cp->elements[p].dl;
		cp->elements[cp->elements[idx].cpu].idx = idx;
		idx = p;
	} while (idx != 0);
	/* actual push up of saved original values orig_* */
	cp->elements[idx].cpu = orig_cpu;
	cp->elements[idx].dl = orig_dl;
	cp->elements[cp->elements[idx].cpu].idx = idx;
}

static void cpudl_heapify(struct cpudl *cp, int idx)
{
	if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
				cp->elements[idx].dl))
		cpudl_heapify_up(cp, idx);
	else
		cpudl_heapify_down(cp, idx);
}
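
/*
 * The root of the max-heap is the CPU whose earliest queued deadline is the
 * latest in the system, i.e. the most attractive target for pushing a
 * -deadline task to.
 */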
static inline int cpudl_maximum(struct cpudl *cp)
{
	return cp->elements[0].cpu;
}

/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: 1 if a suitable CPU was found, in which case @later_mask (if not
 * NULL) is filled in with the candidate CPUs; 0 otherwise.
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask &&
	    cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
		unsigned long cap, max_cap = 0;
		int cpu, max_cpu = -1;

		if (!static_branch_unlikely(&sched_asym_cpucapacity))
			return 1;

		/* Ensure the capacity of the CPUs fits the task. */
		for_each_cpu(cpu, later_mask) {
			if (!dl_task_fits_capacity(p, cpu)) {
				cpumask_clear_cpu(cpu, later_mask);

				cap = capacity_orig_of(cpu);

				if (cap > max_cap ||
				    (cpu == task_cpu(p) && cap == max_cap)) {
					max_cap = cap;
					max_cpu = cpu;
				}
			}
		}

		if (cpumask_empty(later_mask))
			cpumask_set_cpu(max_cpu, later_mask);

		return 1;
	} else {
		int best_cpu = cpudl_maximum(cp);

		WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

		if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
			if (later_mask)
				cpumask_set_cpu(best_cpu, later_mask);

			return 1;
		}
	}
	return 0;
}
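
/*
 * A sketch of how cpudl_find() is meant to be called (the in-tree caller is
 * find_later_rq() in kernel/sched/deadline.c; the variable names below are
 * only illustrative, not copied from that file):
 *
 *	struct cpumask *later_mask = ...;	// a scratch cpumask
 *	int best_cpu = -1;
 *
 *	if (cpudl_find(&task_rq(p)->rd->cpudl, p, later_mask))
 *		best_cpu = cpumask_any(later_mask);
 *
 * i.e. the push path asks for CPUs that are either free of -deadline tasks
 * or whose earliest deadline is later than p's.
 */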

/*
 * cpudl_clear - remove a CPU from the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_clear(struct cpudl *cp, int cpu)
{
	int old_idx, new_cpu;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	old_idx = cp->elements[cpu].idx;
	if (old_idx == IDX_INVALID) {
		/*
		 * Nothing to remove if old_idx was invalid.
		 * This could happen if a rq_offline_dl is
		 * called for a CPU without -dl tasks running.
		 */
	} else {
		new_cpu = cp->elements[cp->size - 1].cpu;
		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
		cp->elements[old_idx].cpu = new_cpu;
		cp->size--;
		cp->elements[new_cpu].idx = old_idx;
		cp->elements[cpu].idx = IDX_INVALID;
		cpudl_heapify(cp, old_idx);

		cpumask_set_cpu(cpu, cp->free_cpus);
	}
	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
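
/*
 * Note that cpudl_clear() is a standard heap deletion: the last heap entry
 * is moved into the vacated slot, the heap shrinks by one, and
 * cpudl_heapify() restores the ordering in whichever direction is needed.
 * The CPU is also marked free again in free_cpus.
 */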

/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 * @dl: the new earliest deadline for this CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
	int old_idx;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	old_idx = cp->elements[cpu].idx;
	if (old_idx == IDX_INVALID) {
		int new_idx = cp->size++;

		cp->elements[new_idx].dl = dl;
		cp->elements[new_idx].cpu = cpu;
		cp->elements[cpu].idx = new_idx;
		cpudl_heapify_up(cp, new_idx);
		cpumask_clear_cpu(cpu, cp->free_cpus);
	} else {
		cp->elements[old_idx].dl = dl;
		cpudl_heapify(cp, old_idx);
	}

	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
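
/*
 * cpudl_set() distinguishes two cases: if the CPU is not yet in the heap
 * (idx == IDX_INVALID) it is appended at the end, sifted up, and removed
 * from free_cpus; if it is already present, only its deadline key changes
 * and cpudl_heapify() moves it up or down as required.
 */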

/*
 * cpudl_set_freecpu - Set the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached CPU
 */
void cpudl_set_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_set_cpu(cpu, cp->free_cpus);
}

/*
 * cpudl_clear_freecpu - Clear the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached CPU
 */
void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_clear_cpu(cpu, cp->free_cpus);
}
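
/*
 * The two helpers above only toggle a CPU's membership in free_cpus; they do
 * not touch the heap.  They are intended for the root-domain online/offline
 * paths (the rq_online_dl()/rq_offline_dl() callbacks in deadline.c, at the
 * time of writing).
 */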

/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 */
int cpudl_init(struct cpudl *cp)
{
	int i;

	raw_spin_lock_init(&cp->lock);
	cp->size = 0;

	cp->elements = kcalloc(nr_cpu_ids,
			       sizeof(struct cpudl_item),
			       GFP_KERNEL);
	if (!cp->elements)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
		kfree(cp->elements);
		return -ENOMEM;
	}

	for_each_possible_cpu(i)
		cp->elements[i].idx = IDX_INVALID;

	return 0;
}

/*
 * cpudl_cleanup - clean up the cpudl structure
 * @cp: the cpudl max-heap context
 */
void cpudl_cleanup(struct cpudl *cp)
{
	free_cpumask_var(cp->free_cpus);
	kfree(cp->elements);
}