cpuhotplug.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
        const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
        unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        /*
         * The cpumask_empty() check is a workaround for interrupt chips,
         * which do not implement effective affinity, but the architecture has
         * enabled the config switch. Use the general affinity mask instead.
         */
        if (cpumask_empty(m))
                m = irq_data_get_affinity_mask(d);

        /*
         * Sanity check. If the mask is not empty when excluding the outgoing
         * CPU then it must contain at least one online CPU. The outgoing CPU
         * has been removed from the online mask already.
         */
        if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
            cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
                /*
                 * If this happens then there was a missed IRQ fixup at some
                 * point. Warn about it and enforce fixup.
                 */
                pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
                        cpumask_pr_args(m), d->irq, cpu);
                return true;
        }
#endif
        return cpumask_test_cpu(cpu, m);
}
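
/*
 * Worked example (illustrative note, not part of the original file):
 * assume CPU 3 is going down and has already been cleared from
 * cpu_online_mask. An interrupt with effective affinity {3} passes the
 * cpumask_test_cpu() check above and needs a fixup; one with affinity
 * {1,3} needs a fixup too, but migrate_one_irq() can retarget it to
 * CPU 1; one with affinity {1} returns false and is left untouched.
 */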

static bool migrate_one_irq(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
        const struct cpumask *affinity;
        bool brokeaff = false;
        int err;

        /*
         * IRQ chip might be already torn down, but the irq descriptor is
         * still in the radix tree. Also if the chip has no affinity setter,
         * nothing can be done here.
         */
        if (!chip || !chip->irq_set_affinity) {
                pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
                return false;
        }

        /*
         * No move required, if:
         * - Interrupt is per cpu
         * - Interrupt is not started
         * - Affinity mask does not include this CPU.
         *
         * Note: Do not check desc->action as this might be a chained
         * interrupt.
         */
        if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
                /*
                 * If an irq move is pending, abort it if the dying CPU is
                 * the sole target.
                 */
                irq_fixup_move_pending(desc, false);
                return false;
        }

        /*
         * Complete an eventually pending irq move cleanup. If this
         * interrupt was moved in hard irq context, then the vectors need
         * to be cleaned up. It can't wait until this interrupt actually
         * happens and this CPU was involved.
         */
        irq_force_complete_move(desc);

        /*
         * If there is a setaffinity pending, then try to reuse the pending
         * mask, so the last change of the affinity does not get lost. If
         * there is no move pending or the pending mask does not contain
         * any online CPU, use the current affinity mask.
         */
        if (irq_fixup_move_pending(desc, true))
                affinity = irq_desc_get_pending_mask(desc);
        else
                affinity = irq_data_get_affinity_mask(d);

        /* Mask the chip for interrupts which cannot move in process context */
        if (maskchip && chip->irq_mask)
                chip->irq_mask(d);

        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                /*
                 * If the interrupt is managed, then shut it down and leave
                 * the affinity untouched.
                 */
                if (irqd_affinity_is_managed(d)) {
                        irqd_set_managed_shutdown(d);
                        irq_shutdown_and_deactivate(desc);
                        return false;
                }
                affinity = cpu_online_mask;
                brokeaff = true;
        }

        /*
         * Do not set the force argument of irq_do_set_affinity() as this
         * disables the masking of offline CPUs from the supplied affinity
         * mask and therefore might keep/reassign the irq to the outgoing
         * CPU.
         */
        err = irq_do_set_affinity(d, affinity, false);
        if (err) {
                pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
                                    d->irq, err);
                brokeaff = false;
        }

        if (maskchip && chip->irq_unmask)
                chip->irq_unmask(d);

        return brokeaff;
}
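
/*
 * Lifecycle note (added for clarity, summarising the code above): when
 * the last online CPU of a managed affinity mask goes away, the
 * interrupt is shut down via irq_shutdown_and_deactivate() and flagged
 * with irqd_set_managed_shutdown(); its affinity mask is deliberately
 * left untouched. irq_restore_affinity_of_irq() below restarts it once
 * a CPU of that mask comes back online.
 */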

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
        struct irq_desc *desc;
        unsigned int irq;

        for_each_active_irq(irq) {
                bool affinity_broken;

                desc = irq_to_desc(irq);
                raw_spin_lock(&desc->lock);
                affinity_broken = migrate_one_irq(desc);
                raw_spin_unlock(&desc->lock);

                if (affinity_broken) {
                        pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
                                             irq, smp_processor_id());
                }
        }
}
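
/*
 * Usage sketch (simplified illustration of how arch code is assumed to
 * call this; the arm64 CPU disable path does roughly the same): the
 * caller runs on the dying CPU with interrupts disabled and must have
 * cleared the CPU from cpu_online_mask first:
 *
 *	int __cpu_disable(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		set_cpu_online(cpu, false);
 *		irq_migrate_all_off_this_cpu();
 *		return 0;
 *	}
 */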

static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
        const struct cpumask *hk_mask;

        if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ))
                return false;

        hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
        if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
                return false;

        return cpumask_test_cpu(cpu, hk_mask);
}
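
/*
 * Example (sketch, assuming an eight-CPU system): booting with
 * "isolcpus=managed_irq,2-7" leaves {0,1} as the HK_FLAG_MANAGED_IRQ
 * housekeeping mask. For a managed interrupt currently affine to an
 * isolated CPU, hk_should_isolate() returns true when CPU 0 or 1 comes
 * online, so irq_restore_affinity_of_irq() moves it back to a
 * housekeeping CPU.
 */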

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
        struct irq_data *data = irq_desc_get_irq_data(desc);
        const struct cpumask *affinity = irq_data_get_affinity_mask(data);

        if (!irqd_affinity_is_managed(data) || !desc->action ||
            !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
                return;

        if (irqd_is_managed_and_shutdown(data)) {
                irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
                return;
        }

        /*
         * If the interrupt can only be directed to a single target
         * CPU then it is already assigned to a CPU in the affinity
         * mask. No point in trying to move it around unless the
         * isolation mechanism requests to move it to an upcoming
         * housekeeping CPU.
         */
        if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
                irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu: Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
        struct irq_desc *desc;
        unsigned int irq;

        irq_lock_sparse();
        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                raw_spin_lock_irq(&desc->lock);
                irq_restore_affinity_of_irq(desc, cpu);
                raw_spin_unlock_irq(&desc->lock);
        }
        irq_unlock_sparse();

        return 0;
}
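
/*
 * Registration sketch (assumption: this mirrors how kernel/cpu.c wires
 * the callback into the CPU hotplug state machine; shown only for
 * illustration): irq_affinity_online_cpu() runs as a startup callback
 * while a CPU is brought online, roughly:
 *
 *	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
 *		.name			= "irq/affinity:online",
 *		.startup.single		= irq_affinity_online_cpu,
 *		.teardown.single	= NULL,
 *	},
 */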