irq_work.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>

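/*
 * Two per-CPU queues: entries on raised_list are run as soon as the
 * self-interrupt raised by arch_irq_work_raise() fires (or from the next
 * timer tick on architectures without such an interrupt), while entries
 * on lazy_list are normally deferred to the next tick and run from
 * irq_work_tick().
 */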
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        int oflags;

        oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
        /*
         * If the work is already pending, no need to raise the IPI.
         * The pairing atomic_fetch_andnot() in irq_work_single() makes sure
         * everything we did before is visible.
         */
        if (oflags & IRQ_WORK_PENDING)
                return false;
        return true;
}

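/*
 * Weak default: do nothing and let the next timer tick pick the work up
 * (see irq_work_tick()). Architectures that can raise a self-interrupt
 * override this, and typically also report that capability via
 * arch_irq_work_has_interrupt().
 */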
void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
        /* If the work is "lazy", handle it from next tick if any */
        if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

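/*
 * Illustrative usage sketch (not part of the original file), assuming the
 * DEFINE_IRQ_WORK()/init_irq_work() helpers from <linux/irq_work.h>; the
 * names below are hypothetical:
 *
 *      static void my_func(struct irq_work *work)
 *      {
 *              pr_info("runs in hardirq context on the queueing CPU\n");
 *      }
 *      static DEFINE_IRQ_WORK(my_work, my_func);
 *
 *      irq_work_queue(&my_work);        <- safe even from NMI context
 *
 * A second irq_work_queue() on the same entry while it is still pending
 * returns false, as irq_work_claim() above shows.
 */
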
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
        return irq_work_queue(work);

#else /* CONFIG_SMP: */
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        preempt_disable();
        if (cpu != smp_processor_id()) {
                /* Arch remote IPI send/receive backends aren't NMI safe */
                WARN_ON_ONCE(in_nmi());
                __smp_call_single_queue(cpu, &work->llnode);
        } else {
                __irq_work_queue_local(work);
        }
        preempt_enable();

        return true;
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);

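/*
 * Used by the NO_HZ tick code when deciding whether the tick can be
 * stopped on this CPU: returns true while either per-CPU list still has
 * entries that would need the tick to run them.
 */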
bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

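/*
 * Runs one entry. This is also the handler used for entries queued on a
 * remote CPU via __smp_call_single_queue(): the CSD_TYPE_IRQ_WORK bit set
 * in irq_work_claim() marks the entry so the generic SMP code can
 * dispatch it back here.
 */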
void irq_work_single(void *arg)
{
        struct irq_work *work = arg;
        int flags;

        /*
         * Clear the PENDING bit, after this point the @work
         * can be re-used.
         * Make it immediately visible so that other CPUs trying
         * to claim that work don't rely on us to handle their data
         * while we are in the middle of the func.
         */
        flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);

        lockdep_irq_work_enter(work);
        work->func(work);
        lockdep_irq_work_exit(work);

        /*
         * Clear the BUSY bit and return to the free state if
         * no-one else claimed it meanwhile.
         */
        flags &= ~IRQ_WORK_PENDING;
        (void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
}

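/*
 * Run every entry currently on @list. Interrupts must be disabled (see
 * the BUG_ON() below); since irq_work_single() clears IRQ_WORK_PENDING
 * before calling the function, a callback may safely re-queue its own
 * work while it runs.
 */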
static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode)
                irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

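/*
 * Called from the timer tick: drains the lazy list, and also the raised
 * list on architectures that have no dedicated irq_work self-interrupt
 * and therefore rely on the tick (see arch_irq_work_raise() above).
 */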
void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

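/*
 * Note: irq_work_sync() below busy-waits with cpu_relax() until the
 * IRQ_WORK_BUSY bit clears, so it must be called with interrupts enabled
 * (asserted via lockdep); a typical use is waiting for the callback to
 * finish before freeing the object that embeds the irq_work.
 */
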
/*
 * Synchronize against the irq_work @work, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);