// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_exit);

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear, will it result in better locality
     or will not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);

/*
 * active_softirqs -- per cpu, a mask of softirqs that are being handled,
 * with the expectation that approximate answers are acceptable and therefore
 * no synchronization.
 */
DEFINE_PER_CPU(__u32, active_softirqs);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so lets the scheduler to balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
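
/*
 * Illustrative sketch, not part of the original file: how the two offsets
 * above look from the caller's side. in_softirq() tests softirq_count() != 0
 * and is therefore true both while a softirq handler runs and while bottom
 * halves are merely disabled; in_serving_softirq() checks only the
 * SOFTIRQ_OFFSET bit and is true only during actual softirq processing.
 * A typical bh-disable section looks like:
 *
 *	local_bh_disable();	// preempt_count += SOFTIRQ_DISABLE_OFFSET
 *	// ... touch per-CPU state that a softirq handler also uses ...
 *	local_bh_enable();	// may run pending softirqs before returning
 */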
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS

DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance is latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not miss-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

#define softirq_deferred_for_rt(pending)		\
({							\
	__u32 deferred = 0;				\
	if (cpupri_check_rt()) {			\
		deferred = pending & LONG_SOFTIRQ_MASK;	\
		pending &= ~LONG_SOFTIRQ_MASK;		\
	}						\
	deferred;					\
})
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 deferred;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	deferred = softirq_deferred_for_rt(pending);
	account_irq_enter_time(current);
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(deferred);
	__this_cpu_write(active_softirqs, pending);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	__this_cpu_write(active_softirqs, 0);
	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	deferred = softirq_deferred_for_rt(pending);

	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;
	}

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
	if (pending | deferred)
		wakeup_softirqd();
#endif
	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}
/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}
	__irq_enter();
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
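
/*
 * Illustrative sketch, not part of the original file: how a core subsystem
 * wires one of the statically enumerated softirqs to a handler and raises it.
 * The vector must be one of the NR_SOFTIRQS entries (drivers are expected to
 * use tasklets or threaded irqs instead of adding new softirqs). The handler
 * name below is hypothetical.
 *
 *	static void example_softirq_action(struct softirq_action *h)
 *	{
 *		// runs with interrupts enabled, in softirq context
 *	}
 *
 *	// at init time:
 *	open_softirq(NET_TX_SOFTIRQ, example_softirq_action);
 *
 *	// later, typically from a hardirq handler:
 *	raise_softirq(NET_TX_SOFTIRQ);	// or raise_softirq_irqoff() with irqs off
 */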
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				if (t->use_callback) {
					trace_tasklet_entry(t->callback);
					t->callback(t);
					trace_tasklet_exit(t->callback);
				} else {
					trace_tasklet_entry(t->func);
					t->func(t->data);
					trace_tasklet_exit(t->func);
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
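
/*
 * Illustrative sketch, not part of the original file: the typical driver-side
 * lifecycle of the tasklet API above. All names are hypothetical.
 *
 *	static void mydrv_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct mydrv *drv = from_tasklet(drv, t, tasklet);
 *		// deferred work, runs in softirq context on the scheduling CPU
 *	}
 *
 *	// probe:           tasklet_setup(&drv->tasklet, mydrv_tasklet_fn);
 *	// hardirq handler: tasklet_schedule(&drv->tasklet);
 *	// remove:          tasklet_kill(&drv->tasklet);  // never from interrupt context
 */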
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */
int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}