idle.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

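/*
 * Idle state of a CPU as seen by the scheduler's load balancer.
 * CPU_NEWLY_IDLE marks a CPU that has just run out of runnable tasks
 * and is balancing on its way into idle; CPU_MAX_IDLE_TYPES is only
 * the number of states, used to size statistics arrays.
 */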
enum cpu_idle_type {
        CPU_IDLE,
        CPU_NOT_IDLE,
        CPU_NEWLY_IDLE,
        CPU_MAX_IDLE_TYPES
};

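/* Kick @cpu out of the idle loop if it is currently idle. */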
extern void wake_up_if_idle(int cpu);

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG
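/*
 * TIF_POLLING_NRFLAG means the idle loop polls TIF_NEED_RESCHED
 * instead of sleeping until an IPI arrives: a remote resched_curr()
 * that sees the polling bit set can simply set TIF_NEED_RESCHED and
 * skip the IPI, which is why the set/clear helpers below pair with
 * memory barriers.
 */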
static inline void __current_set_polling(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
        __current_set_polling();

        /*
         * Polling state must be visible before we test NEED_RESCHED,
         * paired by resched_curr()
         */
        smp_mb__after_atomic();

        return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
        clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
        __current_clr_polling();

        /*
         * Polling state must be visible before we test NEED_RESCHED,
         * paired by resched_curr()
         */
        smp_mb__after_atomic();

        return unlikely(tif_need_resched());
}

#else
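/*
 * Without TIF_POLLING_NRFLAG the idle loop never advertises polling,
 * so the set/clear helpers are no-ops and the *_and_test() variants
 * reduce to a plain need_resched check.
 */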
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
        return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
        return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
        __current_clr_polling();

        /*
         * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
         * Once the bit is cleared, we'll get IPIs with every new
         * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
         * fold.
         */
        smp_mb(); /* paired with resched_curr() */

        preempt_fold_need_resched();
}

#endif /* _LINUX_SCHED_IDLE_H */
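
As a reading aid, here is a minimal sketch of how an idle loop might use these helpers, loosely modeled on kernel/sched/idle.c. The function example_idle_loop() and its simplifications (no interrupt handling, no cpuidle governor) are illustrative assumptions, not part of this header.

/* Hypothetical sketch, not the kernel's actual idle loop. */
static void example_idle_loop(void)
{
        __current_set_polling();

        while (!need_resched()) {
                /*
                 * About to stop polling for a deeper sleep: clear the
                 * polling bit and re-check NEED_RESCHED under the
                 * barrier the helper provides. A true return means a
                 * remote CPU saw us polling and skipped the IPI, so
                 * sleeping now would miss that wakeup.
                 */
                if (current_clr_polling_and_test())
                        break;

                arch_cpu_idle();        /* sleep until an interrupt */
                __current_set_polling();
        }

        /* Leaving idle: stop polling and fold any pending resched. */
        current_clr_polling();
}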