clock.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/smp.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs; using it (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
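
/*
 * Example (editor's sketch, not in the upstream header): code outside of
 * architecture code should reach sched_clock() time through local_clock()
 * or cpu_clock() rather than calling sched_clock() directly.  A minimal
 * sketch, assuming a hypothetical helper my_measure_work() and a
 * placeholder do_work() for whatever is being timed:
 *
 *	#include <linux/sched/clock.h>
 *	#include <linux/printk.h>
 *
 *	static void my_measure_work(void)
 *	{
 *		u64 t0, t1;
 *
 *		t0 = local_clock();
 *		do_work();
 *		t1 = local_clock();
 *
 *		pr_info("do_work() took %llu ns\n", t1 - t0);
 *	}
 *
 * local_clock() is only guaranteed monotonic on the CPU it is read from,
 * so if the task can migrate between the two reads, pin it or take both
 * timestamps via cpu_clock() with the same cpu argument.
 */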

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(void)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

/*
 * When sched_clock_stable(), __sched_clock_offset provides the offset
 * between local_clock() and sched_clock().
 */
extern u64 __sched_clock_offset;
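
/*
 * Example (editor's sketch, not in the upstream header): as implemented
 * in kernel/sched/clock.c, the stable-clock fast path makes local_clock()
 * roughly sched_clock() + __sched_clock_offset, so a raw sched_clock()
 * value can be translated into the local_clock() domain:
 *
 *	u64 raw = sched_clock();
 *	u64 in_local_domain = 0;
 *
 *	if (sched_clock_stable())
 *		in_local_domain = raw + __sched_clock_offset;
 *
 * When sched_clock_stable() is false the offset is not meaningful and
 * local_clock() must be read directly.
 */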

extern void sched_clock_tick(void);
extern void sched_clock_tick_stable(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(void);

/*
 * As outlined in clock.c, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif
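
/*
 * Example (editor's sketch, not in the upstream header): per the warning
 * above, only compute deltas between cpu_clock() reads that used the same
 * cpu argument.  A hypothetical event record, struct my_event, shows the
 * safe pattern:
 *
 *	struct my_event {
 *		int cpu;
 *		u64 ts;
 *	};
 *
 *	static void my_event_stamp(struct my_event *e)
 *	{
 *		e->cpu = raw_smp_processor_id();
 *		e->ts = cpu_clock(e->cpu);
 *	}
 *
 *	static u64 my_event_delta(const struct my_event *a,
 *				  const struct my_event *b)
 *	{
 *		if (a->cpu != b->cpu)
 *			return 0;
 *		return b->ts - a->ts;
 *	}
 *
 * The cross-CPU case is refused here because such a delta can come out
 * negative; callers that must compare across CPUs have to tolerate the
 * bounded drift themselves.
 */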

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for a runtime opt-in to irq time accounting based on
 * sched_clock.  The opt-in is explicit to avoid a performance penalty
 * with slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif
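
/*
 * Example (editor's sketch, not in the upstream header): the opt-in is
 * intended to be called from architecture setup code once it knows its
 * sched_clock() is cheap enough.  A hypothetical init hook,
 * my_arch_clocksource_init(), with my_sched_clock_is_fast() standing in
 * for whatever check the architecture uses:
 *
 *	static void __init my_arch_clocksource_init(void)
 *	{
 *		if (my_sched_clock_is_fast())
 *			enable_sched_clock_irqtime();
 *	}
 *
 * With CONFIG_IRQ_TIME_ACCOUNTING disabled the call compiles away via the
 * empty stub above.
 */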

#endif /* _LINUX_SCHED_CLOCK_H */