watchdog_hld.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>
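
/*
 * Per-CPU bookkeeping for the perf based hardlockup detector:
 * hard_watchdog_warn rate-limits the warning to once per lockup,
 * watchdog_nmi_touch suppresses the next check on a touched CPU,
 * watchdog_ev holds the live perf event, and dead_event parks events
 * that still have to be released by hardlockup_detector_perf_cleanup().
 */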
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;

static unsigned long hardlockup_allcpu_dumped;
static atomic_t watchdog_cpus = ATOMIC_INIT(0);
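
/*
 * arch_touch_nmi_watchdog - mark the calling CPU as recently touched
 *
 * The next perf NMI on this CPU skips the hardlockup check, so code that
 * legitimately keeps a CPU busy can avoid a false warning.
 */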
notrace void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;

void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
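	/*
	 * Example: with watchdog_thresh = 10s the hrtimer period is
	 * (10 * 2) / 5 = 4s, so the threshold set below becomes 8s,
	 * i.e. 4/5 of the 10s watchdog window.
	 */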
	watchdog_hrtimer_sample_threshold = period * 2;
}
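
/*
 * Return true when enough time has passed since the last NMI sample to
 * run the hardlockup check, false to skip this sample.  A CPU that keeps
 * rearming early is let through after 10 attempts so that a stalled
 * jiffies-based clocksource cannot mask a real lockup.
 */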
static bool watchdog_check_timestamp(void)
{
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger.
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}
#else
static inline bool watchdog_check_timestamp(void)
{
	return true;
}
#endif
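
/*
 * Template for the per-CPU perf event: a pinned, initially disabled
 * hardware cycle counter whose overflow period is set from
 * watchdog_thresh before each event is created.
 */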
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	if (!watchdog_check_timestamp())
		return;

	/*
	 * Check for a hardlockup by making sure the hrtimer interrupt
	 * count on this CPU is still incrementing.  The timer interrupt
	 * should have fired multiple times before this perf event
	 * overflowed; if it hasn't, that is a good indication the CPU
	 * is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
			 this_cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		/*
		 * Perform the all-CPU dump only once to avoid multiple
		 * hardlockups generating interleaved traces.
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
		    !test_and_set_bit(0, &hardlockup_allcpu_dumped))
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
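
/*
 * Create the hardlockup perf event for the calling CPU.  The event is
 * created in the disabled state and stored in the per-CPU watchdog_ev
 * pointer; the callers enable it (or release it again, for the probe
 * case) as appropriate.
 */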
static int hardlockup_detector_event_create(void)
{
	unsigned int cpu = smp_processor_id();
	struct perf_event_attr *wd_attr;
	struct perf_event *evt;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
					       watchdog_overflow_callback, NULL);
	if (IS_ERR(evt)) {
		pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
			 PTR_ERR(evt));
		return PTR_ERR(evt);
	}
	this_cpu_write(watchdog_ev, evt);
	return 0;
}

/**
 * hardlockup_detector_perf_enable - Enable the local event
 */
void hardlockup_detector_perf_enable(void)
{
	if (hardlockup_detector_event_create())
		return;

	/* use original value for check */
	if (!atomic_fetch_inc(&watchdog_cpus))
		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");

	perf_event_enable(this_cpu_read(watchdog_ev));
}

/**
 * hardlockup_detector_perf_disable - Disable the local event
 */
void hardlockup_detector_perf_disable(void)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	if (event) {
		perf_event_disable(event);
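		/*
		 * Do not release the event here; park it in dead_event
		 * and let hardlockup_detector_perf_cleanup() free it
		 * later.
		 */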
		this_cpu_write(watchdog_ev, NULL);
		this_cpu_write(dead_event, event);
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
		atomic_dec(&watchdog_cpus);
	}
}

/**
 * hardlockup_detector_perf_cleanup - Clean up disabled events and destroy them
 *
 * Called from lockup_detector_cleanup(). Serialized by the caller.
 */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(dead_event, cpu);

		/*
		 * Required because for_each_cpu() unconditionally reports
		 * CPU0 as set on UP kernels. Sigh.
		 */
		if (event)
			perf_event_release_kernel(event);
		per_cpu(dead_event, cpu) = NULL;
	}
	cpumask_clear(&dead_events_mask);
}

/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_stop(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}

/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_restart(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}

/**
 * hardlockup_detector_perf_init - Probe whether NMI event is available at all
 */
int __init hardlockup_detector_perf_init(void)
{
	int ret = hardlockup_detector_event_create();

	if (ret) {
		pr_info("Perf NMI watchdog permanently disabled\n");
	} else {
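		/*
		 * The event was only created to probe for a working PMU;
		 * release it again.  Per-CPU events are created later via
		 * hardlockup_detector_perf_enable().
		 */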
		perf_event_release_kernel(this_cpu_read(watchdog_ev));
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}