watchdog.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Watchdog support on powerpc systems.
 *
 * Copyright 2017, IBM Corporation.
 *
 * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/smp.h>

#include <asm/paca.h>

/*
 * The powerpc watchdog ensures that each CPU is able to service timers.
 * The watchdog sets up a simple timer on each CPU to run once per timer
 * period, and updates a per-cpu timestamp and a "pending" cpumask. This is
 * the heartbeat.
 *
 * Then there are two systems to check that the heartbeat is still running.
 * The local soft-NMI, and the SMP checker.
 *
 * The soft-NMI checker can detect lockups on the local CPU. When interrupts
 * are disabled with local_irq_disable(), platforms that use soft-masking
 * can leave hardware interrupts enabled and handle them with a masked
 * interrupt handler. The masked handler can send the timer interrupt to the
 * watchdog's soft_nmi_interrupt(), which appears to Linux as an NMI
 * interrupt, and can be used to detect CPUs stuck with IRQs disabled.
 *
 * The soft-NMI checker will compare the heartbeat timestamp for this CPU
 * with the current time, and take action if the difference exceeds the
 * watchdog threshold.
 *
 * The limitation of the soft-NMI watchdog is that it does not work when
 * interrupts are hard disabled or otherwise not being serviced. This is
 * solved by also having an SMP watchdog where all CPUs check all other
 * CPUs' heartbeats.
 *
 * The SMP checker can detect lockups on other CPUs. A global "pending"
 * cpumask is kept, containing all CPUs which enable the watchdog. Each
 * CPU clears its pending bit in its heartbeat timer. When the bitmask
 * becomes empty, the last CPU to clear its pending bit updates a global
 * timestamp and refills the pending bitmask.
 *
 * In the heartbeat timer, if any CPU notices that the global timestamp has
 * not been updated for a period exceeding the watchdog threshold, then it
 * means the CPU(s) with their bit still set in the pending mask have had
 * their heartbeat stop, and action is taken.
 *
 * Some platforms implement true NMI IPIs, which can be used by the SMP
 * watchdog to detect an unresponsive CPU and pull it out of its stuck
 * state with the NMI IPI, to get crash/debug data from it. This way the
 * SMP watchdog can detect lockups where even hardware interrupts are
 * disabled.
 */

static cpumask_t wd_cpus_enabled __read_mostly;

static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */
static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */

static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeats */

static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
static DEFINE_PER_CPU(u64, wd_timer_tb);

/* SMP checker bits */
static unsigned long __wd_smp_lock;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;

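/*
 * __wd_smp_lock is a simple test-and-set bit lock protecting the SMP
 * checker state above (wd_smp_cpus_pending, wd_smp_cpus_stuck and
 * wd_smp_last_reset_tb). It is taken with hardware interrupts disabled
 * so that it is also safe to take from the soft-NMI path.
 */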
static inline void wd_smp_lock(unsigned long *flags)
{
	/*
	 * Avoid locking layers if possible.
	 * This may be called from low level interrupt handlers at some
	 * point in future.
	 */
	raw_local_irq_save(*flags);
	hard_irq_disable(); /* Make it soft-NMI safe */
	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
		raw_local_irq_restore(*flags);
		spin_until_cond(!test_bit(0, &__wd_smp_lock));
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static inline void wd_smp_unlock(unsigned long *flags)
{
	clear_bit_unlock(0, &__wd_smp_lock);
	raw_local_irq_restore(*flags);
}

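/*
 * NMI IPI handler run on a CPU that another CPU has flagged as locked up:
 * dump the victim CPU's registers and stack so crash/debug data is
 * captured on the console.
 */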
static void wd_lockup_ipi(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	u64 tb = get_tb();

	pr_emerg("CPU %d Hard LOCKUP\n", cpu);
	pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n",
		 cpu, tb, per_cpu(wd_timer_tb, cpu),
		 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
	print_modules();
	print_irqtrace_events(current);
	if (regs)
		show_regs(regs);
	else
		dump_stack();

	/* Do not panic from here because that can recurse into NMI IPI layer */
}

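/*
 * Move the CPUs in @cpumask from the pending mask to the stuck mask. If
 * that empties the pending mask, begin a new check period by updating the
 * global timestamp and refilling pending from the enabled-but-not-stuck
 * CPUs. Callers hold wd_smp_lock.
 */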
static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
{
	cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
	cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
	/*
	 * See wd_smp_clear_cpu_pending()
	 */
	smp_mb();
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		wd_smp_last_reset_tb = tb;
		cpumask_andnot(&wd_smp_cpus_pending,
			       &wd_cpus_enabled,
			       &wd_smp_cpus_stuck);
	}
}

static void set_cpu_stuck(int cpu, u64 tb)
{
	set_cpumask_stuck(cpumask_of(cpu), tb);
}

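/*
 * Called from the heartbeat path when the global SMP timestamp has not
 * advanced within wd_smp_panic_timeout_tb: report the CPUs whose pending
 * bits are still set, try to pull backtraces out of them with NMI IPIs,
 * take them out of the watch group, and panic if hardlockup_panic is set.
 */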
static void watchdog_smp_panic(int cpu, u64 tb)
{
	unsigned long flags;
	int c;

	wd_smp_lock(&flags);
	/* Double check some things under lock */
	if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
		goto out;
	if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
		goto out;
	if (cpumask_weight(&wd_smp_cpus_pending) == 0)
		goto out;

	pr_emerg("CPU %d detected hard LOCKUP on other CPUs %*pbl\n",
		 cpu, cpumask_pr_args(&wd_smp_cpus_pending));
	pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n",
		 cpu, tb, wd_smp_last_reset_tb,
		 tb_to_ns(tb - wd_smp_last_reset_tb) / 1000000);

	if (!sysctl_hardlockup_all_cpu_backtrace) {
		/*
		 * Try to trigger the stuck CPUs, unless we are going to
		 * get a backtrace on all of them anyway.
		 */
		for_each_cpu(c, &wd_smp_cpus_pending) {
			if (c == cpu)
				continue;
			smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
		}
	}

	/* Take the stuck CPUs out of the watch group */
	set_cpumask_stuck(&wd_smp_cpus_pending, tb);

	wd_smp_unlock(&flags);

	printk_safe_flush();
	/*
	 * printk_safe_flush() seems to require another print
	 * before anything actually goes out to console.
	 */
	if (sysctl_hardlockup_all_cpu_backtrace)
		trigger_allbutself_cpu_backtrace();

	if (hardlockup_panic)
		nmi_panic(NULL, "Hard LOCKUP");

	return;

out:
	wd_smp_unlock(&flags);
}

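/*
 * Clear this CPU's bit in the pending mask. If the CPU was previously
 * marked stuck, report that it became unstuck instead. If clearing the
 * bit leaves the mask empty, this CPU is the last to check in, so update
 * the global timestamp and refill the pending mask for the next period.
 */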
static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
{
	if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
		if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
			struct pt_regs *regs = get_irq_regs();
			unsigned long flags;

			wd_smp_lock(&flags);

			pr_emerg("CPU %d became unstuck TB:%lld\n",
				 cpu, tb);
			print_irqtrace_events(current);
			if (regs)
				show_regs(regs);
			else
				dump_stack();

			cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
			wd_smp_unlock(&flags);
		} else {
			/*
			 * The last CPU to clear pending should have reset the
			 * watchdog so we generally should not find it empty
			 * here if our CPU was clear. However it could happen
			 * due to a rare race with another CPU taking the
			 * last CPU out of the mask concurrently.
			 *
			 * We can't add a warning for it. But just in case
			 * there is a problem with the watchdog that is causing
			 * the mask to not be reset, try to kick it along here.
			 */
			if (unlikely(cpumask_empty(&wd_smp_cpus_pending)))
				goto none_pending;
		}
		return;
	}

	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);

	/*
	 * Order the store to clear pending with the load(s) to check all
	 * words in the pending mask to check they are all empty. This orders
	 * with the same barrier on another CPU. This prevents two CPUs
	 * clearing the last 2 pending bits, but neither seeing the other's
	 * store when checking if the mask is empty, and missing an empty
	 * mask, which ends with a false positive.
	 */
	smp_mb();
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		unsigned long flags;

none_pending:
		/*
		 * Double check under lock because more than one CPU could see
		 * a clear mask with the lockless check after clearing their
		 * pending bits.
		 */
		wd_smp_lock(&flags);
		if (cpumask_empty(&wd_smp_cpus_pending)) {
			wd_smp_last_reset_tb = tb;
			cpumask_andnot(&wd_smp_cpus_pending,
				       &wd_cpus_enabled,
				       &wd_smp_cpus_stuck);
		}
		wd_smp_unlock(&flags);
	}
}

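/*
 * Per-CPU heartbeat, called from the hrtimer: record the current timebase
 * for the local soft-NMI check, clear this CPU's bit in the SMP pending
 * mask, and run the SMP check if the global timestamp looks stale.
 */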
static void watchdog_timer_interrupt(int cpu)
{
	u64 tb = get_tb();

	per_cpu(wd_timer_tb, cpu) = tb;

	wd_smp_clear_cpu_pending(cpu, tb);

	if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
		watchdog_smp_panic(cpu, tb);
}

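/*
 * Soft-NMI entry, reached via the soft-masked timer interrupt path while
 * the CPU has interrupts soft-disabled. If the local heartbeat timestamp
 * is older than wd_panic_timeout_tb, this CPU has been stuck with IRQs
 * off: mark it stuck, dump its state and optionally panic. The decrementer
 * is re-armed afterwards so the check repeats.
 */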
void soft_nmi_interrupt(struct pt_regs *regs)
{
	unsigned long flags;
	int cpu = raw_smp_processor_id();
	u64 tb;

	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return;

	nmi_enter();

	__this_cpu_inc(irq_stat.soft_nmi_irqs);

	tb = get_tb();
	if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
		wd_smp_lock(&flags);
		if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
			wd_smp_unlock(&flags);
			goto out;
		}
		set_cpu_stuck(cpu, tb);

		pr_emerg("CPU %d self-detected hard LOCKUP @ %pS\n",
			 cpu, (void *)regs->nip);
		pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n",
			 cpu, tb, per_cpu(wd_timer_tb, cpu),
			 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
		print_modules();
		print_irqtrace_events(current);
		show_regs(regs);

		wd_smp_unlock(&flags);

		if (sysctl_hardlockup_all_cpu_backtrace)
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");
	}
	if (wd_panic_timeout_tb < 0x7fffffff)
		mtspr(SPRN_DEC, wd_panic_timeout_tb);

out:
	nmi_exit();
}

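/*
 * hrtimer callback that drives the heartbeat. It stops rearming itself if
 * the NMI watchdog has been disabled or this CPU is no longer in the
 * watched cpumask.
 */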
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	int cpu = smp_processor_id();

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return HRTIMER_NORESTART;

	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return HRTIMER_NORESTART;

	watchdog_timer_interrupt(cpu);

	hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));

	return HRTIMER_RESTART;
}

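/*
 * Reached via touch_nmi_watchdog() from code that legitimately runs for a
 * long time with interrupts off: pet the watchdog directly rather than
 * waiting for the heartbeat timer. The tick check limits how often the
 * shared pending mask is touched to roughly once per timer period.
 */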
void arch_touch_nmi_watchdog(void)
{
	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
	int cpu = smp_processor_id();
	u64 tb;

	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return;

	tb = get_tb();
	if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
		per_cpu(wd_timer_tb, cpu) = tb;
		wd_smp_clear_cpu_pending(cpu, tb);
	}
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

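/*
 * Enable the watchdog on the calling CPU: add it to the enabled mask (the
 * first CPU also starts the SMP check period) and start the pinned
 * per-CPU heartbeat hrtimer.
 */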
static void start_watchdog(void *arg)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
	int cpu = smp_processor_id();
	unsigned long flags;

	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
		WARN_ON(1);
		return;
	}

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return;

	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return;

	wd_smp_lock(&flags);
	cpumask_set_cpu(cpu, &wd_cpus_enabled);
	if (cpumask_weight(&wd_cpus_enabled) == 1) {
		cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
		wd_smp_last_reset_tb = get_tb();
	}
	wd_smp_unlock(&flags);

	*this_cpu_ptr(&wd_timer_tb) = get_tb();

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
		      HRTIMER_MODE_REL_PINNED);
}

static int start_watchdog_on_cpu(unsigned int cpu)
{
	return smp_call_function_single(cpu, start_watchdog, NULL, true);
}

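/*
 * Disable the watchdog on the calling CPU: cancel its heartbeat hrtimer,
 * drop it from the enabled mask, then clear any pending bit so the
 * remaining CPUs do not wait on it.
 */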
static void stop_watchdog(void *arg)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
	int cpu = smp_processor_id();
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return; /* Can happen in CPU unplug case */

	hrtimer_cancel(hrtimer);

	wd_smp_lock(&flags);
	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
	wd_smp_unlock(&flags);

	wd_smp_clear_cpu_pending(cpu, get_tb());
}

static int stop_watchdog_on_cpu(unsigned int cpu)
{
	return smp_call_function_single(cpu, stop_watchdog, NULL, true);
}

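/*
 * Derive the timeouts from watchdog_thresh (in seconds). For example,
 * with the default watchdog_thresh of 10 and a typical 512 MHz timebase,
 * the local panic timeout is ~5.12e9 timebase ticks (10s), the SMP panic
 * timeout is ~7.68e9 ticks (15s), and the heartbeat timer fires every
 * 4000ms.
 */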
static void watchdog_calc_timeouts(void)
{
	wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;

	/* Have the SMP detector trigger a bit later */
	wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;

	/* 2/5 is the factor that the perf based detector uses */
	wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}

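/*
 * Called by the core watchdog code, e.g. when the lockup detector is
 * reconfigured: stop the heartbeat on every CPU that currently has it
 * enabled.
 */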
void watchdog_nmi_stop(void)
{
	int cpu;

	for_each_cpu(cpu, &wd_cpus_enabled)
		stop_watchdog_on_cpu(cpu);
}

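/*
 * Counterpart to watchdog_nmi_stop(): recompute the timeouts and restart
 * the heartbeat on every online CPU allowed by the watchdog cpumask.
 */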
void watchdog_nmi_start(void)
{
	int cpu;

	watchdog_calc_timeouts();
	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
		start_watchdog_on_cpu(cpu);
}

/*
 * Invoked from core watchdog init.
 */
int __init watchdog_nmi_probe(void)
{
	int err;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"powerpc/watchdog:online",
					start_watchdog_on_cpu,
					stop_watchdog_on_cpu);
	if (err < 0) {
		pr_warn("could not be initialized");
		return err;
	}
	return 0;
}