watchdog.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

#include <trace/hooks/softlockup.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT 1
#else
# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT 0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
        nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                nmi_watchdog_user_enabled = 0;
        else if (!strncmp(str, "1", 1))
                nmi_watchdog_user_enabled = 1;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
        hardlockup_detector_perf_enable();
        return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
        hardlockup_detector_perf_disable();
}
/* Return 0 if an NMI watchdog is available, an error code otherwise. */
int __weak __init watchdog_nmi_probe(void)
{
        return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
        watchdog_enabled = 0;
        if (!watchdog_user_enabled)
                return;
        if (nmi_watchdog_available && nmi_watchdog_user_enabled)
                watchdog_enabled |= NMI_WATCHDOG_ENABLED;
        if (soft_watchdog_user_enabled)
                watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

#define SOFTLOCKUP_RESET ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
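
/*
 * Boot parameters: "nowatchdog" disables both the hard and soft lockup
 * detectors (it clears the top-level watchdog_user_enabled switch checked
 * in lockup_detector_update_enable()), while "nosoftlockup" only disables
 * the softlockup detector.
 */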
static int __init nowatchdog_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
        soft_watchdog_user_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
        get_option(&str, &watchdog_thresh);
        return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
        /*
         * convert watchdog_thresh from seconds to ns
         * the divide by 5 is to give hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning
         */
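        /*
         * With the default watchdog_thresh of 10 seconds this yields a
         * soft-lockup threshold of 20 seconds and a sample_period of
         * 20 s / 5 = 4 s, the default period mentioned above softlockup_fn().
         */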
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
        watchdog_update_hrtimer_threshold(sample_period);
}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}

notrace void touch_softlockup_watchdog(void)
{
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;
        /*
         * watchdog_mutex cannot be taken here, as this might be called
         * from (soft)interrupt context, so the access to
         * watchdog_allowed_mask might race with a concurrent update.
         *
         * The watchdog time stamp can race against a concurrent real
         * update as well, the only side effect might be a cycle delay for
         * the softlockup check.
         */
        for_each_cpu(cpu, &watchdog_allowed_mask)
                per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
        wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
        __this_cpu_write(softlockup_touch_sync, true);
        __this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}
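
/*
 * Returns the stall duration in seconds once touch_ts is older than the
 * soft-lockup threshold, 0 otherwise.
 */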
static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
                /* Warn about unreasonable delays. */
                if (time_after(now, touch_ts + get_softlockup_thresh()))
                        return now - touch_ts;
        }
        return 0;
}

/* watchdog detector functions */
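/*
 * Called by the hardlockup detector (e.g. the perf NMI callback): if
 * hrtimer_interrupts has not advanced since the previous check, the watchdog
 * hrtimer is no longer firing on this CPU, which is taken as a hard lockup
 * with interrupts disabled.
 */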
bool is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return true;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return false;
}

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
        update_touch_ts();
        complete(this_cpu_ptr(&softlockup_completion));

        return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        if (!watchdog_enabled)
                return HRTIMER_NORESTART;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        if (completion_done(this_cpu_ptr(&softlockup_completion))) {
                reinit_completion(this_cpu_ptr(&softlockup_completion));
                stop_one_cpu_nowait(smp_processor_id(),
                                softlockup_fn, NULL,
                                this_cpu_ptr(&softlockup_stop_work));
        }

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == SOFTLOCKUP_RESET) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                update_touch_ts();
                return HRTIMER_RESTART;
        }

        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled. The task touches the watchdog to
         * indicate it is getting cpu time. If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /*
                 * Prevent multiple soft-lockup reports if one cpu is already
                 * engaged in dumping all cpu back traces.
                 */
                if (softlockup_all_cpu_backtrace) {
                        if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
                                return HRTIMER_RESTART;
                }

                /* Start period for the next softlockup warning. */
                update_touch_ts();

                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        trigger_allbutself_cpu_backtrace();
                        clear_bit_unlock(0, &soft_lockup_nmi_warn);
                }

                trace_android_vh_watchdog_timer_softlockup(duration, regs, !!softlockup_panic);
                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
        }

        return HRTIMER_RESTART;
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
        struct completion *done = this_cpu_ptr(&softlockup_completion);

        WARN_ON_ONCE(cpu != smp_processor_id());

        init_completion(done);
        complete(done);

        /*
         * Start the timer first to prevent the NMI watchdog triggering
         * before the timer has a chance to fire.
         */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        hrtimer->function = watchdog_timer_fn;
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED_HARD);

        /* Initialize timestamp */
        update_touch_ts();
        /* Enable the perf event */
        if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
                watchdog_nmi_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

        WARN_ON_ONCE(cpu != smp_processor_id());
        /*
         * Disable the perf event first. That prevents a false positive in
         * the perf NMI when there is a large delay between disabling the
         * timer and disabling the perf event.
         */
        watchdog_nmi_disable(cpu);
        hrtimer_cancel(hrtimer);
        wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
        watchdog_disable(smp_processor_id());
        return 0;
}

static void softlockup_stop_all(void)
{
        int cpu;

        if (!softlockup_initialized)
                return;

        for_each_cpu(cpu, &watchdog_allowed_mask)
                smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

        cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
        watchdog_enable(smp_processor_id());
        return 0;
}

static void softlockup_start_all(void)
{
        int cpu;

        cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
        for_each_cpu(cpu, &watchdog_allowed_mask)
                smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
        if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
                watchdog_enable(cpu);
        return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
        if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
                watchdog_disable(cpu);
        return 0;
}
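
/*
 * Rebuild the watchdog state using the sequence documented at
 * watchdog_nmi_stop(): stop the NMI watchdog and the softlockup timers,
 * update sample_period and watchdog_enabled, then restart whatever is
 * still enabled on the CPUs in watchdog_cpumask.
 */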
static void lockup_detector_reconfigure(void)
{
        cpus_read_lock();
        watchdog_nmi_stop();

        softlockup_stop_all();
        set_sample_period();
        lockup_detector_update_enable();
        if (watchdog_enabled && watchdog_thresh)
                softlockup_start_all();

        watchdog_nmi_start();
        cpus_read_unlock();
        /*
         * Must be called outside the cpus locked section to prevent
         * recursive locking in the perf code.
         */
        __lockup_detector_cleanup();
}

/*
 * Create the watchdog thread infrastructure and configure the detector(s).
 *
 * The threads are not unparked as watchdog_allowed_mask is empty. When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
        /*
         * If sysctl is off and watchdog got disabled on the command line,
         * nothing to do here.
         */
        lockup_detector_update_enable();

        if (!IS_ENABLED(CONFIG_SYSCTL) &&
            !(watchdog_enabled && watchdog_thresh))
                return;

        mutex_lock(&watchdog_mutex);
        lockup_detector_reconfigure();
        softlockup_initialized = true;
        mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void lockup_detector_reconfigure(void)
{
        cpus_read_lock();
        watchdog_nmi_stop();
        lockup_detector_update_enable();
        watchdog_nmi_start();
        cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
        lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
        lockdep_assert_held(&watchdog_mutex);
        hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
        mutex_lock(&watchdog_mutex);
        __lockup_detector_cleanup();
        mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
        watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
        /* Remove impossible cpus to keep sysctl output clean. */
        cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
        lockup_detector_reconfigure();
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, *param = table->data;

        mutex_lock(&watchdog_mutex);

        if (!write) {
                /*
                 * On read synchronize the userspace interface. This is a
                 * racy snapshot.
                 */
                *param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
                old = READ_ONCE(*param);
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
                if (!err && old != READ_ONCE(*param))
                        proc_watchdog_update();
        }
        mutex_unlock(&watchdog_mutex);
        return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
                  void *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void *buffer, size_t *lenp, loff_t *ppos)
{
        if (!nmi_watchdog_available && write)
                return -ENOTSUPP;
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
                       void *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old;

        mutex_lock(&watchdog_mutex);

        old = READ_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (!err && write && old != READ_ONCE(watchdog_thresh))
                proc_watchdog_update();

        mutex_unlock(&watchdog_mutex);
        return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
                          void *buffer, size_t *lenp, loff_t *ppos)
{
        int err;

        mutex_lock(&watchdog_mutex);

        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
        if (!err && write)
                proc_watchdog_update();

        mutex_unlock(&watchdog_mutex);
        return err;
}
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
        if (tick_nohz_full_enabled())
                pr_info("Disabling watchdog on nohz_full cores by default\n");

        cpumask_copy(&watchdog_cpumask,
                     housekeeping_cpumask(HK_FLAG_TIMER));

        if (!watchdog_nmi_probe())
                nmi_watchdog_available = true;

        lockup_detector_setup();
}