// SPDX-License-Identifier: GPL-2.0

#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>

#include "kcsan.h"
#include "encoding.h"
/*
 * Max. number of stack entries to show in the report.
 */
#define NUM_STACK_ENTRIES 64

/* Common access info. */
struct access_info {
        const volatile void     *ptr;
        size_t                  size;
        int                     access_type;
        int                     task_pid;
        int                     cpu_id;
};
/*
 * Other thread info: communicated from other racing thread to thread that set
 * up the watchpoint, which then prints the complete report atomically.
 */
struct other_info {
        struct access_info      ai;
        unsigned long           stack_entries[NUM_STACK_ENTRIES];
        int                     num_stack_entries;

        /*
         * Optionally pass @current. Typically we do not need to pass @current
         * via @other_info since just @task_pid is sufficient. Passing @current
         * has additional overhead.
         *
         * To safely pass @current, we must either use get_task_struct/
         * put_task_struct, or stall the thread that populated @other_info.
         *
         * We cannot rely on get_task_struct/put_task_struct in case
         * release_report() races with a task being released, and would have to
         * free it in release_report(). This may result in deadlock if we want
         * to use KCSAN on the allocators.
         *
         * Since we also want to reliably print held locks for
         * CONFIG_KCSAN_VERBOSE, the current implementation stalls the thread
         * that populated @other_info until it has been consumed.
         */
        struct task_struct      *task;
};
/*
 * To never block any producers of struct other_info, we need as many elements
 * as we have watchpoints (upper bound on concurrent races to report).
 */
static struct other_info other_infos[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1];
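/*
 * Note: the sizing matches the @watchpoints array in core.c (one entry per
 * watchpoint slot), which is what guarantees each other_infos entry has at
 * most one producer at a time; see the exclusivity argument in
 * prepare_report_producer() below.
 */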
/*
 * Information about reported races; used to rate limit reporting.
 */
struct report_time {
        /*
         * The last time the race was reported.
         */
        unsigned long time;

        /*
         * The frames of the 2 threads; if only 1 thread is known, one frame
         * will be 0.
         */
        unsigned long frame1;
        unsigned long frame2;
};
/*
 * Since we also want to be able to debug allocators with KCSAN, to avoid
 * deadlock, report_times cannot be dynamically resized with krealloc in
 * rate_limit_report.
 *
 * Therefore, we use a fixed-size array, which at most will occupy a page. This
 * still adequately rate limits reports, assuming that a) number of unique data
 * races is not excessive, and b) occurrence of unique races within the
 * same time window is limited.
 */
#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
#define REPORT_TIMES_SIZE                                                      \
        (CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ?                   \
                 REPORT_TIMES_MAX :                                            \
                 CONFIG_KCSAN_REPORT_ONCE_IN_MS)
static struct report_time report_times[REPORT_TIMES_SIZE];
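/*
 * Worked example of the sizing (assuming 4 KiB pages and 8-byte longs):
 * sizeof(struct report_time) == 24, so REPORT_TIMES_MAX == 170; with a
 * CONFIG_KCSAN_REPORT_ONCE_IN_MS of 3000, the array is therefore capped at
 * 170 entries rather than 3000, keeping it within a single page.
 */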
/*
 * Spinlock serializing report generation, and access to @other_infos. Although
 * it could make sense to have a finer-grained locking story for @other_infos,
 * report generation needs to be serialized either way, so not much is gained.
 */
static DEFINE_RAW_SPINLOCK(report_lock);
/*
 * Checks if the race identified by thread frames frame1 and frame2 has
 * been reported since (now - KCSAN_REPORT_ONCE_IN_MS).
 */
static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
{
        struct report_time *use_entry = &report_times[0];
        unsigned long invalid_before;
        int i;

        BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0);

        if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0)
                return false;

        invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);

        /* Check if a matching race report exists. */
        for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
                struct report_time *rt = &report_times[i];

                /*
                 * Must always select an entry for use to store info as we
                 * cannot resize report_times; at the end of the scan, use_entry
                 * will be the oldest entry, which ideally also happened before
                 * KCSAN_REPORT_ONCE_IN_MS ago.
                 */
                if (time_before(rt->time, use_entry->time))
                        use_entry = rt;

                /*
                 * Initially, no need to check any further as this entry as well
                 * as following entries have never been used.
                 */
                if (rt->time == 0)
                        break;

                /* Check if entry expired. */
                if (time_before(rt->time, invalid_before))
                        continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */

                /* Reported recently, check if race matches. */
                if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
                    (rt->frame1 == frame2 && rt->frame2 == frame1))
                        return true;
        }

        use_entry->time = jiffies;
        use_entry->frame1 = frame1;
        use_entry->frame2 = frame2;
        return false;
}
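/*
 * In other words, with the default CONFIG_KCSAN_REPORT_ONCE_IN_MS of 3000
 * (per lib/Kconfig.kcsan), a race between the same pair of frames is reported
 * at most once every 3 seconds; the frame pair is matched in either order.
 */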
/*
 * Special rules to skip reporting.
 */
static bool
skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
{
        /* Should never get here if value_change==FALSE. */
        WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);

        /*
         * The first call to skip_report always has value_change==TRUE, since we
         * cannot know the value written of an instrumented access. For the 2nd
         * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY:
         *
         * 1. read watchpoint, conflicting write (value_change==TRUE): report;
         * 2. read watchpoint, conflicting write (value_change==MAYBE): skip;
         * 3. write watchpoint, conflicting write (value_change==TRUE): report;
         * 4. write watchpoint, conflicting write (value_change==MAYBE): skip;
         * 5. write watchpoint, conflicting read (value_change==MAYBE): skip;
         * 6. write watchpoint, conflicting read (value_change==TRUE): report;
         *
         * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
         * data races where the write may have rewritten the same value; case 6
         * is possible either if the size is larger than what we check value
         * changes for or the access type is KCSAN_ACCESS_ASSERT.
         */
        if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
            value_change == KCSAN_VALUE_CHANGE_MAYBE) {
                /*
                 * The access is a write, but the data value did not change.
                 *
                 * We opt-out of this filter for certain functions at request of
                 * maintainers.
                 */
                char buf[64];
                int len = scnprintf(buf, sizeof(buf), "%ps", (void *)top_frame);

                if (!strnstr(buf, "rcu_", len) &&
                    !strnstr(buf, "_rcu", len) &&
                    !strnstr(buf, "_srcu", len))
                        return true;
        }

        return kcsan_skip_report_debugfs(top_frame);
}
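/*
 * Example of the filter in action: a store that writes back the value already
 * present (say, re-setting an already-set flag) cannot be observed as a value
 * change, so the 2nd call here sees value_change==MAYBE and the report is
 * skipped (cases 2, 4 and 5 above), unless the top frame is an RCU/SRCU
 * function exempted from the filter.
 */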
static const char *get_access_type(int type)
{
        if (type & KCSAN_ACCESS_ASSERT) {
                if (type & KCSAN_ACCESS_SCOPED) {
                        if (type & KCSAN_ACCESS_WRITE)
                                return "assert no accesses (scoped)";
                        else
                                return "assert no writes (scoped)";
                } else {
                        if (type & KCSAN_ACCESS_WRITE)
                                return "assert no accesses";
                        else
                                return "assert no writes";
                }
        }

        switch (type) {
        case 0:
                return "read";
        case KCSAN_ACCESS_ATOMIC:
                return "read (marked)";
        case KCSAN_ACCESS_WRITE:
                return "write";
        case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
                return "write (marked)";
        case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
                return "read-write";
        case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
                return "read-write (marked)";
        case KCSAN_ACCESS_SCOPED:
                return "read (scoped)";
        case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
                return "read (marked, scoped)";
        case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE:
                return "write (scoped)";
        case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
                return "write (marked, scoped)";
        default:
                BUG();
        }
}
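/*
 * For reference (based on kcsan-checks.h): ASSERT_EXCLUSIVE_WRITER() checks
 * with type KCSAN_ACCESS_ASSERT ("assert no writes"), ASSERT_EXCLUSIVE_ACCESS()
 * with KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE ("assert no accesses"), and
 * the *_SCOPED variants additionally set KCSAN_ACCESS_SCOPED.
 */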
static const char *get_bug_type(int type)
{
        return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race";
}
/* Return thread description: in task or interrupt. */
static const char *get_thread_desc(int task_id)
{
        if (task_id != -1) {
                static char buf[32]; /* safe: protected by report_lock */

                snprintf(buf, sizeof(buf), "task %i", task_id);
                return buf;
        }
        return "interrupt";
}
/* Helper to skip KCSAN-related functions in stack-trace. */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
{
        char buf[64];
        char *cur;
        int len, skip;

        for (skip = 0; skip < num_entries; ++skip) {
                len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);

                /* Never show tsan_* or {read,write}_once_size. */
                if (strnstr(buf, "tsan_", len) ||
                    strnstr(buf, "_once_size", len))
                        continue;

                cur = strnstr(buf, "kcsan_", len);
                if (cur) {
                        cur += strlen("kcsan_");
                        if (!str_has_prefix(cur, "test"))
                                continue; /* KCSAN runtime function. */
                        /* KCSAN related test. */
                }

                /*
                 * No match for runtime functions -- @skip entries to skip to
                 * get to first frame of interest.
                 */
                break;
        }

        return skip;
}
/* Compares symbolized strings of addr1 and addr2. */
static int sym_strcmp(void *addr1, void *addr2)
{
        char buf1[64];
        char buf2[64];

        snprintf(buf1, sizeof(buf1), "%pS", addr1);
        snprintf(buf2, sizeof(buf2), "%pS", addr2);

        return strncmp(buf1, buf2, sizeof(buf1));
}
static void print_verbose_info(struct task_struct *task)
{
        if (!task)
                return;

        /* Restore IRQ state trace for printing. */
        kcsan_restore_irqtrace(task);

        pr_err("\n");
        debug_show_held_locks(task);
        print_irqtrace_events(task);
}
/*
 * Returns true if a report was generated, false otherwise.
 */
static bool print_report(enum kcsan_value_change value_change,
                         enum kcsan_report_type type,
                         const struct access_info *ai,
                         const struct other_info *other_info)
{
        unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
        int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
        int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
        unsigned long this_frame = stack_entries[skipnr];
        unsigned long other_frame = 0;
        int other_skipnr = 0; /* silence uninit warnings */

        /*
         * Must check report filter rules before starting to print.
         */
        if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
                return false;

        if (type == KCSAN_REPORT_RACE_SIGNAL) {
                other_skipnr = get_stack_skipnr(other_info->stack_entries,
                                                other_info->num_stack_entries);
                other_frame = other_info->stack_entries[other_skipnr];

                /* @value_change is only known for the other thread */
                if (skip_report(value_change, other_frame))
                        return false;
        }

        if (rate_limit_report(this_frame, other_frame))
                return false;

        /* Print report header. */
        pr_err("==================================================================\n");
        switch (type) {
        case KCSAN_REPORT_RACE_SIGNAL: {
                int cmp;

                /*
                 * Order functions lexicographically for consistent bug titles.
                 * Do not print offset of functions to keep title short.
                 */
                cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
                pr_err("BUG: KCSAN: %s in %ps / %ps\n",
                       get_bug_type(ai->access_type | other_info->ai.access_type),
                       (void *)(cmp < 0 ? other_frame : this_frame),
                       (void *)(cmp < 0 ? this_frame : other_frame));
        } break;

        case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
                pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type),
                       (void *)this_frame);
                break;

        default:
                BUG();
        }

        pr_err("\n");

        /* Print information about the racing accesses. */
        switch (type) {
        case KCSAN_REPORT_RACE_SIGNAL:
                pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
                       get_access_type(other_info->ai.access_type), other_info->ai.ptr,
                       other_info->ai.size, get_thread_desc(other_info->ai.task_pid),
                       other_info->ai.cpu_id);

                /* Print the other thread's stack trace. */
                stack_trace_print(other_info->stack_entries + other_skipnr,
                                  other_info->num_stack_entries - other_skipnr,
                                  0);

                if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
                        print_verbose_info(other_info->task);

                pr_err("\n");
                pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
                       get_access_type(ai->access_type), ai->ptr, ai->size,
                       get_thread_desc(ai->task_pid), ai->cpu_id);
                break;

        case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
                pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
                       get_access_type(ai->access_type), ai->ptr, ai->size,
                       get_thread_desc(ai->task_pid), ai->cpu_id);
                break;

        default:
                BUG();
        }

        /* Print stack trace of this thread. */
        stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
                          0);

        if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
                print_verbose_info(current);

        /* Print report footer. */
        pr_err("\n");
        pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
        dump_stack_print_info(KERN_DEFAULT);
        pr_err("==================================================================\n");

        return true;
}
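/*
 * Shape of the resulting output for KCSAN_REPORT_RACE_SIGNAL, following the
 * pr_err() format strings above (function names, addresses and task IDs below
 * are hypothetical):
 *
 *   ==================================================================
 *   BUG: KCSAN: data-race in foo_reader / foo_writer
 *
 *   write to 0xffff88810cb01c48 of 8 bytes by task 512 on cpu 2:
 *    foo_writer+0x4c/0x70
 *    ...
 *
 *   read to 0xffff88810cb01c48 of 8 bytes by task 513 on cpu 3:
 *    foo_reader+0x18/0x30
 *    ...
 *
 *   Reported by Kernel Concurrency Sanitizer on:
 *   ...
 *   ==================================================================
 */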
static void release_report(unsigned long *flags, struct other_info *other_info)
{
        if (other_info)
                /*
                 * Use size to denote valid/invalid, since KCSAN entirely
                 * ignores 0-sized accesses.
                 */
                other_info->ai.size = 0;

        raw_spin_unlock_irqrestore(&report_lock, *flags);
}
/*
 * Sets @other_info->task and awaits consumption of @other_info.
 *
 * Precondition: report_lock is held.
 * Postcondition: report_lock is held.
 */
static void set_other_info_task_blocking(unsigned long *flags,
                                         const struct access_info *ai,
                                         struct other_info *other_info)
{
        /*
         * We may be instrumenting a code-path where current->state is already
         * something other than TASK_RUNNING.
         */
        const bool is_running = current->state == TASK_RUNNING;
        /*
         * To avoid deadlock in case we are in an interrupt here and this is a
         * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a
         * timeout to ensure this works in all contexts.
         *
         * Await approximately the worst case delay of the reporting thread (if
         * we are not interrupted).
         */
        int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt);

        other_info->task = current;
        do {
                if (is_running) {
                        /*
                         * Let lockdep know the real task is sleeping, to print
                         * the held locks (recall we turned lockdep off, so
                         * locking/unlocking @report_lock won't be recorded).
                         */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                }
                raw_spin_unlock_irqrestore(&report_lock, *flags);
                /*
                 * We cannot call schedule() since we also cannot reliably
                 * determine if sleeping here is permitted -- see in_atomic().
                 */
                udelay(1);
                raw_spin_lock_irqsave(&report_lock, *flags);
                if (timeout-- < 0) {
                        /*
                         * Abort. Reset @other_info->task to NULL, since it
                         * appears the other thread is still going to consume
                         * it. It will result in no verbose info printed for
                         * this task.
                         */
                        other_info->task = NULL;
                        break;
                }
                /*
                 * If @other_info is invalid, or @ptr or @current no longer
                 * match, then @other_info has been consumed and we may
                 * continue. If not, retry.
                 */
        } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&
                 other_info->task == current);
        if (is_running)
                set_current_state(TASK_RUNNING);
}
/* Populate @other_info; requires that the provided @other_info not be in use. */
static void prepare_report_producer(unsigned long *flags,
                                    const struct access_info *ai,
                                    struct other_info *other_info)
{
        raw_spin_lock_irqsave(&report_lock, *flags);

        /*
         * The same @other_infos entry cannot be used concurrently, because
         * there is a one-to-one mapping to watchpoint slots (@watchpoints in
         * core.c), and a watchpoint is only released for reuse after reporting
         * is done by the consumer of @other_info. Therefore, it is impossible
         * for another concurrent prepare_report_producer() to set the same
         * @other_info, and we are guaranteed exclusivity for the @other_infos
         * entry pointed to by @other_info.
         *
         * To check this property holds, size should never be non-zero here,
         * because every consumer of struct other_info resets size to 0 in
         * release_report().
         */
        WARN_ON(other_info->ai.size);

        other_info->ai = *ai;
        other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 2);

        if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
                set_other_info_task_blocking(flags, ai, other_info);

        raw_spin_unlock_irqrestore(&report_lock, *flags);
}
/* Awaits producer to fill @other_info and then returns. */
static bool prepare_report_consumer(unsigned long *flags,
                                    const struct access_info *ai,
                                    struct other_info *other_info)
{
        raw_spin_lock_irqsave(&report_lock, *flags);
        while (!other_info->ai.size) { /* Await valid @other_info. */
                raw_spin_unlock_irqrestore(&report_lock, *flags);
                cpu_relax();
                raw_spin_lock_irqsave(&report_lock, *flags);
        }

        /* Should always have a matching access based on watchpoint encoding. */
        if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size,
                                     (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))
                goto discard;

        if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,
                             (unsigned long)ai->ptr, ai->size)) {
                /*
                 * If the actual accesses do not match, this was a false
                 * positive due to watchpoint encoding.
                 */
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]);
                goto discard;
        }

        return true;

discard:
        release_report(flags, other_info);
        return false;
}
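/*
 * Summary of the producer/consumer handoff around @other_info: the producer
 * fills the entry under @report_lock and marks it valid by setting a non-zero
 * ai.size; the consumer spins until the entry becomes valid, prints the
 * combined report, and invalidates the entry again (ai.size = 0) in
 * release_report(), after which the entry may be reused.
 */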
/*
 * Depending on the report type either sets @other_info and returns false, or
 * awaits @other_info and returns true. If @other_info is not required for the
 * report type, simply acquires @report_lock and returns true.
 */
static noinline bool prepare_report(unsigned long *flags,
                                    enum kcsan_report_type type,
                                    const struct access_info *ai,
                                    struct other_info *other_info)
{
        switch (type) {
        case KCSAN_REPORT_CONSUMED_WATCHPOINT:
                prepare_report_producer(flags, ai, other_info);
                return false;
        case KCSAN_REPORT_RACE_SIGNAL:
                return prepare_report_consumer(flags, ai, other_info);
        default:
                /* @other_info not required; just acquire @report_lock. */
                raw_spin_lock_irqsave(&report_lock, *flags);
                return true;
        }
}
void kcsan_report(const volatile void *ptr, size_t size, int access_type,
                  enum kcsan_value_change value_change,
                  enum kcsan_report_type type, int watchpoint_idx)
{
        unsigned long flags = 0;
        const struct access_info ai = {
                .ptr            = ptr,
                .size           = size,
                .access_type    = access_type,
                .task_pid       = in_task() ? task_pid_nr(current) : -1,
                .cpu_id         = raw_smp_processor_id()
        };
        struct other_info *other_info = type == KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
                                        ? NULL : &other_infos[watchpoint_idx];

        kcsan_disable_current();
        if (WARN_ON(watchpoint_idx < 0 || watchpoint_idx >= ARRAY_SIZE(other_infos)))
                goto out;

        /*
         * Because we may generate reports when we're in scheduler code, the use
         * of printk() could deadlock. Until such time that all printing code
         * called in print_report() is scheduler-safe, accept the risk, and just
         * get our message out. As such, also disable lockdep to hide the
         * warning, and avoid disabling lockdep for the rest of the kernel.
         */
        lockdep_off();

        if (prepare_report(&flags, type, &ai, other_info)) {
                /*
                 * Never report if value_change is FALSE, only if it is
                 * either TRUE or MAYBE. In case of MAYBE, further filtering may
                 * be done once we know the full stack trace in print_report().
                 */
                bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
                                print_report(value_change, type, &ai, other_info);

                if (reported && panic_on_warn)
                        panic("panic_on_warn set ...\n");

                release_report(&flags, other_info);
        }

        lockdep_on();

out:
        kcsan_enable_current();
}