// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
        .disable_count = 0,
        .atomic_next = 0,
        .atomic_nest_count = 0,
        .in_flat_atomic = false,
        .access_mask = 0,
        .scoped_accesses = {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. larger number of simultaneous watchpoints without sacrificing
 *    performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(u32, kcsan_rand_state);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
                                                      size_t size,
                                                      bool expect_write,
                                                      long *encoded_watchpoint)
{
        const int slot = watchpoint_slot(addr);
        const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
        atomic_long_t *watchpoint;
        unsigned long wp_addr_masked;
        size_t wp_size;
        bool is_write;
        int i;

        BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

        for (i = 0; i < NUM_SLOTS; ++i) {
                watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
                *encoded_watchpoint = atomic_long_read(watchpoint);
                if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
                                       &wp_size, &is_write))
                        continue;

                if (expect_write && !is_write)
                        continue;

                /* Check if the watchpoint matches the access. */
                if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
                        return watchpoint;
        }

        return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
        const int slot = watchpoint_slot(addr);
        const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
        atomic_long_t *watchpoint;
        int i;

        /* Check slot index logic, ensuring we stay within array bounds. */
        BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
        BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

        for (i = 0; i < NUM_SLOTS; ++i) {
                long expect_val = INVALID_WATCHPOINT;

                /* Try to acquire this slot. */
                watchpoint = &watchpoints[SLOT_IDX(slot, i)];
                if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
                        return watchpoint;
        }

        return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 * 1. another thread already consumed the watchpoint;
 * 2. the thread that set up the watchpoint already removed it;
 * 3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
        return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
        return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
        atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
        /*
         * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
         * also result in calls that generate warnings in uaccess regions.
         */
        return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
        struct kcsan_ctx *ctx = get_ctx();
        struct list_head *prev_save = ctx->scoped_accesses.prev;
        struct kcsan_scoped_access *scoped_access;

        ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
        list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
                __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
        ctx->scoped_accesses.prev = prev_save;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
        if (type & KCSAN_ACCESS_ATOMIC)
                return true;

        /*
         * Unless explicitly declared atomic, never consider an assertion access
         * as atomic. This allows using them also in atomic regions, such as
         * seqlocks, without implicitly changing their semantics.
         */
        if (type & KCSAN_ACCESS_ASSERT)
                return false;

        if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
            (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
            !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
                return true; /* Assume aligned writes up to word size are atomic. */

        if (ctx->atomic_next > 0) {
                /*
                 * Because we do not have separate contexts for nested
                 * interrupts, in case atomic_next is set, we simply assume that
                 * the outer interrupt set atomic_next. In the worst case, we
                 * will conservatively consider operations as atomic. This is a
                 * reasonable trade-off to make, since this case should be
                 * extremely rare; however, even if extremely rare, it could
                 * lead to false positives otherwise.
                 */
                if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
                        --ctx->atomic_next; /* in task, or outer interrupt */
                return true;
        }

        return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}

static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
        /*
         * Never set up watchpoints when memory operations are atomic.
         *
         * Need to check this first, before the kcsan_skip check below: (1)
         * atomics should not count towards skipped instructions, and (2) we
         * must actually decrement kcsan_atomic_next for a consecutive
         * instruction stream.
         */
        if (is_atomic(ptr, size, type, ctx))
                return false;

        if (this_cpu_dec_return(kcsan_skip) >= 0)
                return false;

        /*
         * NOTE: If we get here, kcsan_skip must always be reset in slow path
         * via reset_kcsan_skip() to avoid underflow.
         */

        /* this operation should be watched */
        return true;
}

/*
 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
 * congruential generator, using constants from "Numerical Recipes".
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
        u32 state = this_cpu_read(kcsan_rand_state);

        state = 1664525 * state + 1013904223;
        this_cpu_write(kcsan_rand_state, state);

        return state % ep_ro;
}
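
/*
 * Reset the per-CPU instruction-skip counter to kcsan_skip_watch; if
 * CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE is enabled, subtract a pseudo-random
 * offset so that watchpoints are not set up at entirely predictable intervals.
 */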
static inline void reset_kcsan_skip(void)
{
        long skip_count = kcsan_skip_watch -
                          (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
                                   kcsan_prandom_u32_max(kcsan_skip_watch) :
                                   0);

        this_cpu_write(kcsan_skip, skip_count);
}
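
/* KCSAN is enabled globally, and the current context has not disabled it. */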
static __always_inline bool kcsan_is_enabled(void)
{
        return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
        unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
        /* For certain access types, skew the random delay to be longer. */
        unsigned int skew_delay_order =
                (type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

        delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
                         kcsan_prandom_u32_max(delay >> skew_delay_order) :
                         0;
        udelay(delay);
}
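
/*
 * Save/restore the IRQ-trace state of the given task around KCSAN's runtime,
 * so that potentially useful irqtrace information is not clobbered by KCSAN
 * itself (only relevant with CONFIG_TRACE_IRQFLAGS).
 */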
void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

/*
 * Pull everything together: check_access() below contains the
 * performance-critical operations; the fast-path (including check_access)
 * functions should all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore():
 * they do not access any user memory themselves, but instrumentation is still
 * emitted in UACCESS regions, from which they may therefore be called.
 */
static noinline void kcsan_found_watchpoint(const volatile void *ptr,
                                            size_t size,
                                            int type,
                                            atomic_long_t *watchpoint,
                                            long encoded_watchpoint)
{
        unsigned long flags;
        bool consumed;

        if (!kcsan_is_enabled())
                return;

        /*
         * The access_mask check relies on value-change comparison. To avoid
         * reporting a race where e.g. the writer set up the watchpoint, but the
         * reader has access_mask!=0, we have to ignore the found watchpoint.
         */
        if (get_ctx()->access_mask != 0)
                return;

        /*
         * Consume the watchpoint as soon as possible, to minimize the chances
         * of !consumed. Consuming the watchpoint must always be guarded by the
         * kcsan_is_enabled() check, as otherwise we might erroneously trigger
         * reports when disabled.
         */
        consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

        /* keep this after try_consume_watchpoint */
        flags = user_access_save();

        if (consumed) {
                kcsan_save_irqtrace(current);
                kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
                             KCSAN_REPORT_CONSUMED_WATCHPOINT,
                             watchpoint - watchpoints);
                kcsan_restore_irqtrace(current);
        } else {
                /*
                 * The other thread may not print any diagnostics, as it has
                 * already removed the watchpoint, or another thread consumed
                 * the watchpoint before this thread.
                 */
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
        }

        if ((type & KCSAN_ACCESS_ASSERT) != 0)
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
        else
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

        user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
        const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
        const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
        atomic_long_t *watchpoint;
        union {
                u8 _1;
                u16 _2;
                u32 _4;
                u64 _8;
        } expect_value;
        unsigned long access_mask;
        enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
        unsigned long ua_flags = user_access_save();
        unsigned long irq_flags = 0;

        /*
         * Always reset kcsan_skip counter in slow-path to avoid underflow; see
         * should_watch().
         */
        reset_kcsan_skip();

        if (!kcsan_is_enabled())
                goto out;

        /*
         * Special atomic rules: unlikely to be true, so we check them here in
         * the slow-path, and not in the fast-path in is_atomic(). Call after
         * kcsan_is_enabled(), as we may access memory that is not yet
         * initialized during early boot.
         */
        if (!is_assert && kcsan_is_atomic_special(ptr))
                goto out;

        if (!check_encodable((unsigned long)ptr, size)) {
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
                goto out;
        }

        /*
         * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
         * runtime is entered for every memory access, and potentially useful
         * information is lost if dirtied by KCSAN.
         */
        kcsan_save_irqtrace(current);
        if (!kcsan_interrupt_watcher)
                local_irq_save(irq_flags);

        watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
        if (watchpoint == NULL) {
                /*
                 * Out of capacity: the size of 'watchpoints' and the frequency
                 * with which should_watch() returns true should be tweaked so
                 * that this case happens very rarely.
                 */
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
                goto out_unlock;
        }

        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

        /*
         * Read the current value, to later check and infer a race if the data
         * was modified via a non-instrumented access, e.g. from a device.
         */
        expect_value._8 = 0;
        switch (size) {
        case 1:
                expect_value._1 = READ_ONCE(*(const u8 *)ptr);
                break;
        case 2:
                expect_value._2 = READ_ONCE(*(const u16 *)ptr);
                break;
        case 4:
                expect_value._4 = READ_ONCE(*(const u32 *)ptr);
                break;
        case 8:
                expect_value._8 = READ_ONCE(*(const u64 *)ptr);
                break;
        default:
                break; /* ignore; we do not diff the values */
        }

        if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
                kcsan_disable_current();
                pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
                       is_write ? "write" : "read", size, ptr,
                       watchpoint_slot((unsigned long)ptr),
                       encode_watchpoint((unsigned long)ptr, size, is_write));
                kcsan_enable_current();
        }

        /*
         * Delay this thread, to increase probability of observing a racy
         * conflicting access.
         */
        delay_access(type);

        /*
         * Re-read value, and check if it is as expected; if not, we infer a
         * racy access.
         */
        access_mask = get_ctx()->access_mask;
        switch (size) {
        case 1:
                expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
                if (access_mask)
                        expect_value._1 &= (u8)access_mask;
                break;
        case 2:
                expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
                if (access_mask)
                        expect_value._2 &= (u16)access_mask;
                break;
        case 4:
                expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
                if (access_mask)
                        expect_value._4 &= (u32)access_mask;
                break;
        case 8:
                expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
                if (access_mask)
                        expect_value._8 &= (u64)access_mask;
                break;
        default:
                break; /* ignore; we do not diff the values */
        }

        /* Were we able to observe a value-change? */
        if (expect_value._8 != 0)
                value_change = KCSAN_VALUE_CHANGE_TRUE;

        /* Check if this access raced with another. */
        if (!consume_watchpoint(watchpoint)) {
                /*
                 * Depending on the access type, map a value_change of MAYBE to
                 * TRUE (always report) or FALSE (never report).
                 */
                if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
                        if (access_mask != 0) {
                                /*
                                 * For access with access_mask, we require a
                                 * value-change, as it is likely that races on
                                 * ~access_mask bits are expected.
                                 */
                                value_change = KCSAN_VALUE_CHANGE_FALSE;
                        } else if (size > 8 || is_assert) {
                                /* Always assume a value-change. */
                                value_change = KCSAN_VALUE_CHANGE_TRUE;
                        }
                }

                /*
                 * No need to increment 'data_races' counter, as the racing
                 * thread already did.
                 *
                 * Count 'assert_failures' for each failed ASSERT access,
                 * therefore both this thread and the racing thread may
                 * increment this counter.
                 */
                if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
                        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

                kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
                             watchpoint - watchpoints);
        } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
                /* Inferring a race, since the value should not have changed. */

                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
                if (is_assert)
                        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

                if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
                        kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
                                     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
                                     watchpoint - watchpoints);
        }

        /*
         * Remove watchpoint; must be after reporting, since the slot may be
         * reused after this point.
         */
        remove_watchpoint(watchpoint);
        atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
out_unlock:
        if (!kcsan_interrupt_watcher)
                local_irq_restore(irq_flags);
        kcsan_restore_irqtrace(current);
out:
        user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
                                         int type)
{
        const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
        atomic_long_t *watchpoint;
        long encoded_watchpoint;

        /*
         * Do nothing for 0 sized check; this comparison will be optimized out
         * for constant sized instrumentation (__tsan_{read,write}N).
         */
        if (unlikely(size == 0))
                return;

        /*
         * Avoid user_access_save in fast-path: find_watchpoint is safe without
         * user_access_save, as the address that ptr points to is only used to
         * check if a watchpoint exists; ptr is never dereferenced.
         */
        watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
                                     &encoded_watchpoint);
        /*
         * It is safe to check kcsan_is_enabled() after find_watchpoint in the
         * slow-path, as long as no state changes that cause a race to be
         * detected and reported have occurred until kcsan_is_enabled() is
         * checked.
         */
        if (unlikely(watchpoint != NULL))
                kcsan_found_watchpoint(ptr, size, type, watchpoint,
                                       encoded_watchpoint);
        else {
                struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

                if (unlikely(should_watch(ptr, size, type, ctx)))
                        kcsan_setup_watchpoint(ptr, size, type);
                else if (unlikely(ctx->scoped_accesses.prev))
                        kcsan_check_scoped_accesses();
        }
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
        int cpu;

        BUG_ON(!in_task());

        for_each_possible_cpu(cpu)
                per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();

        /*
         * We are in the init task, and no other tasks should be running;
         * WRITE_ONCE without memory barrier is sufficient.
         */
        if (kcsan_early_enable) {
                pr_info("enabled early\n");
                WRITE_ONCE(kcsan_enabled, true);
        }
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
        ++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
        if (get_ctx()->disable_count-- == 0) {
                /*
                 * Warn if kcsan_enable_current() calls are unbalanced with
                 * kcsan_disable_current() calls, which causes disable_count to
                 * become negative and should not happen.
                 */
                kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
                kcsan_disable_current(); /* disable to generate warning */
                WARN(1, "Unbalanced %s()", __func__);
                kcsan_enable_current();
        }
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
        if (get_ctx()->disable_count-- == 0)
                kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

void kcsan_nestable_atomic_begin(void)
{
        /*
         * Do *not* check and warn if we are in a flat atomic region: nestable
         * and flat atomic regions are independent from each other.
         * See the struct kcsan_ctx comments in include/linux/kcsan.h for more
         * details.
         */

        ++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
        if (get_ctx()->atomic_nest_count-- == 0) {
                /*
                 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
                 * kcsan_nestable_atomic_begin() calls, which causes
                 * atomic_nest_count to become negative and should not happen.
                 */
                kcsan_nestable_atomic_begin(); /* restore to 0 */
                kcsan_disable_current(); /* disable to generate warning */
                WARN(1, "Unbalanced %s()", __func__);
                kcsan_enable_current();
        }
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);
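
/*
 * Flat atomic region: all accesses until kcsan_flat_atomic_end() are treated
 * as atomic; unlike nestable regions, these calls do not nest.
 */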
void kcsan_flat_atomic_begin(void)
{
        get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
        get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);
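
/* Treat the next n memory accesses in this context as atomic; see is_atomic(). */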
void kcsan_atomic_next(int n)
{
        get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);
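
/*
 * Set the access mask for the current context: only changes to bits set in the
 * mask are reported as races; see the access_mask handling in
 * kcsan_found_watchpoint() and kcsan_setup_watchpoint().
 */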
void kcsan_set_access_mask(unsigned long mask)
{
        get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
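
/*
 * Begin a scoped access: the access described by sa is re-checked on
 * subsequent entries into the KCSAN runtime for this context, until it is
 * ended with kcsan_end_scoped_access().
 */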
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
                          struct kcsan_scoped_access *sa)
{
        struct kcsan_ctx *ctx = get_ctx();

        __kcsan_check_access(ptr, size, type);

        ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

        INIT_LIST_HEAD(&sa->list);
        sa->ptr = ptr;
        sa->size = size;
        sa->type = type;

        if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
                INIT_LIST_HEAD(&ctx->scoped_accesses);
        list_add(&sa->list, &ctx->scoped_accesses);

        ctx->disable_count--;
        return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
        struct kcsan_ctx *ctx = get_ctx();

        if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
                return;

        ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

        list_del(&sa->list);
        if (list_empty(&ctx->scoped_accesses))
                /*
                 * Ensure we do not enter kcsan_check_scoped_accesses()
                 * slow-path if unnecessary, and avoid requiring list_empty()
                 * in the fast-path (to avoid a READ_ONCE() and potential
                 * uaccess warning).
                 */
                ctx->scoped_accesses.prev = NULL;

        ctx->disable_count--;

        __kcsan_check_access(sa->ptr, sa->size, sa->type);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
        check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */
#define DEFINE_TSAN_READ_WRITE(size) \
        void __tsan_read##size(void *ptr); \
        void __tsan_read##size(void *ptr) \
        { \
                check_access(ptr, size, 0); \
        } \
        EXPORT_SYMBOL(__tsan_read##size); \
        void __tsan_unaligned_read##size(void *ptr) \
                __alias(__tsan_read##size); \
        EXPORT_SYMBOL(__tsan_unaligned_read##size); \
        void __tsan_write##size(void *ptr); \
        void __tsan_write##size(void *ptr) \
        { \
                check_access(ptr, size, KCSAN_ACCESS_WRITE); \
        } \
        EXPORT_SYMBOL(__tsan_write##size); \
        void __tsan_unaligned_write##size(void *ptr) \
                __alias(__tsan_write##size); \
        EXPORT_SYMBOL(__tsan_unaligned_write##size); \
        void __tsan_read_write##size(void *ptr); \
        void __tsan_read_write##size(void *ptr) \
        { \
                check_access(ptr, size, \
                             KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
        } \
        EXPORT_SYMBOL(__tsan_read_write##size); \
        void __tsan_unaligned_read_write##size(void *ptr) \
                __alias(__tsan_read_write##size); \
        EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);

void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
        check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
        check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1], however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 */
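/*
 * For example, READ_ONCE() and WRITE_ONCE() compile down to volatile accesses;
 * compilers that distinguish volatile accesses instrument them with the
 * __tsan_volatile_*() calls below, where aligned accesses no larger than
 * sizeof(long long) are then treated as atomic.
 */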
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size) \
        void __tsan_volatile_read##size(void *ptr); \
        void __tsan_volatile_read##size(void *ptr) \
        { \
                const bool is_atomic = size <= sizeof(long long) && \
                                       IS_ALIGNED((unsigned long)ptr, size); \
                if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
                        return; \
                check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
        } \
        EXPORT_SYMBOL(__tsan_volatile_read##size); \
        void __tsan_unaligned_volatile_read##size(void *ptr) \
                __alias(__tsan_volatile_read##size); \
        EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size); \
        void __tsan_volatile_write##size(void *ptr); \
        void __tsan_volatile_write##size(void *ptr) \
        { \
                const bool is_atomic = size <= sizeof(long long) && \
                                       IS_ALIGNED((unsigned long)ptr, size); \
                if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
                        return; \
                check_access(ptr, size, \
                             KCSAN_ACCESS_WRITE | \
                             (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
        } \
        EXPORT_SYMBOL(__tsan_volatile_write##size); \
        void __tsan_unaligned_volatile_write##size(void *ptr) \
                __alias(__tsan_volatile_write##size); \
        EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc);
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);

void __tsan_func_exit(void);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);

void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);

/*
 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
 *
 * Normal kernel code _should not_ be using them directly, but some
 * architectures may implement some or all atomics using the compilers'
 * builtins.
 *
 * Note: If an architecture decides to fully implement atomics using the
 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
 * atomic-instrumented) is no longer necessary.
 *
 * TSAN instrumentation replaces atomic accesses with calls to any of the below
 * functions, whose job is to also execute the operation itself.
 */
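/*
 * For example, a 32-bit __atomic_load_n(&x, __ATOMIC_RELAXED) is replaced with
 * a call to __tsan_atomic32_load(&x, __ATOMIC_RELAXED), which checks the
 * access as atomic and then performs the load via the builtin.
 */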
#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \
        u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \
        u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
                } \
                return __atomic_load_n(ptr, memorder); \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_load); \
        void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
        void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, \
                                     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
                } \
                __atomic_store_n(ptr, v, memorder); \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_store)

#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix) \
        u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
        u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, \
                                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
                                     KCSAN_ACCESS_ATOMIC); \
                } \
                return __atomic_##op##suffix(ptr, v, memorder); \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_##op)

/*
 * Note: CAS operations are always classified as write, even in case they
 * fail. We cannot perform check_access() after a write, as it might lead to
 * false positives, in cases such as:
 *
 *	T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
 *
 *	T1: if (__atomic_load_n(&p->flag, ...)) {
 *		modify *p;
 *		p->flag = 0;
 *	    }
 *
 * The only downside is that, if there are 3 threads, with one CAS that
 * succeeds, another CAS that fails, and an unmarked racing operation, we may
 * point at the wrong CAS as the source of the race. However, if we assume that
 * all CAS can succeed in some other execution, the data race is still valid.
 */
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak) \
        int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
                                                              u##bits val, int mo, int fail_mo); \
        int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
                                                              u##bits val, int mo, int fail_mo) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, \
                                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
                                     KCSAN_ACCESS_ATOMIC); \
                } \
                return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits) \
        u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
                                                           int mo, int fail_mo); \
        u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
                                                           int mo, int fail_mo) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, \
                                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
                                     KCSAN_ACCESS_ATOMIC); \
                } \
                __atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
                return exp; \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)

#define DEFINE_TSAN_ATOMIC_OPS(bits) \
        DEFINE_TSAN_ATOMIC_LOAD_STORE(bits); \
        DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, ); \
        DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0); \
        DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1); \
        DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
DEFINE_TSAN_ATOMIC_OPS(64);

void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
        __atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

void __tsan_atomic_signal_fence(int memorder);
void __tsan_atomic_signal_fence(int memorder) { }
EXPORT_SYMBOL(__tsan_atomic_signal_fence);
  921. EXPORT_SYMBOL(__tsan_atomic_signal_fence);