/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_KCSAN_CHECKS_H
#define _LINUX_KCSAN_CHECKS_H

/* Note: Only include what is already included by compiler.h. */
#include <linux/compiler_attributes.h>
#include <linux/types.h>

/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
#define KCSAN_ACCESS_WRITE	(1 << 0) /* Access is a write. */
#define KCSAN_ACCESS_COMPOUND	(1 << 1) /* Compounded read-write instrumentation. */
#define KCSAN_ACCESS_ATOMIC	(1 << 2) /* Access is atomic. */
/* The following are special, and never due to compiler instrumentation. */
#define KCSAN_ACCESS_ASSERT	(1 << 3) /* Access is an assertion. */
#define KCSAN_ACCESS_SCOPED	(1 << 4) /* Access is a scoped access. */

/*
 * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
 * even in compilation units that selectively disable KCSAN, but must use KCSAN
 * to validate access to an address. Never use these in header files!
 */
#ifdef CONFIG_KCSAN

/**
 * __kcsan_check_access - check generic access for races
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 */
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);

/**
 * kcsan_disable_current - disable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_disable_current(void);

/**
 * kcsan_enable_current - re-enable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_enable_current(void);
void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
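
/*
 * Example (illustrative sketch, not part of the kernel tree): suppress KCSAN
 * reports around a best-effort diagnostic read that is known to race but is
 * tolerated. Both calls support nesting, so this is safe even if the caller
 * has already disabled KCSAN. `stats_counter` is a hypothetical placeholder.
 *
 * .. code-block:: c
 *
 *	kcsan_disable_current();
 *	pr_debug("approx count: %lu\n", stats_counter); // racy, tolerated
 *	kcsan_enable_current();
 */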

/**
 * kcsan_nestable_atomic_begin - begin nestable atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_nestable_atomic_begin(void);

/**
 * kcsan_nestable_atomic_end - end nestable atomic region
 */
void kcsan_nestable_atomic_end(void);

/**
 * kcsan_flat_atomic_begin - begin flat atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_flat_atomic_begin(void);

/**
 * kcsan_flat_atomic_end - end flat atomic region
 */
void kcsan_flat_atomic_end(void);
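
/*
 * Example (illustrative sketch): treat a batch of plain accesses as atomic,
 * e.g. when they are protected by a mechanism KCSAN cannot see. Nestable
 * regions may be nested within one another; flat regions may not.
 * `seq->sequence` is a hypothetical placeholder.
 *
 * .. code-block:: c
 *
 *	kcsan_nestable_atomic_begin();
 *	seq->sequence++; // plain write, considered atomic within the region
 *	kcsan_nestable_atomic_end();
 */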

/**
 * kcsan_atomic_next - consider following accesses as atomic
 *
 * Force treating the next n memory accesses for the current context as atomic
 * operations.
 *
 * @n: number of following memory accesses to treat as atomic.
 */
void kcsan_atomic_next(int n);
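
/*
 * Example (illustrative sketch): mark only the single next access as atomic,
 * without opening a full region; ASSERT_EXCLUSIVE_BITS() below uses the same
 * pattern. `flags` and `val` are hypothetical placeholders.
 *
 * .. code-block:: c
 *
 *	kcsan_atomic_next(1);
 *	val = flags; // this one plain read is treated as atomic
 */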

/**
 * kcsan_set_access_mask - set access mask
 *
 * Set the access mask for all accesses for the current context if non-zero.
 * Only value changes to bits set in the mask will be reported.
 *
 * @mask: bitmask
 */
void kcsan_set_access_mask(unsigned long mask);

/* Scoped access information. */
struct kcsan_scoped_access {
	struct list_head list;
	const volatile void *ptr;
	size_t size;
	int type;
};

/*
 * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
 * out of scope; relies on attribute "cleanup", which is supported by all
 * compilers that support KCSAN.
 */
#define __kcsan_cleanup_scoped                                                 \
	__maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))

/**
 * kcsan_begin_scoped_access - begin scoped access
 *
 * Begin scoped access and initialize @sa, which will cause KCSAN to
 * continuously check the memory range in the current thread until
 * kcsan_end_scoped_access() is called for @sa.
 *
 * Scoped accesses are implemented by appending @sa to an internal list for the
 * current execution context, and then checked on every call into the KCSAN
 * runtime.
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 * @sa: struct kcsan_scoped_access to use for the scope of the access
 */
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa);

/**
 * kcsan_end_scoped_access - end scoped access
 *
 * End a scoped access, which will stop KCSAN checking the memory range.
 * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
 *
 * @sa: a previously initialized struct kcsan_scoped_access
 */
void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
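
/*
 * Example (illustrative sketch): manually bracket a region in which KCSAN
 * should keep checking a memory range, without relying on the cleanup
 * attribute; the ASSERT_EXCLUSIVE_*_SCOPED() macros below wrap this pair for
 * the common case. `obj` and `update_state()` are hypothetical placeholders.
 *
 * .. code-block:: c
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj->state, sizeof(obj->state),
 *				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ASSERT, &sa);
 *	update_state(obj); // any concurrent access to obj->state is reported
 *	kcsan_end_scoped_access(&sa);
 */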

#else /* CONFIG_KCSAN */

static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
					int type) { }

static inline void kcsan_disable_current(void)		{ }
static inline void kcsan_enable_current(void)		{ }
static inline void kcsan_enable_current_nowarn(void)	{ }
static inline void kcsan_nestable_atomic_begin(void)	{ }
static inline void kcsan_nestable_atomic_end(void)	{ }
static inline void kcsan_flat_atomic_begin(void)	{ }
static inline void kcsan_flat_atomic_end(void)		{ }
static inline void kcsan_atomic_next(int n)		{ }
static inline void kcsan_set_access_mask(unsigned long mask) { }

struct kcsan_scoped_access { };
#define __kcsan_cleanup_scoped __maybe_unused
static inline struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa) { return sa; }
static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }

#endif /* CONFIG_KCSAN */

#ifdef __SANITIZE_THREAD__
/*
 * Only calls into the runtime when the particular compilation unit has KCSAN
 * instrumentation enabled. May be used in header files.
 */
#define kcsan_check_access __kcsan_check_access

/*
 * Only use these to disable KCSAN for accesses in the current compilation unit;
 * calls into libraries may still perform KCSAN checks.
 */
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
#else
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
				      int type) { }
static inline void __kcsan_enable_current(void)  { }
static inline void __kcsan_disable_current(void) { }
#endif

/**
 * __kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)

/**
 * __kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_write(ptr, size)                                         \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * __kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read_write(ptr, size)                                    \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)

/**
 * kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_write(ptr, size)                                           \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read_write(ptr, size)                                      \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)

/*
 * Check for atomic accesses: if atomic accesses are not ignored, this simply
 * aliases to kcsan_check_access(), otherwise becomes a no-op.
 */
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
#define kcsan_check_atomic_read(...)		do { } while (0)
#define kcsan_check_atomic_write(...)		do { } while (0)
#define kcsan_check_atomic_read_write(...)	do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size)                                     \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
#define kcsan_check_atomic_write(ptr, size)                                    \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
#define kcsan_check_atomic_read_write(ptr, size)                               \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
#endif
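
/*
 * Example (illustrative sketch): these helpers are intended for wrappers
 * around uninstrumented atomic primitives, so KCSAN still sees the access.
 * `my_atomic_read()` is a hypothetical wrapper, shown here only to illustrate
 * the pattern:
 *
 * .. code-block:: c
 *
 *	static __always_inline int my_atomic_read(const atomic_t *v)
 *	{
 *		kcsan_check_atomic_read(v, sizeof(*v));
 *		return arch_atomic_read(v); // uninstrumented arch primitive
 *	}
 */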

/**
 * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
 *
 * Assert that there are no concurrent writes to @var; other readers are
 * allowed. This assertion can be used to specify properties of concurrent code,
 * where violation cannot be detected as a normal data race.
 *
 * For example, if we only have a single writer, but multiple concurrent
 * readers, to avoid data races, all these accesses must be marked; even
 * concurrent marked writes racing with the single writer are bugs.
 * Unfortunately, due to being marked, they are no longer data races. For cases
 * like these, we can use the macro as follows:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		ASSERT_EXCLUSIVE_WRITER(shared_foo);
 *		WRITE_ONCE(shared_foo, ...);
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void reader(void) {
 *		// update_foo_lock does not need to be held!
 *		... = READ_ONCE(shared_foo);
 *	}
 *
 * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
 * checking if a clear scope where no concurrent writes are expected exists.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)

/*
 * Helper macros for implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
 * expected to be unique for the scope in which instances of kcsan_scoped_access
 * are declared.
 */
#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id)                               \
	struct kcsan_scoped_access __kcsan_scoped_name(id, _)                 \
		__kcsan_cleanup_scoped;                                        \
	struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p)         \
		__maybe_unused = kcsan_begin_scoped_access(                    \
			&(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type),    \
			&__kcsan_scoped_name(id, _))

/**
 * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to @var for the duration of the
 * scope in which it is introduced. This provides a better way to fully cover
 * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
 * increases the likelihood for KCSAN to detect racing accesses.
 *
 * For example, it allows finding race-condition bugs that only occur due to
 * state changes within the scope itself:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		{
 *			ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
 *			WRITE_ONCE(shared_foo, 42);
 *			...
 *			// shared_foo should still be 42 here!
 *		}
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void buggy(void) {
 *		if (READ_ONCE(shared_foo) == 42)
 *			WRITE_ONCE(shared_foo, 1); // bug!
 *	}
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)

/**
 * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
 *
 * Assert that there are no concurrent accesses to @var (no readers nor
 * writers). This assertion can be used to specify properties of concurrent
 * code, where violation cannot be detected as a normal data race.
 *
 * For example, where exclusive access is expected after determining no other
 * users of an object are left, but the object is not actually freed. We can
 * check that this property actually holds as follows:
 *
 * .. code-block:: c
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS(*obj);
 *		do_some_cleanup(obj);
 *		release_for_reuse(obj);
 *	}
 *
 * Note:
 *
 * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
 *    checking if a clear scope where no concurrent accesses are expected exists.
 *
 * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
 *    fit to detect use-after-free bugs.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)

/**
 * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
 *
 * Assert that there are no concurrent accesses to @var (no readers nor writers)
 * for the entire duration of the scope in which it is introduced. This provides
 * a better way to fully cover the enclosing scope, compared to multiple
 * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
 * racing accesses.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
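
/*
 * Example (illustrative sketch): the refcount pattern from
 * ASSERT_EXCLUSIVE_ACCESS() above, with the scoped variant covering the whole
 * cleanup scope; `obj`, `do_some_cleanup()` and `release_for_reuse()` are
 * hypothetical placeholders.
 *
 * .. code-block:: c
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS_SCOPED(*obj);
 *		do_some_cleanup(obj); // *obj checked for the entire scope
 *		release_for_reuse(obj);
 *	}
 */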

/**
 * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
 *
 * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to a subset of bits in @var;
 * concurrent readers are permitted. This assertion captures more detailed
 * bit-level properties, compared to the other (word granularity) assertions.
 * Only the bits set in @mask are checked for concurrent modifications, while
 * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
 * are ignored.
 *
 * Use this for variables, where some bits must not be modified concurrently,
 * yet other bits are expected to be modified concurrently.
 *
 * For example, variables where, after initialization, some bits are read-only,
 * but other bits may still be modified concurrently. A reader may wish to
 * assert that this is true as follows:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
 * to access the masked bits only, and KCSAN optimistically assumes it is
 * therefore safe, even in the presence of data races, and marking it with
 * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
 * it may still be advisable to do so, since we cannot reason about all compiler
 * optimizations when it comes to bit manipulations (on the reader and writer
 * side). If you are sure nothing can go wrong, we can write the above simply
 * as:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Another example, where this may be used, is when certain bits of @var may
 * only be modified when holding the appropriate lock, but other bits may still
 * be modified concurrently. Writers, where other bits may change concurrently,
 * could use the assertion as follows:
 *
 * .. code-block:: c
 *
 *	spin_lock(&foo_lock);
 *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
 *	old_flags = flags;
 *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
 *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
 *	spin_unlock(&foo_lock);
 *
 * @var: variable to assert on
 * @mask: only check for modifications to bits set in @mask
 */
#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
	do {                                                                   \
		kcsan_set_access_mask(mask);                                   \
		__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
		kcsan_set_access_mask(0);                                      \
		kcsan_atomic_next(1);                                          \
	} while (0)

#endif /* _LINUX_KCSAN_CHECKS_H */