kcsan.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KCSAN_H
#define _LINUX_KCSAN_H

#include <linux/kcsan-checks.h>
#include <linux/types.h>

#ifdef CONFIG_KCSAN

/*
 * Context for each thread of execution: for tasks, this is stored in
 * task_struct, and interrupts access internal per-CPU storage.
 */
struct kcsan_ctx {
        int disable_count; /* disable counter */
        int atomic_next; /* number of following atomic ops */

        /*
         * We distinguish between: (a) nestable atomic regions that may contain
         * other nestable regions; and (b) flat atomic regions that do not keep
         * track of nesting. Both (a) and (b) are entirely independent of each
         * other, and a flat region may be started in a nestable region or
         * vice-versa.
         *
         * This is required because, for example, in the annotations for
         * seqlocks, we declare seqlock writer critical sections as (a) nestable
         * atomic regions and reader critical sections as (b) flat atomic
         * regions, yet we have encountered cases where seqlock reader critical
         * sections are contained within writer critical sections (the opposite
         * may also be possible).
         *
         * To support these cases, we independently track the depth of nesting
         * for (a), and whether the leaf level is flat for (b).
         */
        int atomic_nest_count;
        bool in_flat_atomic;

        /*
         * Access mask for all accesses if non-zero.
         */
        unsigned long access_mask;

        /* List of scoped accesses. */
        struct list_head scoped_accesses;
};

/**
 * kcsan_init - initialize KCSAN runtime
 */
void kcsan_init(void);

#else /* CONFIG_KCSAN */

static inline void kcsan_init(void) { }

#endif /* CONFIG_KCSAN */

#endif /* _LINUX_KCSAN_H */
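
To make the nestable/flat distinction in the struct comment concrete, here is a minimal sketch, not part of this header, of how the region annotations declared in <linux/kcsan-checks.h> are assumed to map onto the kcsan_ctx fields above: a flat region opened inside a nestable one mirrors the seqlock case the comment describes. The example_* helpers and kcsan_region_example() are hypothetical placeholders, and the field-update comments reflect the assumed runtime behavior rather than anything stated in this header.

/* Illustrative sketch only; example_* helpers are hypothetical stand-ins. */
#include <linux/kcsan-checks.h>

static void example_write_side(void) { /* hypothetical writer body */ }
static void example_read_side(void)  { /* hypothetical reader body */ }

static void kcsan_region_example(void)
{
        kcsan_nestable_atomic_begin();  /* (a): assumed to increment atomic_nest_count */
        example_write_side();

        kcsan_flat_atomic_begin();      /* (b) inside (a): assumed to set in_flat_atomic */
        example_read_side();
        kcsan_flat_atomic_end();        /* assumed to clear in_flat_atomic */

        kcsan_nestable_atomic_end();    /* assumed to decrement atomic_nest_count */

        kcsan_atomic_next(1);           /* assumed to set atomic_next for the next access */
}

With CONFIG_KCSAN disabled, these annotations are expected to compile away to no-ops, so such call sites should not need to be conditionally compiled.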