lockdep_internals.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES,
};
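
/*
 * Illustrative expansion, not part of the original header: assuming
 * lockdep_states.h lists HARDIRQ and SOFTIRQ (as in mainline), the
 * enum above expands to
 *
 *	LOCK_USED_IN_HARDIRQ = 0,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *	LOCK_USED_IN_SOFTIRQ,
 *	LOCK_USED_IN_SOFTIRQ_READ,
 *	LOCK_ENABLED_SOFTIRQ,
 *	LOCK_ENABLED_SOFTIRQ_READ,
 *	LOCK_USED = 8,
 *	LOCK_USED_READ,
 *	LOCK_USAGE_STATES,	(== 10)
 */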

/* states after LOCK_USED_READ are not traced and printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
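
/*
 * Worked example, added for clarity: each usage bit encodes
 * <state, direction, read> in its two low bits. Taking
 * LOCK_ENABLED_HARDIRQ_READ (value 3 in the expansion above):
 *
 *	bit & LOCK_USAGE_READ_MASK	-> 1 (read acquisition)
 *	bit & LOCK_USAGE_DIR_MASK	-> 2 (ENABLED rather than USED_IN)
 *	bit >> 2			-> 0 (the HARDIRQ state index)
 */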

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};
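
/*
 * For illustration (again assuming the HARDIRQ/SOFTIRQ expansion
 * above): __LOCKF() turns each usage bit into a single-bit mask, e.g.
 *
 *	LOCKF_USED_IN_HARDIRQ      == 1 << 0 == 0x001
 *	LOCKF_ENABLED_HARDIRQ_READ == 1 << 3 == 0x008
 *	LOCKF_USED                 == 1 << 8 == 0x100
 */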

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE
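
/*
 * How the repeated #include trick works (explanatory note, not in the
 * original): lockdep_states.h consists solely of LOCKDEP_STATE(...)
 * lines, so each inclusion pastes one OR-term per state into the
 * initializer, and the trailing 0 terminates the expression, e.g.
 *
 *	static const unsigned long LOCKF_ENABLED_IRQ =
 *		LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ |
 *		0;
 */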

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in the required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up
 * problems. So, reduce the static allocations for lockdep-related
 * structures so that everything fits in the current required size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to all currently held locks' own
 * dependency tables (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL
#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)
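
/*
 * Quick arithmetic for the non-small defaults above, added for
 * illustration: MAX_LOCKDEP_CHAINS_BITS == 16 gives
 * 1UL << 16 == 65536 chains, and MAX_LOCKDEP_CHAIN_HLOCKS reserves
 * 65536 * 5 == 327680 held-lock slots shared among all chains.
 */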

extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
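
/*
 * Aside, not in the original header: with the two mainline IRQ states,
 * LOCK_USAGE_CHARS is 2*2 + 1 == 5, i.e. two usage characters per
 * state plus a terminating NUL. These are the '.', '-', '+' and '?'
 * markers shown in lockdep reports (see
 * Documentation/locking/lockdep-design.rst for their meanings).
 */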

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;
extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;
extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;
extern unsigned long max_lock_class_idx;

extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
extern unsigned long lock_classes_in_use[];

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	unsigned long	chain_lookup_hits;
	unsigned int	chain_lookup_misses;
	unsigned long	hardirqs_on_events;
	unsigned long	hardirqs_off_events;
	unsigned long	redundant_hardirqs_on;
	unsigned long	redundant_hardirqs_off;
	unsigned long	softirqs_on_events;
	unsigned long	softirqs_off_events;
	unsigned long	redundant_softirqs_on;
	unsigned long	redundant_softirqs_off;
	int		nr_unused_locks;
	unsigned int	nr_redundant_checks;
	unsigned int	nr_redundant;
	unsigned int	nr_cyclic_checks;
	unsigned int	nr_find_usage_forwards_checks;
	unsigned int	nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
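
/*
 * Usage sketch, illustrative rather than part of the header: writers
 * bump only their own CPU's counter (cheap, cache-local, and expected
 * to run with IRQs disabled, hence the WARN_ON_ONCE), while readers
 * pay the cost of summing over all possible CPUs:
 *
 *	debug_atomic_inc(hardirqs_on_events);		   (hot path)
 *	total = debug_atomic_read(hardirqs_on_events);	   (slow path)
 */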

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}
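
/*
 * Note, added for clarity: both helpers rely on 'class' pointing into
 * the static lock_classes[] array, so the pointer subtraction yields
 * the class's index into the per-cpu lock_class_ops[] counters.
 */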

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif