/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
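/*
 * For example, if lockdep_states.h lists exactly HARDIRQ and SOFTIRQ,
 * the enum above expands to:
 *
 *	LOCK_USED_IN_HARDIRQ = 0,	LOCK_USED_IN_HARDIRQ_READ = 1,
 *	LOCK_ENABLED_HARDIRQ = 2,	LOCK_ENABLED_HARDIRQ_READ = 3,
 *	LOCK_USED_IN_SOFTIRQ = 4,	LOCK_USED_IN_SOFTIRQ_READ = 5,
 *	LOCK_ENABLED_SOFTIRQ = 6,	LOCK_ENABLED_SOFTIRQ_READ = 7,
 *	LOCK_USED = 8,			LOCK_USAGE_STATES = 9
 */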

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
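/*
 * Given the enum ordering above, a usage bit decomposes as: bit 0 is
 * the READ flag (LOCK_USAGE_READ_MASK), bit 1 distinguishes ENABLED
 * from USED_IN (LOCK_USAGE_DIR_MASK), and the remaining bits select
 * the state. E.g. LOCK_ENABLED_HARDIRQ_READ (= 3) has both the read
 * and direction bits set.
 */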

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
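/*
 * With the same two example states, this yields one mask bit per usage
 * bit, e.g.:
 *
 *	LOCKF_USED_IN_HARDIRQ = (1 << LOCK_USED_IN_HARDIRQ),
 *	LOCKF_ENABLED_SOFTIRQ_READ = (1 << LOCK_ENABLED_SOFTIRQ_READ),
 *	LOCKF_USED = (1 << LOCK_USED),
 */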

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE
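/*
 * Note the trailing '|' in the LOCKDEP_STATE() definition above: each
 * state pulled in from lockdep_states.h appends another
 * "LOCKF_ENABLED_<state> |" term, and the final "0;" terminates the
 * expression. With HARDIRQ and SOFTIRQ as the states this compiles to:
 *
 *	static const unsigned long LOCKF_ENABLED_IRQ =
 *		LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0;
 *
 * The three initializers below use the same trick.
 */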

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in the required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
 * So, reduce the static allocations for lockdep-related structures so that
 * everything fits in the current required size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * every to-be-taken lock to each currently held lock's own dependency
 * table (if it's not there yet), and we check them for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)
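/* The factor of 5 presumably budgets an average of five held locks per chain. */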

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
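/*
 * get_usage_chars() emits two characters per state (one for the state
 * itself, one for its _READ variant) plus a terminating NUL, hence the
 * 1 + LOCK_USAGE_STATES/2 sizing: with two states that is a 5-byte
 * buffer.
 */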

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	unsigned long chain_lookup_hits;
	unsigned int  chain_lookup_misses;
	unsigned long hardirqs_on_events;
	unsigned long hardirqs_off_events;
	unsigned long redundant_hardirqs_on;
	unsigned long redundant_hardirqs_off;
	unsigned long softirqs_on_events;
	unsigned long softirqs_off_events;
	unsigned long redundant_softirqs_on;
	unsigned long redundant_softirqs_off;
	int           nr_unused_locks;
	unsigned int  nr_redundant_checks;
	unsigned int  nr_redundant;
	unsigned int  nr_cyclic_checks;
	unsigned int  nr_find_usage_forwards_checks;
	unsigned int  nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
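/*
 * Typical usage, assuming the caller already runs with IRQs disabled:
 *
 *	debug_atomic_inc(chain_lookup_misses);
 *
 * debug_atomic_read() walks every possible CPU to sum the counter, so
 * it is only suitable for slow paths such as /proc/lockdep_stats.
 */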

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}
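/*
 * Like debug_atomic_read(), debug_class_ops_read() sums across all
 * possible CPUs and belongs in slow paths only. The index arithmetic
 * in both helpers relies on 'class' pointing into the static
 * lock_classes[] array declared above.
 */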

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif