/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL
#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)
/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets the highly
 * contended rq->lock: double_rq_lock() acquires it both at subclass 0 and
 * at single depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2
/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;
#define LOCKSTAT_POINTS		4
/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};
#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};
/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};
#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_off(void);
extern void lockdep_on(void);
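
/*
 * Example (illustrative sketch, not part of the API above): lockdep_off()
 * and lockdep_on() pair up to hide a region from the validator, e.g.
 * around code known to trip false positives. The callee name below is
 * hypothetical.
 *
 *	lockdep_off();
 *	call_code_with_known_false_positives();
 *	lockdep_on();
 */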
/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
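
/*
 * Example (illustrative sketch, not part of the API above): give one
 * lock instance its own class so the validator does not conflate it
 * with other locks of the same type. "struct foo", foo_init() and
 * foo_nested_key are hypothetical names; the lock is a spinlock_t.
 *
 *	static struct lock_class_key foo_nested_key;
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		lockdep_set_class(&f->lock, &foo_nested_key);
 *	}
 */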
#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);
extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
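
/*
 * Example (sketch): how a locking primitive might wire itself up to
 * lockdep_init_map()/lock_acquire()/lock_release() above. All "my_*"
 * and "arch_my_*" names are hypothetical.
 *
 *	void my_lock_init(struct my_lock *l, const char *name,
 *			  struct lock_class_key *key)
 *	{
 *		arch_my_lock_init(&l->raw);
 *		lockdep_init_map(&l->dep_map, name, key, 0);
 *	}
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_my_lock(&l->raw);
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		arch_my_unlock(&l->raw);
 *	}
 *
 * The lock_acquire() arguments request an exclusive (read=0), non-trylock,
 * fully validated (check=1) acquisition with no nest_lock.
 */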
/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}
#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
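
/*
 * Example (sketch): lockdep_is_held() is typically used in assertions
 * and as the condition of RCU's "protected" accessors; "gi" and its
 * fields are hypothetical.
 *
 *	p = rcu_dereference_protected(gi->item,
 *				      lockdep_is_held(&gi->mutex));
 */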
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
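
/*
 * Example (sketch): assert a locking precondition instead of stating
 * it only in a comment; update_counter() and "st" are hypothetical.
 *
 *	static void update_counter(struct stats *st)
 *	{
 *		lockdep_assert_held(&st->lock);
 *		st->counter++;
 *	}
 */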
#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
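
/*
 * Example (sketch): pin a lock across a region in which it must not be
 * dropped; an unlock while pinned triggers a warning. "q" is a
 * hypothetical object whose ->lock carries a dep_map.
 *
 *	struct pin_cookie cookie;
 *
 *	cookie = lockdep_pin_lock(&q->lock);
 *	run_region_that_must_hold_q_lock(q);
 *	lockdep_unpin_lock(&q->lock, cookie);
 */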
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}
# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and callers should
 * rather #ifdef the call themselves.
 */
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };
#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)
struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */
enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
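
/*
 * Example (sketch): a statically initialized "virtual" lockdep map,
 * used purely for acquire/release annotations (as e.g. the workqueue
 * code does for flush dependencies); the names below are hypothetical.
 *
 *	static struct lock_class_key my_cb_key;
 *	static struct lockdep_map my_cb_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_subsys_callback", &my_cb_key);
 *
 *	lock_map_acquire(&my_cb_map);
 *	run_my_callback();
 *	lock_map_release(&my_cb_map);
 */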
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
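
/*
 * Example (sketch): a sleeping lock's lock path built on the macro
 * above, so the contended/acquired events are recorded; the my_mutex_*
 * helpers are hypothetical, with my_mutex_trylock() returning non-zero
 * on success.
 *
 *	void my_mutex_lock(struct my_mutex *m)
 *	{
 *		mutex_acquire(&m->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(m, my_mutex_trylock, my_mutex_lock_slowpath);
 *	}
 */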
#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags)	\
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags)	\
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */
#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif
/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
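
/*
 * Example (sketch): lock two instances of the same class in a fixed
 * order and tell lockdep that the second acquisition nests inside the
 * first; double_foo_lock() and the address-based ordering rule are
 * hypothetical.
 *
 *	static void double_foo_lock(struct foo *a, struct foo *b)
 *	{
 *		if (a > b)
 *			swap(a, b);
 *		spin_lock(&a->lock);
 *		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	}
 */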
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)
#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
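
/*
 * Example (sketch): annotate a function that only sometimes takes a
 * lock, so every caller is validated even when the locked path is not
 * exercised at runtime; all names are hypothetical.
 *
 *	void *dev_get_buffer(struct my_dev *d, bool sync)
 *	{
 *		might_lock(&d->mutex);
 *		if (!sync)
 *			return d->cached;
 *		mutex_lock(&d->mutex);
 *		refill(d);
 *		mutex_unlock(&d->mutex);
 *		return d->cached;
 *	}
 */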
#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
#endif
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif
#endif /* __LINUX_LOCKDEP_H */