/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 *
 * That is: 1 bit for LOCK_USED, plus 4 usage bits (used-in, used-in-read,
 * enabled, enabled-read) for each of the 2 irq states (hardirq, softirq).
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)
/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it with the single-depth subclass, which is
 * highly contended.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2
/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
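/*
 * Example (sketch; the key name is hypothetical): declaring a static key
 * gives a logical lock class a unique .data address to key on:
 *
 *	static struct lock_class_key foo_lock_key;
 *
 * The key is then passed to lockdep_init_map() or lockdep_set_class()
 * below; all locks registered with it are validated as one class.
 */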
extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4
/*
 * The lock-class itself:
 */
struct lock_class {
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};
#ifdef CONFIG_LOCK_STAT

struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};
struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};
struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);

#endif
/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Whether it's a crosslock.
	 */
	int				cross;
#endif
};
static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};
/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};
#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Generation id.
	 *
	 * A value of cross_gen_id will be stored when holding this,
	 * which is globally increased whenever each crosslock is held.
	 */
	unsigned int gen_id;
#endif
};
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCK_TRACE_ENTRIES 5
/*
 * This is for keeping locks waiting for commit so that true dependencies
 * can be added at commit step.
 */
struct hist_lock {
	/*
	 * Id for each entry in the ring buffer. This is used to
	 * decide whether the ring buffer was overwritten or not.
	 *
	 * For example,
	 *
	 *           |<----------- hist_lock ring buffer size ------->|
	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
	 *
	 *           where 'p' represents an acquisition in process
	 *           context, 'i' represents an acquisition in irq
	 *           context.
	 *
	 * In this example, the ring buffer was overwritten by
	 * acquisitions in irq context, which should be detected on
	 * rollback or commit.
	 */
	unsigned int hist_id;

	/*
	 * Separate stack_trace data. This will be used at commit step.
	 */
	struct stack_trace	trace;
	unsigned long		trace_entries[MAX_XHLOCK_TRACE_ENTRIES];

	/*
	 * Separate hlock instance. This will be used at commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};
/*
 * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
 * be called instead of lockdep_init_map().
 */
struct cross_lock {
	/*
	 * When more than one acquisition of crosslocks are overlapped,
	 * we have to perform commit for them based on cross_gen_id of
	 * the first acquisition, which allows us to add more true
	 * dependencies.
	 *
	 * Moreover, when no acquisition of a crosslock is in progress,
	 * we should not perform commit because the lock might not exist
	 * any more, which might cause incorrect memory access. So we
	 * have to track the number of acquisitions of a crosslock.
	 */
	int nr_acquire;

	/*
	 * Separate hlock instance. This will be used at commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};
struct lockdep_map_cross {
	struct lockdep_map map;
	struct cross_lock xlock;
};
#endif
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);
/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
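/*
 * Example (sketch; the "mydev" structure and key are hypothetical): split
 * a lock off into its own class when a common init routine would otherwise
 * lump unrelated users into one class:
 *
 *	static struct lock_class_key mydev_lock_key;
 *
 *	mutex_init(&mydev->lock);
 *	lockdep_set_class(&mydev->lock, &mydev_lock_key);
 */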
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);
extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(struct lockdep_map *lock, int read);

static inline int lock_is_held(struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
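/*
 * Example (sketch, modelled on the scheduler's rq pinning; "rq" is
 * illustrative): a pin cookie catches anyone who drops and re-takes the
 * lock while the pinner assumed it stayed held; it uses the
 * lockdep_pin_lock() wrappers defined below:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	...			// rq->lock must remain held throughout
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */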
# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
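/*
 * Example (sketch; "foo" names are illustrative): assert a locking
 * precondition at the top of a helper instead of only documenting it:
 *
 *	static void foo_update(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		f->generation++;
 *	}
 *
 * The check compiles away entirely when CONFIG_LOCKDEP is off.
 */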
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}
# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and callers should
 * rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */
enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
				       const char *name,
				       struct lock_class_key *key,
				       int subclass);
extern void lock_commit_crosslock(struct lockdep_map *lock);
/*
 * What we essentially have to initialize is 'nr_acquire'. Other members
 * will be initialized in add_xlock().
 */
#define STATIC_CROSS_LOCK_INIT() \
	{ .nr_acquire = 0, }

#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
	{ .map.name = (_name), .map.key = (void *)(_key), \
	  .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), .cross = 0, }
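/*
 * Example (sketch; the map name is hypothetical): a static pseudo-lock
 * used purely for dependency annotation, keyed - per the convention for
 * static objects described at the top of this file - on its own address:
 *
 *	static struct lockdep_map commit_lock_map =
 *		STATIC_LOCKDEP_MAP_INIT("commit_lock", &commit_lock_map);
 */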
extern void crossrelease_hist_start(enum xhlock_context_t c);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
#else /* !CROSSRELEASE */
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#endif /* CROSSRELEASE */
#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
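/*
 * Example (sketch, modelled on the rwsem slow path; the double-underscore
 * helpers are assumptions here): only a failed trylock counts as
 * contention, and lock_acquired() records the eventual acquisition:
 *
 *	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 *	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 */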
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))
#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */
#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif
/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
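/*
 * Example (sketch; parent/child are illustrative): locking two instances
 * of the same class in a fixed order, e.g. a parent object and then one
 * of its children:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */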
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)
#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
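/*
 * Example (sketch, in the style of the workqueue annotations; the map
 * name and helper are hypothetical): bracket a code region with
 * acquire/release on a static lockdep_map so lockdep orders it against
 * real locks taken inside:
 *
 *	lock_map_acquire(&work_exec_map);
 *	do_the_work();
 *	lock_map_release(&work_exec_map);
 */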
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
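/*
 * Example (sketch; names illustrative): a function that takes a lock only
 * on some paths can still declare the potential dependency unconditionally:
 *
 *	static void foo_flush(struct foo *f, bool sync)
 *	{
 *		might_lock(&f->lock);
 *		if (sync) {
 *			mutex_lock(&f->lock);
 *			...
 *			mutex_unlock(&f->lock);
 *		}
 *	}
 */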
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */