lockstat: warn about disabled lock debugging
[linux-2.6-block.git] / kernel / lockdep.c
1/*
2 * kernel/lockdep.c
3 *
4 * Runtime locking correctness validator
5 *
6 * Started by Ingo Molnar:
7 *
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
10 *
11 * this code maps all the lock dependencies as they occur in a live kernel
12 * and will warn about the following classes of locking bugs:
13 *
14 * - lock inversion scenarios
15 * - circular lock dependencies
16 * - hardirq/softirq safe/unsafe locking bugs
17 *
18 * Bugs are reported even if the current locking scenario does not cause
19 * any deadlock at this point.
20 *
21 * I.e. if anytime in the past two locks were taken in a different order,
22 * even if it happened for another task, even if those were different
23 * locks (but of the same class as this lock), this code will detect it.
24 *
25 * Thanks to Arjan van de Ven for coming up with the initial idea of
26 * mapping lock dependencies runtime.
27 */
28#define DISABLE_BRANCH_PROFILING
29#include <linux/mutex.h>
30#include <linux/sched.h>
31#include <linux/delay.h>
32#include <linux/module.h>
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
35#include <linux/spinlock.h>
36#include <linux/kallsyms.h>
37#include <linux/interrupt.h>
38#include <linux/stacktrace.h>
39#include <linux/debug_locks.h>
40#include <linux/irqflags.h>
41#include <linux/utsname.h>
42#include <linux/hash.h>
43#include <linux/ftrace.h>
44#include <linux/stringify.h>
45
46#include <asm/sections.h>
47
48#include "lockdep_internals.h"
49
50#ifdef CONFIG_PROVE_LOCKING
51int prove_locking = 1;
52module_param(prove_locking, int, 0644);
53#else
54#define prove_locking 0
55#endif
56
57#ifdef CONFIG_LOCK_STAT
58int lock_stat = 1;
59module_param(lock_stat, int, 0644);
60#else
61#define lock_stat 0
62#endif
63
64/*
65 * lockdep_lock: protects the lockdep graph, the hashes and the
66 * class/list/hash allocators.
67 *
68 * This is one of the rare exceptions where it's justified
69 * to use a raw spinlock - we really don't want the spinlock
70 * code to recurse back into the lockdep code...
71 */
72static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
73
74static int graph_lock(void)
75{
76 __raw_spin_lock(&lockdep_lock);
77 /*
78 * Make sure that if another CPU detected a bug while
79 * walking the graph we don't change it (while the other
80 * CPU is busy printing out stuff with the graph lock
81 * dropped already)
82 */
83 if (!debug_locks) {
84 __raw_spin_unlock(&lockdep_lock);
85 return 0;
86 }
87 /* prevent any recursions within lockdep from causing deadlocks */
88 current->lockdep_recursion++;
89 return 1;
90}
91
92static inline int graph_unlock(void)
93{
94 if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
95 return DEBUG_LOCKS_WARN_ON(1);
96
97 current->lockdep_recursion--;
98 __raw_spin_unlock(&lockdep_lock);
99 return 0;
100}
101
102/*
103 * Turn lock debugging off and return with 0 if it was off already,
104 * and also release the graph lock:
105 */
106static inline int debug_locks_off_graph_unlock(void)
107{
108 int ret = debug_locks_off();
109
110 __raw_spin_unlock(&lockdep_lock);
111
112 return ret;
113}
114
115static int lockdep_initialized;
116
117unsigned long nr_list_entries;
118static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
119
120/*
121 * All data structures here are protected by the global debug_lock.
122 *
123 * Mutex key structs only get allocated, once during bootup, and never
124 * get freed - this significantly simplifies the debugging code.
125 */
126unsigned long nr_lock_classes;
127static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
128
129static inline struct lock_class *hlock_class(struct held_lock *hlock)
130{
131 if (!hlock->class_idx) {
132 DEBUG_LOCKS_WARN_ON(1);
133 return NULL;
134 }
135 return lock_classes + hlock->class_idx - 1;
136}
137
138#ifdef CONFIG_LOCK_STAT
139static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
140
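/*
 * Record @ip in the first free slot of points[], or stop at an existing
 * entry for @ip; returns LOCKSTAT_POINTS if the array is already full.
 */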
141static int lock_point(unsigned long points[], unsigned long ip)
142{
143 int i;
144
145 for (i = 0; i < LOCKSTAT_POINTS; i++) {
146 if (points[i] == 0) {
147 points[i] = ip;
148 break;
149 }
150 if (points[i] == ip)
151 break;
152 }
153
154 return i;
155}
156
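/*
 * Fold one measured interval into a lock_time: track max and min (a zero
 * min means "not set yet"), the running total and the sample count.
 */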
157static void lock_time_inc(struct lock_time *lt, s64 time)
158{
159 if (time > lt->max)
160 lt->max = time;
161
162 if (time < lt->min || !lt->min)
163 lt->min = time;
164
165 lt->total += time;
166 lt->nr++;
167}
168
169static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
170{
171 dst->min += src->min;
172 dst->max += src->max;
173 dst->total += src->total;
174 dst->nr += src->nr;
175}
176
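/*
 * Aggregate the per-CPU statistics of a class into a single
 * lock_class_stats snapshot.
 */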
177struct lock_class_stats lock_stats(struct lock_class *class)
178{
179 struct lock_class_stats stats;
180 int cpu, i;
181
182 memset(&stats, 0, sizeof(struct lock_class_stats));
183 for_each_possible_cpu(cpu) {
184 struct lock_class_stats *pcs =
185 &per_cpu(lock_stats, cpu)[class - lock_classes];
186
187 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
188 stats.contention_point[i] += pcs->contention_point[i];
189
190 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
191 stats.contending_point[i] += pcs->contending_point[i];
192
193 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
194 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
195
196 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
197 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
198
199 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
200 stats.bounces[i] += pcs->bounces[i];
201 }
202
203 return stats;
204}
205
206void clear_lock_stats(struct lock_class *class)
207{
208 int cpu;
209
210 for_each_possible_cpu(cpu) {
211 struct lock_class_stats *cpu_stats =
212 &per_cpu(lock_stats, cpu)[class - lock_classes];
213
214 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
215 }
216 memset(class->contention_point, 0, sizeof(class->contention_point));
217 memset(class->contending_point, 0, sizeof(class->contending_point));
218}
219
220static struct lock_class_stats *get_lock_stats(struct lock_class *class)
221{
222 return &get_cpu_var(lock_stats)[class - lock_classes];
223}
224
225static void put_lock_stats(struct lock_class_stats *stats)
226{
227 put_cpu_var(lock_stats);
228}
229
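/*
 * Charge the time the lock was held (sched_clock() delta since acquisition)
 * to the class's read or write hold-time statistics.
 */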
230static void lock_release_holdtime(struct held_lock *hlock)
231{
232 struct lock_class_stats *stats;
233 s64 holdtime;
234
235 if (!lock_stat)
236 return;
237
238 holdtime = sched_clock() - hlock->holdtime_stamp;
239
240 stats = get_lock_stats(hlock_class(hlock));
241 if (hlock->read)
242 lock_time_inc(&stats->read_holdtime, holdtime);
243 else
244 lock_time_inc(&stats->write_holdtime, holdtime);
245 put_lock_stats(stats);
246}
247#else
248static inline void lock_release_holdtime(struct held_lock *hlock)
249{
250}
251#endif
252
253/*
254 * We keep a global list of all lock classes. The list only grows,
255 * never shrinks. The list is only accessed with the lockdep
256 * spinlock lock held.
257 */
258LIST_HEAD(all_lock_classes);
259
260/*
261 * The lockdep classes are in a hash-table as well, for fast lookup:
262 */
263#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
264#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
265#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
266#define classhashentry(key) (classhash_table + __classhashfn((key)))
267
268static struct list_head classhash_table[CLASSHASH_SIZE];
269
270/*
271 * We put the lock dependency chains into a hash-table as well, to cache
272 * their existence:
273 */
274#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
275#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
276#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
277#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
278
279static struct list_head chainhash_table[CHAINHASH_SIZE];
280
281/*
282 * The hash key of the lock dependency chains is a hash itself too:
283 * it's a hash of all locks taken up to that lock, including that lock.
284 * It's a 64-bit hash, because it's important for the keys to be
285 * unique.
286 */
287#define iterate_chain_key(key1, key2) \
288 (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
289 ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
290 (key2))
291
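/*
 * lockdep_off()/lockdep_on(): raise/drop current->lockdep_recursion so
 * that lock operations in the enclosed region are ignored by lockdep.
 */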
292void lockdep_off(void)
293{
294 current->lockdep_recursion++;
295}
296EXPORT_SYMBOL(lockdep_off);
297
298void lockdep_on(void)
299{
300 current->lockdep_recursion--;
301}
302EXPORT_SYMBOL(lockdep_on);
303
304/*
305 * Debugging switches:
306 */
307
308#define VERBOSE 0
309#define VERY_VERBOSE 0
310
311#if VERBOSE
312# define HARDIRQ_VERBOSE 1
313# define SOFTIRQ_VERBOSE 1
314# define RECLAIM_VERBOSE 1
315#else
316# define HARDIRQ_VERBOSE 0
317# define SOFTIRQ_VERBOSE 0
318# define RECLAIM_VERBOSE 0
319#endif
320
321#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
322/*
323 * Quick filtering for interesting events:
324 */
325static int class_filter(struct lock_class *class)
326{
327#if 0
328 /* Example */
329 if (class->name_version == 1 &&
330 !strcmp(class->name, "lockname"))
331 return 1;
332 if (class->name_version == 1 &&
333 !strcmp(class->name, "&struct->lockfield"))
334 return 1;
335#endif
336 /* Filter everything else. 1 would be to allow everything else */
337 return 0;
338}
339#endif
340
341static int verbose(struct lock_class *class)
342{
343#if VERBOSE
344 return class_filter(class);
345#endif
346 return 0;
347}
348
349/*
350 * Stack-trace: tightly packed array of stack backtrace
351 * addresses. Protected by the graph_lock.
352 */
353unsigned long nr_stack_trace_entries;
354static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
355
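/*
 * Save the current stack trace into the global stack_trace[] array;
 * if the array fills up, lock debugging is turned off.
 */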
356static int save_trace(struct stack_trace *trace)
357{
358 trace->nr_entries = 0;
359 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
360 trace->entries = stack_trace + nr_stack_trace_entries;
361
362 trace->skip = 3;
363
364 save_stack_trace(trace);
365
366 trace->max_entries = trace->nr_entries;
367
368 nr_stack_trace_entries += trace->nr_entries;
369
370 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
371 if (!debug_locks_off_graph_unlock())
372 return 0;
373
374 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
375 printk("turning off the locking correctness validator.\n");
376 dump_stack();
377
378 return 0;
379 }
380
381 return 1;
382}
383
384unsigned int nr_hardirq_chains;
385unsigned int nr_softirq_chains;
386unsigned int nr_process_chains;
387unsigned int max_lockdep_depth;
388unsigned int max_recursion_depth;
389
390static unsigned int lockdep_dependency_gen_id;
391
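/*
 * Generation-counter based "visited" marking for graph walks: depth 0
 * starts a new generation, and each class is visited at most once per walk.
 */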
392static bool lockdep_dependency_visit(struct lock_class *source,
393 unsigned int depth)
394{
395 if (!depth)
396 lockdep_dependency_gen_id++;
397 if (source->dep_gen_id == lockdep_dependency_gen_id)
398 return true;
399 source->dep_gen_id = lockdep_dependency_gen_id;
400 return false;
401}
402
403#ifdef CONFIG_DEBUG_LOCKDEP
404/*
405 * We cannot printk in early bootup code. Not even early_printk()
406 * might work. So we mark any initialization errors and printk
407 * about it later on, in lockdep_info().
408 */
409static int lockdep_init_error;
410static unsigned long lockdep_init_trace_data[20];
411static struct stack_trace lockdep_init_trace = {
412 .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
413 .entries = lockdep_init_trace_data,
414};
415
416/*
417 * Various lockdep statistics:
418 */
419atomic_t chain_lookup_hits;
420atomic_t chain_lookup_misses;
421atomic_t hardirqs_on_events;
422atomic_t hardirqs_off_events;
423atomic_t redundant_hardirqs_on;
424atomic_t redundant_hardirqs_off;
425atomic_t softirqs_on_events;
426atomic_t softirqs_off_events;
427atomic_t redundant_softirqs_on;
428atomic_t redundant_softirqs_off;
429atomic_t nr_unused_locks;
430atomic_t nr_cyclic_checks;
431atomic_t nr_cyclic_check_recursions;
432atomic_t nr_find_usage_forwards_checks;
433atomic_t nr_find_usage_forwards_recursions;
434atomic_t nr_find_usage_backwards_checks;
435atomic_t nr_find_usage_backwards_recursions;
436# define debug_atomic_inc(ptr) atomic_inc(ptr)
437# define debug_atomic_dec(ptr) atomic_dec(ptr)
438# define debug_atomic_read(ptr) atomic_read(ptr)
439#else
440# define debug_atomic_inc(ptr) do { } while (0)
441# define debug_atomic_dec(ptr) do { } while (0)
442# define debug_atomic_read(ptr) 0
443#endif
444
445/*
446 * Locking printouts:
447 */
448
449#define __USAGE(__STATE) \
450 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
451 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
452 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
453 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
454
455static const char *usage_str[] =
456{
457#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
458#include "lockdep_states.h"
459#undef LOCKDEP_STATE
460 [LOCK_USED] = "INITIAL USE",
461};
462
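/*
 * Resolve a static class key to a symbol name via kallsyms, used when a
 * lock or class has no ->name set.
 */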
463const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
464{
465 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
466}
467
468static inline unsigned long lock_flag(enum lock_usage_bit bit)
469{
470 return 1UL << bit;
471}
472
473static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
474{
475 char c = '.';
476
477 if (class->usage_mask & lock_flag(bit + 2))
478 c = '+';
479 if (class->usage_mask & lock_flag(bit)) {
480 c = '-';
481 if (class->usage_mask & lock_flag(bit + 2))
482 c = '?';
483 }
484
485 return c;
486}
487
488void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
489{
490 int i = 0;
491
492#define LOCKDEP_STATE(__STATE) \
493 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
494 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
495#include "lockdep_states.h"
496#undef LOCKDEP_STATE
497
498 usage[i] = '\0';
499}
500
501static void print_lock_name(struct lock_class *class)
502{
503 char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
504 const char *name;
505
506 get_usage_chars(class, usage);
507
508 name = class->name;
509 if (!name) {
510 name = __get_key_name(class->key, str);
511 printk(" (%s", name);
512 } else {
513 printk(" (%s", name);
514 if (class->name_version > 1)
515 printk("#%d", class->name_version);
516 if (class->subclass)
517 printk("/%d", class->subclass);
518 }
519 printk("){%s}", usage);
520}
521
522static void print_lockdep_cache(struct lockdep_map *lock)
523{
524 const char *name;
525 char str[KSYM_NAME_LEN];
526
527 name = lock->name;
528 if (!name)
529 name = __get_key_name(lock->key->subkeys, str);
530
531 printk("%s", name);
532}
533
534static void print_lock(struct held_lock *hlock)
535{
536 print_lock_name(hlock_class(hlock));
537 printk(", at: ");
538 print_ip_sym(hlock->acquire_ip);
539}
540
541static void lockdep_print_held_locks(struct task_struct *curr)
542{
543 int i, depth = curr->lockdep_depth;
544
545 if (!depth) {
546 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
547 return;
548 }
549 printk("%d lock%s held by %s/%d:\n",
550 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
551
552 for (i = 0; i < depth; i++) {
553 printk(" #%d: ", i);
554 print_lock(curr->held_locks + i);
555 }
556}
557
558static void print_lock_class_header(struct lock_class *class, int depth)
559{
560 int bit;
561
562 printk("%*s->", depth, "");
563 print_lock_name(class);
564 printk(" ops: %lu", class->ops);
565 printk(" {\n");
566
567 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
568 if (class->usage_mask & (1 << bit)) {
569 int len = depth;
570
571 len += printk("%*s %s", depth, "", usage_str[bit]);
572 len += printk(" at:\n");
573 print_stack_trace(class->usage_traces + bit, len);
574 }
575 }
576 printk("%*s }\n", depth, "");
577
578 printk("%*s ... key at: ",depth,"");
579 print_ip_sym((unsigned long)class->key);
580}
581
582/*
583 * printk all lock dependencies starting at <entry>:
584 */
585static void __used
586print_lock_dependencies(struct lock_class *class, int depth)
587{
588 struct lock_list *entry;
589
590 if (lockdep_dependency_visit(class, depth))
591 return;
592
593 if (DEBUG_LOCKS_WARN_ON(depth >= 20))
594 return;
595
596 print_lock_class_header(class, depth);
597
598 list_for_each_entry(entry, &class->locks_after, entry) {
599 if (DEBUG_LOCKS_WARN_ON(!entry->class))
600 return;
601
602 print_lock_dependencies(entry->class, depth + 1);
603
604 printk("%*s ... acquired at:\n",depth,"");
605 print_stack_trace(&entry->trace, 2);
606 printk("\n");
607 }
608}
609
610static void print_kernel_version(void)
611{
612 printk("%s %.*s\n", init_utsname()->release,
613 (int)strcspn(init_utsname()->version, " "),
614 init_utsname()->version);
615}
616
617static int very_verbose(struct lock_class *class)
618{
619#if VERY_VERBOSE
620 return class_filter(class);
621#endif
622 return 0;
623}
624
625/*
626 * Is this the address of a static object:
627 */
628static int static_obj(void *obj)
629{
630 unsigned long start = (unsigned long) &_stext,
631 end = (unsigned long) &_end,
632 addr = (unsigned long) obj;
633#ifdef CONFIG_SMP
634 int i;
635#endif
636
637 /*
638 * static variable?
639 */
640 if ((addr >= start) && (addr < end))
641 return 1;
642
643#ifdef CONFIG_SMP
644 /*
645 * percpu var?
646 */
647 for_each_possible_cpu(i) {
648 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
649 end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
650 + per_cpu_offset(i);
651
652 if ((addr >= start) && (addr < end))
653 return 1;
654 }
655#endif
656
657 /*
658 * module var?
659 */
660 return is_module_address(addr);
661}
662
663/*
664 * To make lock name printouts unique, we calculate a unique
665 * class->name_version generation counter:
666 */
667static int count_matching_names(struct lock_class *new_class)
668{
669 struct lock_class *class;
670 int count = 0;
671
672 if (!new_class->name)
673 return 0;
674
675 list_for_each_entry(class, &all_lock_classes, lock_entry) {
676 if (new_class->key - new_class->subclass == class->key)
677 return class->name_version;
678 if (class->name && !strcmp(class->name, new_class->name))
679 count = max(count, class->name_version);
680 }
681
682 return count + 1;
683}
684
685/*
686 * Register a lock's class in the hash-table, if the class is not present
687 * yet. Otherwise we look it up. We cache the result in the lock object
688 * itself, so actual lookup of the hash should be once per lock object.
689 */
690static inline struct lock_class *
691look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
692{
693 struct lockdep_subclass_key *key;
694 struct list_head *hash_head;
695 struct lock_class *class;
696
697#ifdef CONFIG_DEBUG_LOCKDEP
698 /*
699 * If the architecture calls into lockdep before initializing
700 * the hashes then we'll warn about it later. (we cannot printk
701 * right now)
702 */
703 if (unlikely(!lockdep_initialized)) {
704 lockdep_init();
705 lockdep_init_error = 1;
706 save_stack_trace(&lockdep_init_trace);
707 }
708#endif
709
710 /*
711 * Static locks do not have their class-keys yet - for them the key
712 * is the lock object itself:
713 */
714 if (unlikely(!lock->key))
715 lock->key = (void *)lock;
716
717 /*
718 * NOTE: the class-key must be unique. For dynamic locks, a static
719 * lock_class_key variable is passed in through the mutex_init()
720 * (or spin_lock_init()) call - which acts as the key. For static
721 * locks we use the lock object itself as the key.
722 */
723 BUILD_BUG_ON(sizeof(struct lock_class_key) >
724 sizeof(struct lockdep_map));
725
726 key = lock->key->subkeys + subclass;
727
728 hash_head = classhashentry(key);
729
730 /*
731 * We can walk the hash lockfree, because the hash only
732 * grows, and we are careful when adding entries to the end:
733 */
734 list_for_each_entry(class, hash_head, hash_entry) {
735 if (class->key == key) {
736 WARN_ON_ONCE(class->name != lock->name);
737 return class;
738 }
739 }
740
741 return NULL;
742}
743
744/*
745 * Register a lock's class in the hash-table, if the class is not present
746 * yet. Otherwise we look it up. We cache the result in the lock object
747 * itself, so actual lookup of the hash should be once per lock object.
748 */
749static inline struct lock_class *
750register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
751{
752 struct lockdep_subclass_key *key;
753 struct list_head *hash_head;
754 struct lock_class *class;
755 unsigned long flags;
756
757 class = look_up_lock_class(lock, subclass);
758 if (likely(class))
759 return class;
760
761 /*
762 * Debug-check: all keys must be persistent!
763 */
764 if (!static_obj(lock->key)) {
765 debug_locks_off();
766 printk("INFO: trying to register non-static key.\n");
767 printk("the code is fine but needs lockdep annotation.\n");
768 printk("turning off the locking correctness validator.\n");
769 dump_stack();
770
771 return NULL;
772 }
773
774 key = lock->key->subkeys + subclass;
775 hash_head = classhashentry(key);
776
777 raw_local_irq_save(flags);
778 if (!graph_lock()) {
779 raw_local_irq_restore(flags);
780 return NULL;
781 }
782 /*
783 * We have to do the hash-walk again, to avoid races
784 * with another CPU:
785 */
786 list_for_each_entry(class, hash_head, hash_entry)
787 if (class->key == key)
788 goto out_unlock_set;
789 /*
790 * Allocate a new key from the static array, and add it to
791 * the hash:
792 */
793 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
794 if (!debug_locks_off_graph_unlock()) {
795 raw_local_irq_restore(flags);
796 return NULL;
797 }
798 raw_local_irq_restore(flags);
799
800 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
801 printk("turning off the locking correctness validator.\n");
802 return NULL;
803 }
804 class = lock_classes + nr_lock_classes++;
805 debug_atomic_inc(&nr_unused_locks);
806 class->key = key;
807 class->name = lock->name;
808 class->subclass = subclass;
809 INIT_LIST_HEAD(&class->lock_entry);
810 INIT_LIST_HEAD(&class->locks_before);
811 INIT_LIST_HEAD(&class->locks_after);
812 class->name_version = count_matching_names(class);
813 /*
814 * We use RCU's safe list-add method to make
815 * parallel walking of the hash-list safe:
816 */
817 list_add_tail_rcu(&class->hash_entry, hash_head);
818 /*
819 * Add it to the global list of classes:
820 */
821 list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
822
823 if (verbose(class)) {
824 graph_unlock();
825 raw_local_irq_restore(flags);
826
827 printk("\nnew class %p: %s", class->key, class->name);
828 if (class->name_version > 1)
829 printk("#%d", class->name_version);
830 printk("\n");
831 dump_stack();
832
833 raw_local_irq_save(flags);
834 if (!graph_lock()) {
835 raw_local_irq_restore(flags);
836 return NULL;
837 }
838 }
839out_unlock_set:
840 graph_unlock();
841 raw_local_irq_restore(flags);
842
843 if (!subclass || force)
844 lock->class_cache = class;
845
846 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
847 return NULL;
848
849 return class;
850}
851
852#ifdef CONFIG_PROVE_LOCKING
853/*
854 * Allocate a lockdep entry. (assumes the graph_lock held, returns
855 * with NULL on failure)
856 */
857static struct lock_list *alloc_list_entry(void)
858{
859 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
860 if (!debug_locks_off_graph_unlock())
861 return NULL;
862
863 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
864 printk("turning off the locking correctness validator.\n");
865 return NULL;
866 }
867 return list_entries + nr_list_entries++;
868}
869
870/*
871 * Add a new dependency to the head of the list:
872 */
873static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
874 struct list_head *head, unsigned long ip, int distance)
875{
876 struct lock_list *entry;
877 /*
878 * Lock not present yet - get a new dependency struct and
879 * add it to the list:
880 */
881 entry = alloc_list_entry();
882 if (!entry)
883 return 0;
884
885 if (!save_trace(&entry->trace))
886 return 0;
887
888 entry->class = this;
889 entry->distance = distance;
890 /*
891 * Since we never remove from the dependency list, the list can
892 * be walked lockless by other CPUs, it's only allocation
893 * that must be protected by the spinlock. But this also means
894 * we must make new entries visible only once writes to the
895 * entry become visible - hence the RCU op:
896 */
897 list_add_tail_rcu(&entry->entry, head);
898
899 return 1;
900}
901
902/*
903 * Recursive, forwards-direction lock-dependency checking, used for
904 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
905 * checking.
906 *
907 * (to keep the stackframe of the recursive functions small we
908 * use these global variables, and we also mark various helper
909 * functions as noinline.)
910 */
911static struct held_lock *check_source, *check_target;
912
913/*
914 * Print a dependency chain entry (this is only done when a deadlock
915 * has been detected):
916 */
917static noinline int
918print_circular_bug_entry(struct lock_list *target, unsigned int depth)
919{
920 if (debug_locks_silent)
921 return 0;
922 printk("\n-> #%u", depth);
923 print_lock_name(target->class);
924 printk(":\n");
925 print_stack_trace(&target->trace, 6);
926
927 return 0;
928}
929
930/*
931 * When a circular dependency is detected, print the
932 * header first:
933 */
934static noinline int
935print_circular_bug_header(struct lock_list *entry, unsigned int depth)
936{
937 struct task_struct *curr = current;
938
939 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
940 return 0;
941
942 printk("\n=======================================================\n");
943 printk( "[ INFO: possible circular locking dependency detected ]\n");
944 print_kernel_version();
945 printk( "-------------------------------------------------------\n");
946 printk("%s/%d is trying to acquire lock:\n",
947 curr->comm, task_pid_nr(curr));
948 print_lock(check_source);
949 printk("\nbut task is already holding lock:\n");
950 print_lock(check_target);
951 printk("\nwhich lock already depends on the new lock.\n\n");
952 printk("\nthe existing dependency chain (in reverse order) is:\n");
953
954 print_circular_bug_entry(entry, depth);
955
956 return 0;
957}
958
959static noinline int print_circular_bug_tail(void)
960{
961 struct task_struct *curr = current;
962 struct lock_list this;
963
964 if (debug_locks_silent)
965 return 0;
966
967 this.class = hlock_class(check_source);
968 if (!save_trace(&this.trace))
969 return 0;
970
971 print_circular_bug_entry(&this, 0);
972
973 printk("\nother info that might help us debug this:\n\n");
974 lockdep_print_held_locks(curr);
975
976 printk("\nstack backtrace:\n");
977 dump_stack();
978
979 return 0;
980}
981
982#define RECURSION_LIMIT 40
983
984static int noinline print_infinite_recursion_bug(void)
985{
986 if (!debug_locks_off_graph_unlock())
987 return 0;
988
989 WARN_ON(1);
990
991 return 0;
992}
993
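/*
 * Count the classes reachable through locks_after (including the class
 * itself), visiting each class at most once per walk.
 */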
994unsigned long __lockdep_count_forward_deps(struct lock_class *class,
995 unsigned int depth)
996{
997 struct lock_list *entry;
998 unsigned long ret = 1;
999
1000 if (lockdep_dependency_visit(class, depth))
1001 return 0;
1002
1003 /*
1004 * Recurse this class's dependency list:
1005 */
1006 list_for_each_entry(entry, &class->locks_after, entry)
1007 ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1008
1009 return ret;
1010}
1011
1012unsigned long lockdep_count_forward_deps(struct lock_class *class)
1013{
1014 unsigned long ret, flags;
1015
1016 local_irq_save(flags);
1017 __raw_spin_lock(&lockdep_lock);
1018 ret = __lockdep_count_forward_deps(class, 0);
1019 __raw_spin_unlock(&lockdep_lock);
1020 local_irq_restore(flags);
1021
1022 return ret;
1023}
1024
1025unsigned long __lockdep_count_backward_deps(struct lock_class *class,
1026 unsigned int depth)
1027{
1028 struct lock_list *entry;
1029 unsigned long ret = 1;
1030
1031 if (lockdep_dependency_visit(class, depth))
1032 return 0;
1033 /*
1034 * Recurse this class's dependency list:
1035 */
1036 list_for_each_entry(entry, &class->locks_before, entry)
1037 ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1038
1039 return ret;
1040}
1041
1042unsigned long lockdep_count_backward_deps(struct lock_class *class)
1043{
1044 unsigned long ret, flags;
1045
1046 local_irq_save(flags);
1047 __raw_spin_lock(&lockdep_lock);
1048 ret = __lockdep_count_backward_deps(class, 0);
1049 __raw_spin_unlock(&lockdep_lock);
1050 local_irq_restore(flags);
1051
1052 return ret;
1053}
1054
1055/*
1056 * Prove that the dependency graph starting at <entry> can not
1057 * lead to <target>. Print an error and return 0 if it does.
1058 */
1059static noinline int
1060check_noncircular(struct lock_class *source, unsigned int depth)
1061{
1062 struct lock_list *entry;
1063
1064 if (lockdep_dependency_visit(source, depth))
1065 return 1;
1066
1067 debug_atomic_inc(&nr_cyclic_check_recursions);
1068 if (depth > max_recursion_depth)
1069 max_recursion_depth = depth;
1070 if (depth >= RECURSION_LIMIT)
1071 return print_infinite_recursion_bug();
1072 /*
1073 * Check this lock's dependency list:
1074 */
1075 list_for_each_entry(entry, &source->locks_after, entry) {
1076 if (entry->class == hlock_class(check_target))
1077 return print_circular_bug_header(entry, depth+1);
1078 debug_atomic_inc(&nr_cyclic_checks);
1079 if (!check_noncircular(entry->class, depth+1))
1080 return print_circular_bug_entry(entry, depth+1);
1081 }
1082 return 1;
1083}
1084
1085#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1086/*
1087 * Forwards and backwards subgraph searching, for the purposes of
1088 * proving that two subgraphs can be connected by a new dependency
1089 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1090 */
1091static enum lock_usage_bit find_usage_bit;
1092static struct lock_class *forwards_match, *backwards_match;
1093
1094/*
1095 * Find a node in the forwards-direction dependency sub-graph starting
1096 * at <source> that matches <find_usage_bit>.
1097 *
1098 * Return 2 if such a node exists in the subgraph, and put that node
1099 * into <forwards_match>.
1100 *
1101 * Return 1 otherwise and keep <forwards_match> unchanged.
1102 * Return 0 on error.
1103 */
1104static noinline int
1105find_usage_forwards(struct lock_class *source, unsigned int depth)
1106{
1107 struct lock_list *entry;
1108 int ret;
1109
1110 if (lockdep_dependency_visit(source, depth))
1111 return 1;
1112
1113 if (depth > max_recursion_depth)
1114 max_recursion_depth = depth;
1115 if (depth >= RECURSION_LIMIT)
1116 return print_infinite_recursion_bug();
1117
1118 debug_atomic_inc(&nr_find_usage_forwards_checks);
1119 if (source->usage_mask & (1 << find_usage_bit)) {
1120 forwards_match = source;
1121 return 2;
1122 }
1123
1124 /*
1125 * Check this lock's dependency list:
1126 */
1127 list_for_each_entry(entry, &source->locks_after, entry) {
1128 debug_atomic_inc(&nr_find_usage_forwards_recursions);
1129 ret = find_usage_forwards(entry->class, depth+1);
1130 if (ret == 2 || ret == 0)
1131 return ret;
1132 }
1133 return 1;
1134}
1135
1136/*
1137 * Find a node in the backwards-direction dependency sub-graph starting
1138 * at <source> that matches <find_usage_bit>.
1139 *
1140 * Return 2 if such a node exists in the subgraph, and put that node
1141 * into <backwards_match>.
1142 *
1143 * Return 1 otherwise and keep <backwards_match> unchanged.
1144 * Return 0 on error.
1145 */
1146static noinline int
1147find_usage_backwards(struct lock_class *source, unsigned int depth)
1148{
1149 struct lock_list *entry;
1150 int ret;
1151
1152 if (lockdep_dependency_visit(source, depth))
1153 return 1;
1154
1155 if (!__raw_spin_is_locked(&lockdep_lock))
1156 return DEBUG_LOCKS_WARN_ON(1);
1157
1158 if (depth > max_recursion_depth)
1159 max_recursion_depth = depth;
1160 if (depth >= RECURSION_LIMIT)
1161 return print_infinite_recursion_bug();
1162
1163 debug_atomic_inc(&nr_find_usage_backwards_checks);
1164 if (source->usage_mask & (1 << find_usage_bit)) {
1165 backwards_match = source;
1166 return 2;
1167 }
1168
1169 if (!source && debug_locks_off_graph_unlock()) {
1170 WARN_ON(1);
1171 return 0;
1172 }
1173
1174 /*
1175 * Check this lock's dependency list:
1176 */
1177 list_for_each_entry(entry, &source->locks_before, entry) {
1178 debug_atomic_inc(&nr_find_usage_backwards_recursions);
1179 ret = find_usage_backwards(entry->class, depth+1);
1180 if (ret == 2 || ret == 0)
1181 return ret;
1182 }
1183 return 1;
1184}
1185
1186static int
1187print_bad_irq_dependency(struct task_struct *curr,
1188 struct held_lock *prev,
1189 struct held_lock *next,
1190 enum lock_usage_bit bit1,
1191 enum lock_usage_bit bit2,
1192 const char *irqclass)
1193{
1194 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1195 return 0;
1196
1197 printk("\n======================================================\n");
1198 printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1199 irqclass, irqclass);
1200 print_kernel_version();
1201 printk( "------------------------------------------------------\n");
1202 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1203 curr->comm, task_pid_nr(curr),
1204 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1205 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1206 curr->hardirqs_enabled,
1207 curr->softirqs_enabled);
1208 print_lock(next);
1209
1210 printk("\nand this task is already holding:\n");
1211 print_lock(prev);
1212 printk("which would create a new lock dependency:\n");
1213 print_lock_name(hlock_class(prev));
1214 printk(" ->");
1215 print_lock_name(hlock_class(next));
1216 printk("\n");
1217
1218 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1219 irqclass);
1220 print_lock_name(backwards_match);
1221 printk("\n... which became %s-irq-safe at:\n", irqclass);
1222
1223 print_stack_trace(backwards_match->usage_traces + bit1, 1);
1224
1225 printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1226 print_lock_name(forwards_match);
1227 printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1228 printk("...");
1229
1230 print_stack_trace(forwards_match->usage_traces + bit2, 1);
1231
1232 printk("\nother info that might help us debug this:\n\n");
1233 lockdep_print_held_locks(curr);
1234
1235 printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
1236 print_lock_dependencies(backwards_match, 0);
1237
1238 printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
1239 print_lock_dependencies(forwards_match, 0);
1240
1241 printk("\nstack backtrace:\n");
1242 dump_stack();
1243
1244 return 0;
1245}
1246
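/*
 * Search backwards from <prev> for a lock with usage <bit_backwards> and
 * forwards from <next> for one with <bit_forwards>; if both exist, the new
 * dependency would link an irq-safe lock to an irq-unsafe one and is reported.
 */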
1247static int
1248check_usage(struct task_struct *curr, struct held_lock *prev,
1249 struct held_lock *next, enum lock_usage_bit bit_backwards,
1250 enum lock_usage_bit bit_forwards, const char *irqclass)
1251{
1252 int ret;
1253
1254 find_usage_bit = bit_backwards;
1255 /* fills in <backwards_match> */
1256 ret = find_usage_backwards(hlock_class(prev), 0);
1257 if (!ret || ret == 1)
1258 return ret;
1259
1260 find_usage_bit = bit_forwards;
1261 ret = find_usage_forwards(hlock_class(next), 0);
1262 if (!ret || ret == 1)
1263 return ret;
1264 /* ret == 2 */
1265 return print_bad_irq_dependency(curr, prev, next,
1266 bit_backwards, bit_forwards, irqclass);
1267}
1268
1269static const char *state_names[] = {
1270#define LOCKDEP_STATE(__STATE) \
1271 __stringify(__STATE),
1272#include "lockdep_states.h"
1273#undef LOCKDEP_STATE
1274};
1275
1276static const char *state_rnames[] = {
1277#define LOCKDEP_STATE(__STATE) \
1278 __stringify(__STATE)"-READ",
1279#include "lockdep_states.h"
1280#undef LOCKDEP_STATE
1281};
1282
1283static inline const char *state_name(enum lock_usage_bit bit)
1284{
1285 return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1286}
1287
1288static int exclusive_bit(int new_bit)
1289{
1290 /*
1291 * USED_IN
1292 * USED_IN_READ
1293 * ENABLED
1294 * ENABLED_READ
1295 *
1296 * bit 0 - write/read
1297 * bit 1 - used_in/enabled
1298 * bit 2+ state
1299 */
1300
1301 int state = new_bit & ~3;
1302 int dir = new_bit & 2;
1303
1304 /*
1305 * keep state, bit flip the direction and strip read.
1306 */
1307 return state | (dir ^ 2);
1308}
1309
1310static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1311 struct held_lock *next, enum lock_usage_bit bit)
1312{
1313 /*
1314 * Prove that the new dependency does not connect a hardirq-safe
1315 * lock with a hardirq-unsafe lock - to achieve this we search
1316 * the backwards-subgraph starting at <prev>, and the
1317 * forwards-subgraph starting at <next>:
1318 */
1319 if (!check_usage(curr, prev, next, bit,
1320 exclusive_bit(bit), state_name(bit)))
1321 return 0;
1322
1323 bit++; /* _READ */
1324
1325 /*
1326 * Prove that the new dependency does not connect a hardirq-safe-read
1327 * lock with a hardirq-unsafe lock - to achieve this we search
1328 * the backwards-subgraph starting at <prev>, and the
1329 * forwards-subgraph starting at <next>:
1330 */
1331 if (!check_usage(curr, prev, next, bit,
1332 exclusive_bit(bit), state_name(bit)))
1333 return 0;
1334
1335 return 1;
1336}
1337
1338static int
1339check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1340 struct held_lock *next)
1341{
1342#define LOCKDEP_STATE(__STATE) \
1343 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1344 return 0;
1345#include "lockdep_states.h"
1346#undef LOCKDEP_STATE
1347
1348 return 1;
1349}
1350
1351static void inc_chains(void)
1352{
1353 if (current->hardirq_context)
1354 nr_hardirq_chains++;
1355 else {
1356 if (current->softirq_context)
1357 nr_softirq_chains++;
1358 else
1359 nr_process_chains++;
1360 }
1361}
1362
1363#else
1364
1365static inline int
1366check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1367 struct held_lock *next)
1368{
1369 return 1;
1370}
1371
1372static inline void inc_chains(void)
1373{
1374 nr_process_chains++;
1375}
1376
1377#endif
1378
1379static int
1380print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1381 struct held_lock *next)
1382{
1383 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1384 return 0;
1385
1386 printk("\n=============================================\n");
1387 printk( "[ INFO: possible recursive locking detected ]\n");
1388 print_kernel_version();
1389 printk( "---------------------------------------------\n");
1390 printk("%s/%d is trying to acquire lock:\n",
1391 curr->comm, task_pid_nr(curr));
1392 print_lock(next);
1393 printk("\nbut task is already holding lock:\n");
1394 print_lock(prev);
1395
1396 printk("\nother info that might help us debug this:\n");
1397 lockdep_print_held_locks(curr);
1398
1399 printk("\nstack backtrace:\n");
1400 dump_stack();
1401
1402 return 0;
1403}
1404
1405/*
1406 * Check whether we are holding such a class already.
1407 *
1408 * (Note that this has to be done separately, because the graph cannot
1409 * detect such classes of deadlocks.)
1410 *
1411 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1412 */
1413static int
1414check_deadlock(struct task_struct *curr, struct held_lock *next,
1415 struct lockdep_map *next_instance, int read)
1416{
1417 struct held_lock *prev;
1418 struct held_lock *nest = NULL;
1419 int i;
1420
1421 for (i = 0; i < curr->lockdep_depth; i++) {
1422 prev = curr->held_locks + i;
1423
1424 if (prev->instance == next->nest_lock)
1425 nest = prev;
1426
1427 if (hlock_class(prev) != hlock_class(next))
1428 continue;
1429
1430 /*
1431 * Allow read-after-read recursion of the same
1432 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1433 */
1434 if ((read == 2) && prev->read)
1435 return 2;
1436
1437 /*
1438 * We're holding the nest_lock, which serializes this lock's
1439 * nesting behaviour.
1440 */
1441 if (nest)
1442 return 2;
1443
1444 return print_deadlock_bug(curr, prev, next);
1445 }
1446 return 1;
1447}
1448
1449/*
1450 * There was a chain-cache miss, and we are about to add a new dependency
1451 * to a previous lock. We recursively validate the following rules:
1452 *
1453 * - would the adding of the <prev> -> <next> dependency create a
1454 * circular dependency in the graph? [== circular deadlock]
1455 *
1456 * - does the new prev->next dependency connect any hardirq-safe lock
1457 * (in the full backwards-subgraph starting at <prev>) with any
1458 * hardirq-unsafe lock (in the full forwards-subgraph starting at
1459 * <next>)? [== illegal lock inversion with hardirq contexts]
1460 *
1461 * - does the new prev->next dependency connect any softirq-safe lock
1462 * (in the full backwards-subgraph starting at <prev>) with any
1463 * softirq-unsafe lock (in the full forwards-subgraph starting at
1464 * <next>)? [== illegal lock inversion with softirq contexts]
1465 *
1466 * any of these scenarios could lead to a deadlock.
1467 *
1468 * Then if all the validations pass, we add the forwards and backwards
1469 * dependency.
1470 */
1471static int
1472check_prev_add(struct task_struct *curr, struct held_lock *prev,
1473 struct held_lock *next, int distance)
1474{
1475 struct lock_list *entry;
1476 int ret;
1477
1478 /*
1479 * Prove that the new <prev> -> <next> dependency would not
1480 * create a circular dependency in the graph. (We do this by
1481 * forward-recursing into the graph starting at <next>, and
1482 * checking whether we can reach <prev>.)
1483 *
1484 * We are using global variables to control the recursion, to
1485 * keep the stackframe size of the recursive functions low:
1486 */
1487 check_source = next;
1488 check_target = prev;
1489 if (!(check_noncircular(hlock_class(next), 0)))
1490 return print_circular_bug_tail();
1491
1492 if (!check_prev_add_irq(curr, prev, next))
1493 return 0;
1494
1495 /*
1496 * For recursive read-locks we do all the dependency checks,
1497 * but we dont store read-triggered dependencies (only
1498 * write-triggered dependencies). This ensures that only the
1499 * write-side dependencies matter, and that if for example a
1500 * write-lock never takes any other locks, then the reads are
1501 * equivalent to a NOP.
1502 */
1503 if (next->read == 2 || prev->read == 2)
1504 return 1;
1505 /*
1506 * Is the <prev> -> <next> dependency already present?
1507 *
1508 * (this may occur even though this is a new chain: consider
1509 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1510 * chains - the second one will be new, but L1 already has
1511 * L2 added to its dependency list, due to the first chain.)
1512 */
1513 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1514 if (entry->class == hlock_class(next)) {
1515 if (distance == 1)
1516 entry->distance = 1;
1517 return 2;
1518 }
1519 }
1520
1521 /*
1522 * Ok, all validations passed, add the new lock
1523 * to the previous lock's dependency list:
1524 */
1525 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1526 &hlock_class(prev)->locks_after,
1527 next->acquire_ip, distance);
1528
1529 if (!ret)
1530 return 0;
1531
1532 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1533 &hlock_class(next)->locks_before,
1534 next->acquire_ip, distance);
1535 if (!ret)
1536 return 0;
1537
1538 /*
1539 * Debugging printouts:
1540 */
1541 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1542 graph_unlock();
1543 printk("\n new dependency: ");
1544 print_lock_name(hlock_class(prev));
1545 printk(" => ");
1546 print_lock_name(hlock_class(next));
1547 printk("\n");
1548 dump_stack();
1549 return graph_lock();
1550 }
1551 return 1;
1552}
1553
1554/*
1555 * Add the dependency to all directly-previous locks that are 'relevant'.
1556 * The ones that are relevant are (in increasing distance from curr):
1557 * all consecutive trylock entries and the final non-trylock entry - or
1558 * the end of this context's lock-chain - whichever comes first.
1559 */
1560static int
1561check_prevs_add(struct task_struct *curr, struct held_lock *next)
1562{
1563 int depth = curr->lockdep_depth;
1564 struct held_lock *hlock;
1565
1566 /*
1567 * Debugging checks.
1568 *
1569 * Depth must not be zero for a non-head lock:
1570 */
1571 if (!depth)
1572 goto out_bug;
1573 /*
1574 * At least two relevant locks must exist for this
1575 * to be a head:
1576 */
1577 if (curr->held_locks[depth].irq_context !=
1578 curr->held_locks[depth-1].irq_context)
1579 goto out_bug;
1580
1581 for (;;) {
1582 int distance = curr->lockdep_depth - depth + 1;
1583 hlock = curr->held_locks + depth-1;
1584 /*
1585 * Only non-recursive-read entries get new dependencies
1586 * added:
1587 */
1588 if (hlock->read != 2) {
1589 if (!check_prev_add(curr, hlock, next, distance))
1590 return 0;
1591 /*
1592 * Stop after the first non-trylock entry,
1593 * as non-trylock entries have added their
1594 * own direct dependencies already, so this
1595 * lock is connected to them indirectly:
1596 */
1597 if (!hlock->trylock)
1598 break;
1599 }
1600 depth--;
1601 /*
1602 * End of lock-stack?
1603 */
1604 if (!depth)
1605 break;
1606 /*
1607 * Stop the search if we cross into another context:
1608 */
1609 if (curr->held_locks[depth].irq_context !=
1610 curr->held_locks[depth-1].irq_context)
1611 break;
1612 }
1613 return 1;
1614out_bug:
1615 if (!debug_locks_off_graph_unlock())
1616 return 0;
1617
1618 WARN_ON(1);
1619
1620 return 0;
1621}
1622
1623unsigned long nr_lock_chains;
1624struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1625int nr_chain_hlocks;
1626static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1627
1628struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1629{
1630 return lock_classes + chain_hlocks[chain->base + i];
1631}
1632
1633/*
1634 * Look up a dependency chain. If the key is not present yet then
1635 * add it and return 1 - in this case the new dependency chain is
1636 * validated. If the key is already hashed, return 0.
1637 * (On return with 1 graph_lock is held.)
1638 */
1639static inline int lookup_chain_cache(struct task_struct *curr,
1640 struct held_lock *hlock,
1641 u64 chain_key)
1642{
1643 struct lock_class *class = hlock_class(hlock);
1644 struct list_head *hash_head = chainhashentry(chain_key);
1645 struct lock_chain *chain;
1646 struct held_lock *hlock_curr, *hlock_next;
1647 int i, j, n, cn;
1648
1649 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1650 return 0;
1651 /*
1652 * We can walk it lock-free, because entries only get added
1653 * to the hash:
1654 */
1655 list_for_each_entry(chain, hash_head, entry) {
1656 if (chain->chain_key == chain_key) {
1657cache_hit:
1658 debug_atomic_inc(&chain_lookup_hits);
1659 if (very_verbose(class))
1660 printk("\nhash chain already cached, key: "
1661 "%016Lx tail class: [%p] %s\n",
1662 (unsigned long long)chain_key,
1663 class->key, class->name);
1664 return 0;
1665 }
1666 }
1667 if (very_verbose(class))
1668 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1669 (unsigned long long)chain_key, class->key, class->name);
1670 /*
1671 * Allocate a new chain entry from the static array, and add
1672 * it to the hash:
1673 */
1674 if (!graph_lock())
1675 return 0;
1676 /*
1677 * We have to walk the chain again locked - to avoid duplicates:
1678 */
1679 list_for_each_entry(chain, hash_head, entry) {
1680 if (chain->chain_key == chain_key) {
1681 graph_unlock();
1682 goto cache_hit;
1683 }
1684 }
1685 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1686 if (!debug_locks_off_graph_unlock())
1687 return 0;
1688
1689 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1690 printk("turning off the locking correctness validator.\n");
1691 return 0;
1692 }
1693 chain = lock_chains + nr_lock_chains++;
1694 chain->chain_key = chain_key;
1695 chain->irq_context = hlock->irq_context;
1696 /* Find the first held_lock of current chain */
1697 hlock_next = hlock;
1698 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1699 hlock_curr = curr->held_locks + i;
1700 if (hlock_curr->irq_context != hlock_next->irq_context)
1701 break;
1702 hlock_next = hlock;
1703 }
1704 i++;
1705 chain->depth = curr->lockdep_depth + 1 - i;
1706 cn = nr_chain_hlocks;
1707 while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1708 n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1709 if (n == cn)
1710 break;
1711 cn = n;
1712 }
1713 if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1714 chain->base = cn;
1715 for (j = 0; j < chain->depth - 1; j++, i++) {
1716 int lock_id = curr->held_locks[i].class_idx - 1;
1717 chain_hlocks[chain->base + j] = lock_id;
1718 }
1719 chain_hlocks[chain->base + j] = class - lock_classes;
1720 }
1721 list_add_tail_rcu(&chain->entry, hash_head);
1722 debug_atomic_inc(&chain_lookup_misses);
1723 inc_chains();
1724
1725 return 1;
1726}
1727
1728static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1729 struct held_lock *hlock, int chain_head, u64 chain_key)
1730{
1731 /*
1732 * Trylock needs to maintain the stack of held locks, but it
1733 * does not add new dependencies, because trylock can be done
1734 * in any order.
1735 *
1736 * We look up the chain_key and do the O(N^2) check and update of
1737 * the dependencies only if this is a new dependency chain.
1738 * (If lookup_chain_cache() returns with 1 it acquires
1739 * graph_lock for us)
1740 */
1741 if (!hlock->trylock && (hlock->check == 2) &&
1742 lookup_chain_cache(curr, hlock, chain_key)) {
1743 /*
1744 * Check whether last held lock:
1745 *
1746 * - is irq-safe, if this lock is irq-unsafe
1747 * - is softirq-safe, if this lock is hardirq-unsafe
1748 *
1749 * And check whether the new lock's dependency graph
1750 * could lead back to the previous lock.
1751 *
1752 * any of these scenarios could lead to a deadlock. If
1753 * All validations
1754 */
1755 int ret = check_deadlock(curr, hlock, lock, hlock->read);
1756
1757 if (!ret)
1758 return 0;
1759 /*
1760 * Mark recursive read, as we jump over it when
1761 * building dependencies (just like we jump over
1762 * trylock entries):
1763 */
1764 if (ret == 2)
1765 hlock->read = 2;
1766 /*
1767 * Add dependency only if this lock is not the head
1768 * of the chain, and if it's not a secondary read-lock:
1769 */
1770 if (!chain_head && ret != 2)
1771 if (!check_prevs_add(curr, hlock))
1772 return 0;
1773 graph_unlock();
1774 } else
1775 /* after lookup_chain_cache(): */
1776 if (unlikely(!debug_locks))
1777 return 0;
1778
1779 return 1;
1780}
1781#else
1782static inline int validate_chain(struct task_struct *curr,
1783 struct lockdep_map *lock, struct held_lock *hlock,
1784 int chain_head, u64 chain_key)
1785{
1786 return 1;
1787}
1788#endif
1789
1790/*
1791 * We are building curr_chain_key incrementally, so double-check
1792 * it from scratch, to make sure that it's done correctly:
1793 */
1794static void check_chain_key(struct task_struct *curr)
1795{
1796#ifdef CONFIG_DEBUG_LOCKDEP
1797 struct held_lock *hlock, *prev_hlock = NULL;
1798 unsigned int i, id;
1799 u64 chain_key = 0;
1800
1801 for (i = 0; i < curr->lockdep_depth; i++) {
1802 hlock = curr->held_locks + i;
1803 if (chain_key != hlock->prev_chain_key) {
1804 debug_locks_off();
1805 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1806 curr->lockdep_depth, i,
1807 (unsigned long long)chain_key,
1808 (unsigned long long)hlock->prev_chain_key);
1809 return;
1810 }
1811 id = hlock->class_idx - 1;
1812 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1813 return;
1814
1815 if (prev_hlock && (prev_hlock->irq_context !=
1816 hlock->irq_context))
1817 chain_key = 0;
1818 chain_key = iterate_chain_key(chain_key, id);
1819 prev_hlock = hlock;
1820 }
1821 if (chain_key != curr->curr_chain_key) {
1822 debug_locks_off();
1823 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1824 curr->lockdep_depth, i,
1825 (unsigned long long)chain_key,
1826 (unsigned long long)curr->curr_chain_key);
1827 }
1828#endif
1829}
1830
1831static int
1832print_usage_bug(struct task_struct *curr, struct held_lock *this,
1833 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1834{
1835 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1836 return 0;
1837
1838 printk("\n=================================\n");
1839 printk( "[ INFO: inconsistent lock state ]\n");
1840 print_kernel_version();
1841 printk( "---------------------------------\n");
1842
1843 printk("inconsistent {%s} -> {%s} usage.\n",
1844 usage_str[prev_bit], usage_str[new_bit]);
1845
1846 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1847 curr->comm, task_pid_nr(curr),
1848 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1849 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1850 trace_hardirqs_enabled(curr),
1851 trace_softirqs_enabled(curr));
1852 print_lock(this);
1853
1854 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
f82b217e 1855 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
8e18257d
PZ
1856
1857 print_irqtrace_events(curr);
1858 printk("\nother info that might help us debug this:\n");
1859 lockdep_print_held_locks(curr);
1860
1861 printk("\nstack backtrace:\n");
1862 dump_stack();
1863
1864 return 0;
1865}
1866
1867/*
1868 * Print out an error if an invalid bit is set:
1869 */
1870static inline int
1871valid_state(struct task_struct *curr, struct held_lock *this,
1872 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1873{
f82b217e 1874 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
8e18257d
PZ
1875 return print_usage_bug(curr, this, bad_bit, new_bit);
1876 return 1;
1877}
1878
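/*
 * A rough example of a sequence that ends up in print_usage_bug():
 *
 *	spin_lock(&lock);	(process context, hardirqs enabled)
 *	spin_unlock(&lock);
 *	...
 *	<hardirq>
 *		spin_lock(&lock);
 *
 * The first acquisition marks the class HARDIRQ-ON-W (hardirq-unsafe),
 * the one from the interrupt handler tries to mark it IN-HARDIRQ-W, and
 * valid_state() finds the two mutually exclusive bits on the same class:
 * "inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage" is reported even
 * though no interrupt actually hit while the lock was held this time.
 */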
1879static int mark_lock(struct task_struct *curr, struct held_lock *this,
1880 enum lock_usage_bit new_bit);
1881
81d68a96 1882#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
fbb9ce95
IM
1883
1884/*
1885 * print irq inversion bug:
1886 */
1887static int
1888print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1889 struct held_lock *this, int forwards,
1890 const char *irqclass)
1891{
74c383f1 1892 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
fbb9ce95
IM
1893 return 0;
1894
1895 printk("\n=========================================================\n");
1896 printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
99de055a 1897 print_kernel_version();
fbb9ce95
IM
1898 printk( "---------------------------------------------------------\n");
1899 printk("%s/%d just changed the state of lock:\n",
ba25f9dc 1900 curr->comm, task_pid_nr(curr));
fbb9ce95
IM
1901 print_lock(this);
1902 if (forwards)
1903 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1904 else
1905 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1906 print_lock_name(other);
1907 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1908
1909 printk("\nother info that might help us debug this:\n");
1910 lockdep_print_held_locks(curr);
1911
1912 printk("\nthe first lock's dependencies:\n");
f82b217e 1913 print_lock_dependencies(hlock_class(this), 0);
fbb9ce95
IM
1914
1915 printk("\nthe second lock's dependencies:\n");
1916 print_lock_dependencies(other, 0);
1917
1918 printk("\nstack backtrace:\n");
1919 dump_stack();
1920
1921 return 0;
1922}
1923
1924/*
1925 * Prove that in the forwards-direction subgraph starting at <this>
1926 * there is no lock matching <mask>:
1927 */
1928static int
1929check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1930 enum lock_usage_bit bit, const char *irqclass)
1931{
1932 int ret;
1933
1934 find_usage_bit = bit;
1935 /* fills in <forwards_match> */
f82b217e 1936 ret = find_usage_forwards(hlock_class(this), 0);
fbb9ce95
IM
1937 if (!ret || ret == 1)
1938 return ret;
1939
1940 return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1941}
1942
1943/*
1944 * Prove that in the backwards-direction subgraph starting at <this>
1945 * there is no lock matching <mask>:
1946 */
1947static int
1948check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1949 enum lock_usage_bit bit, const char *irqclass)
1950{
1951 int ret;
1952
1953 find_usage_bit = bit;
1954 /* fills in <backwards_match> */
f82b217e 1955 ret = find_usage_backwards(hlock_class(this), 0);
fbb9ce95
IM
1956 if (!ret || ret == 1)
1957 return ret;
1958
1959 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1960}
1961
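/*
 * A rough sketch of the scenario both checks guard against: suppose lock
 * A has just been marked used-in-hardirq, but somewhere in A's forward
 * dependency graph there is a lock B that was taken with hardirqs
 * enabled (hardirq-unsafe):
 *
 *	CPU0				CPU1
 *	----				----
 *	spin_lock(&A);			spin_lock(&B);    (irqs on)
 *	spin_lock(&B);			<hardirq>
 *					   spin_lock(&A);
 *
 * CPU0 waits for B, CPU1's interrupt handler waits for A, and the task
 * holding B cannot run until that handler returns: the inversion is
 * reported above without it ever having to actually trigger.
 */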
3117df04 1962void print_irqtrace_events(struct task_struct *curr)
fbb9ce95
IM
1963{
1964 printk("irq event stamp: %u\n", curr->irq_events);
1965 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
1966 print_ip_sym(curr->hardirq_enable_ip);
1967 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1968 print_ip_sym(curr->hardirq_disable_ip);
1969 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
1970 print_ip_sym(curr->softirq_enable_ip);
1971 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1972 print_ip_sym(curr->softirq_disable_ip);
1973}
1974
cd95302d 1975static int HARDIRQ_verbose(struct lock_class *class)
fbb9ce95 1976{
8e18257d
PZ
1977#if HARDIRQ_VERBOSE
1978 return class_filter(class);
1979#endif
fbb9ce95
IM
1980 return 0;
1981}
1982
cd95302d 1983static int SOFTIRQ_verbose(struct lock_class *class)
fbb9ce95 1984{
8e18257d
PZ
1985#if SOFTIRQ_VERBOSE
1986 return class_filter(class);
1987#endif
1988 return 0;
fbb9ce95
IM
1989}
1990
cd95302d 1991static int RECLAIM_FS_verbose(struct lock_class *class)
cf40bd16
NP
1992{
1993#if RECLAIM_VERBOSE
1994 return class_filter(class);
1995#endif
1996 return 0;
1997}
1998
fbb9ce95
IM
1999#define STRICT_READ_CHECKS 1
2000
cd95302d
PZ
2001static int (*state_verbose_f[])(struct lock_class *class) = {
2002#define LOCKDEP_STATE(__STATE) \
2003 __STATE##_verbose,
2004#include "lockdep_states.h"
2005#undef LOCKDEP_STATE
2006};
2007
2008static inline int state_verbose(enum lock_usage_bit bit,
2009 struct lock_class *class)
2010{
2011 return state_verbose_f[bit >> 2](class);
2012}
2013
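/*
 * For reference, lockdep_states.h currently lists HARDIRQ, SOFTIRQ and
 * RECLAIM_FS, so the template above expands to roughly:
 *
 *	static int (*state_verbose_f[])(struct lock_class *class) = {
 *		HARDIRQ_verbose,
 *		SOFTIRQ_verbose,
 *		RECLAIM_FS_verbose,
 *	};
 *
 * and state_verbose() can index it with bit >> 2 because each state owns
 * four consecutive usage bits (USED_IN, USED_IN_READ, ENABLED,
 * ENABLED_READ).
 */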
42c50d54
PZ
2014typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2015 enum lock_usage_bit bit, const char *name);
2016
6a6904d3 2017static int
9d3651a2 2018mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit)
6a6904d3 2019{
f989209e 2020 int excl_bit = exclusive_bit(new_bit);
9d3651a2 2021 int read = new_bit & 1;
42c50d54
PZ
2022 int dir = new_bit & 2;
2023
38aa2714
PZ
2024 /*
2025 * mark USED_IN has to look forwards -- to ensure no dependency
2026 * has ENABLED state, which would allow recursion deadlocks.
2027 *
2028 * mark ENABLED has to look backwards -- to ensure no dependee
2029 * has USED_IN state, which, again, would allow recursion deadlocks.
2030 */
42c50d54
PZ
2031 check_usage_f usage = dir ?
2032 check_usage_backwards : check_usage_forwards;
f989209e 2033
38aa2714
PZ
2034 /*
2035 * Validate that this particular lock does not have conflicting
2036 * usage states.
2037 */
6a6904d3
PZ
2038 if (!valid_state(curr, this, new_bit, excl_bit))
2039 return 0;
42c50d54 2040
38aa2714
PZ
2041 /*
2042 * Validate that the lock dependencies don't have conflicting usage
2043 * states.
2044 */
2045 if ((!read || !dir || STRICT_READ_CHECKS) &&
4f367d8a 2046 !usage(curr, this, excl_bit, state_name(new_bit)))
6a6904d3 2047 return 0;
780e820b 2048
38aa2714
PZ
2049 /*
2050 * Check for read in write conflicts
2051 */
2052 if (!read) {
2053 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2054 return 0;
2055
2056 if (STRICT_READ_CHECKS &&
4f367d8a
PZ
2057 !usage(curr, this, excl_bit + 1,
2058 state_name(new_bit + 1)))
38aa2714
PZ
2059 return 0;
2060 }
780e820b 2061
cd95302d 2062 if (state_verbose(new_bit, hlock_class(this)))
6a6904d3
PZ
2063 return 2;
2064
2065 return 1;
2066}
2067
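/*
 * A worked example of the bit decoding above, assuming the usage bits
 * are laid out four per state (USED_IN, USED_IN_READ, ENABLED,
 * ENABLED_READ):
 *
 *	new_bit  = LOCK_ENABLED_HARDIRQ_READ
 *	read     = new_bit & 1   -> 1, a read acquisition
 *	dir      = new_bit & 2   -> 2, an ENABLED bit, so look backwards
 *	excl_bit = exclusive_bit(new_bit) -> LOCK_USED_IN_HARDIRQ
 *
 * i.e. marking this read lock as taken with hardirqs enabled is only
 * valid if no lock that was ever held while acquiring it (the backwards
 * subgraph) is itself used from hardirq context.
 */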
cf40bd16 2068enum mark_type {
36bfb9bb
PZ
2069#define LOCKDEP_STATE(__STATE) __STATE,
2070#include "lockdep_states.h"
2071#undef LOCKDEP_STATE
cf40bd16
NP
2072};
2073
fbb9ce95
IM
2074/*
2075 * Mark all held locks with a usage bit:
2076 */
1d09daa5 2077static int
cf40bd16 2078mark_held_locks(struct task_struct *curr, enum mark_type mark)
fbb9ce95
IM
2079{
2080 enum lock_usage_bit usage_bit;
2081 struct held_lock *hlock;
2082 int i;
2083
2084 for (i = 0; i < curr->lockdep_depth; i++) {
2085 hlock = curr->held_locks + i;
2086
cf2ad4d1
PZ
2087 usage_bit = 2 + (mark << 2); /* ENABLED */
2088 if (hlock->read)
2089 usage_bit += 1; /* READ */
2090
2091 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
cf40bd16 2092
4ff773bb 2093 if (!mark_lock(curr, hlock, usage_bit))
fbb9ce95
IM
2094 return 0;
2095 }
2096
2097 return 1;
2098}
2099
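/*
 * For example, with HARDIRQ being state 0 in the enum above, a
 * write-held lock gets usage_bit = 2 + (0 << 2) = LOCK_ENABLED_HARDIRQ
 * and a read-held one gets 3 = LOCK_ENABLED_HARDIRQ_READ; for SOFTIRQ
 * (state 1) the same computation yields LOCK_ENABLED_SOFTIRQ(_READ).
 * In words: "interrupts are being enabled while these locks are held",
 * which is the ENABLED direction that mark_lock_irq() then validates.
 */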
2100/*
2101 * Debugging helper: via this flag we know that we are in
2102 * 'early bootup code', and will warn about any invalid irqs-on event:
2103 */
2104static int early_boot_irqs_enabled;
2105
2106void early_boot_irqs_off(void)
2107{
2108 early_boot_irqs_enabled = 0;
2109}
2110
2111void early_boot_irqs_on(void)
2112{
2113 early_boot_irqs_enabled = 1;
2114}
2115
2116/*
2117 * Hardirqs will be enabled:
2118 */
6afe40b4 2119void trace_hardirqs_on_caller(unsigned long ip)
fbb9ce95
IM
2120{
2121 struct task_struct *curr = current;
fbb9ce95 2122
6afe40b4 2123 time_hardirqs_on(CALLER_ADDR0, ip);
81d68a96 2124
fbb9ce95
IM
2125 if (unlikely(!debug_locks || current->lockdep_recursion))
2126 return;
2127
2128 if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2129 return;
2130
2131 if (unlikely(curr->hardirqs_enabled)) {
2132 debug_atomic_inc(&redundant_hardirqs_on);
2133 return;
2134 }
2135 /* we'll do an OFF -> ON transition: */
2136 curr->hardirqs_enabled = 1;
fbb9ce95
IM
2137
2138 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2139 return;
2140 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2141 return;
2142 /*
2143 * We are going to turn hardirqs on, so set the
2144 * usage bit for all held locks:
2145 */
cf40bd16 2146 if (!mark_held_locks(curr, HARDIRQ))
fbb9ce95
IM
2147 return;
2148 /*
2149 * If we have softirqs enabled, then set the usage
2150 * bit for all held locks. (disabled hardirqs prevented
2151 * this bit from being set before)
2152 */
2153 if (curr->softirqs_enabled)
cf40bd16 2154 if (!mark_held_locks(curr, SOFTIRQ))
fbb9ce95
IM
2155 return;
2156
8e18257d
PZ
2157 curr->hardirq_enable_ip = ip;
2158 curr->hardirq_enable_event = ++curr->irq_events;
2159 debug_atomic_inc(&hardirqs_on_events);
2160}
81d68a96 2161EXPORT_SYMBOL(trace_hardirqs_on_caller);
8e18257d 2162
1d09daa5 2163void trace_hardirqs_on(void)
81d68a96
SR
2164{
2165 trace_hardirqs_on_caller(CALLER_ADDR0);
2166}
8e18257d
PZ
2167EXPORT_SYMBOL(trace_hardirqs_on);
2168
2169/*
2170 * Hardirqs were disabled:
2171 */
6afe40b4 2172void trace_hardirqs_off_caller(unsigned long ip)
8e18257d
PZ
2173{
2174 struct task_struct *curr = current;
2175
6afe40b4 2176 time_hardirqs_off(CALLER_ADDR0, ip);
81d68a96 2177
8e18257d
PZ
2178 if (unlikely(!debug_locks || current->lockdep_recursion))
2179 return;
2180
2181 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2182 return;
2183
2184 if (curr->hardirqs_enabled) {
2185 /*
2186 * We have done an ON -> OFF transition:
2187 */
2188 curr->hardirqs_enabled = 0;
6afe40b4 2189 curr->hardirq_disable_ip = ip;
8e18257d
PZ
2190 curr->hardirq_disable_event = ++curr->irq_events;
2191 debug_atomic_inc(&hardirqs_off_events);
2192 } else
2193 debug_atomic_inc(&redundant_hardirqs_off);
2194}
81d68a96 2195EXPORT_SYMBOL(trace_hardirqs_off_caller);
8e18257d 2196
1d09daa5 2197void trace_hardirqs_off(void)
81d68a96
SR
2198{
2199 trace_hardirqs_off_caller(CALLER_ADDR0);
2200}
8e18257d
PZ
2201EXPORT_SYMBOL(trace_hardirqs_off);
2202
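/*
 * These two entry points are driven by the irq-flags wrappers; with
 * CONFIG_TRACE_IRQFLAGS the macros in include/linux/irqflags.h look
 * roughly like:
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 *	#define local_irq_disable() \
 *		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
 *
 * so every irqs-off -> irqs-on transition funnels through
 * trace_hardirqs_on_caller() above, which is the one point where held
 * locks can safely be marked ENABLED.
 */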
2203/*
2204 * Softirqs will be enabled:
2205 */
2206void trace_softirqs_on(unsigned long ip)
2207{
2208 struct task_struct *curr = current;
2209
2210 if (unlikely(!debug_locks))
2211 return;
2212
2213 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2214 return;
2215
2216 if (curr->softirqs_enabled) {
2217 debug_atomic_inc(&redundant_softirqs_on);
2218 return;
2219 }
2220
2221 /*
2222 * We'll do an OFF -> ON transition:
2223 */
2224 curr->softirqs_enabled = 1;
2225 curr->softirq_enable_ip = ip;
2226 curr->softirq_enable_event = ++curr->irq_events;
2227 debug_atomic_inc(&softirqs_on_events);
2228 /*
2229 * We are going to turn softirqs on, so set the
2230 * usage bit for all held locks, if hardirqs are
2231 * enabled too:
2232 */
2233 if (curr->hardirqs_enabled)
cf40bd16 2234 mark_held_locks(curr, SOFTIRQ);
8e18257d
PZ
2235}
2236
2237/*
2238 * Softirqs were disabled:
2239 */
2240void trace_softirqs_off(unsigned long ip)
2241{
2242 struct task_struct *curr = current;
2243
2244 if (unlikely(!debug_locks))
2245 return;
2246
2247 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2248 return;
2249
2250 if (curr->softirqs_enabled) {
2251 /*
2252 * We have done an ON -> OFF transition:
2253 */
2254 curr->softirqs_enabled = 0;
2255 curr->softirq_disable_ip = ip;
2256 curr->softirq_disable_event = ++curr->irq_events;
2257 debug_atomic_inc(&softirqs_off_events);
2258 DEBUG_LOCKS_WARN_ON(!softirq_count());
2259 } else
2260 debug_atomic_inc(&redundant_softirqs_off);
2261}
2262
cf40bd16
NP
2263void lockdep_trace_alloc(gfp_t gfp_mask)
2264{
2265 struct task_struct *curr = current;
2266
2267 if (unlikely(!debug_locks))
2268 return;
2269
2270 /* no reclaim without waiting on it */
2271 if (!(gfp_mask & __GFP_WAIT))
2272 return;
2273
2274 /* this guy won't enter reclaim */
2275 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2276 return;
2277
2278 /* We're only interested in __GFP_FS allocations for now */
2279 if (!(gfp_mask & __GFP_FS))
2280 return;
2281
2282 if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
2283 return;
2284
2285 mark_held_locks(curr, RECLAIM_FS);
2286}
2287
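/*
 * A rough sketch of the recursion this annotation catches. A filesystem
 * path that does (lock name purely illustrative):
 *
 *	mutex_lock(&fs_lock);
 *	page = alloc_page(GFP_KERNEL);		(__GFP_FS is set)
 *	mutex_unlock(&fs_lock);
 *
 * while its writeback/reclaim path takes the same fs_lock can deadlock:
 * the allocation may enter reclaim, reclaim may call back into the
 * filesystem, and that callback then blocks on the lock we already hold.
 * mark_held_locks(curr, RECLAIM_FS) here records "held across a __GFP_FS
 * allocation", and mark_irqflags() records "taken while in reclaim", so
 * the conflict is reported without needing real memory pressure.
 */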
8e18257d
PZ
2288static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2289{
2290 /*
2291 * If this is a non-trylock acquisition in a hardirq or softirq
2292 * context, mark the lock as used in these contexts:
2293 */
2294 if (!hlock->trylock) {
2295 if (hlock->read) {
2296 if (curr->hardirq_context)
2297 if (!mark_lock(curr, hlock,
2298 LOCK_USED_IN_HARDIRQ_READ))
2299 return 0;
2300 if (curr->softirq_context)
2301 if (!mark_lock(curr, hlock,
2302 LOCK_USED_IN_SOFTIRQ_READ))
2303 return 0;
2304 } else {
2305 if (curr->hardirq_context)
2306 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2307 return 0;
2308 if (curr->softirq_context)
2309 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2310 return 0;
2311 }
2312 }
2313 if (!hlock->hardirqs_off) {
2314 if (hlock->read) {
2315 if (!mark_lock(curr, hlock,
4fc95e86 2316 LOCK_ENABLED_HARDIRQ_READ))
8e18257d
PZ
2317 return 0;
2318 if (curr->softirqs_enabled)
2319 if (!mark_lock(curr, hlock,
4fc95e86 2320 LOCK_ENABLED_SOFTIRQ_READ))
8e18257d
PZ
2321 return 0;
2322 } else {
2323 if (!mark_lock(curr, hlock,
4fc95e86 2324 LOCK_ENABLED_HARDIRQ))
8e18257d
PZ
2325 return 0;
2326 if (curr->softirqs_enabled)
2327 if (!mark_lock(curr, hlock,
4fc95e86 2328 LOCK_ENABLED_SOFTIRQ))
8e18257d
PZ
2329 return 0;
2330 }
2331 }
2332
cf40bd16
NP
2333 /*
2334 * We reuse the irq context infrastructure more broadly as general
2335 * context-checking code. This tests GFP_FS recursion (a lock taken
2336 * during reclaim for a GFP_FS allocation is held over a GFP_FS
2337 * allocation).
2338 */
2339 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2340 if (hlock->read) {
2341 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2342 return 0;
2343 } else {
2344 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2345 return 0;
2346 }
2347 }
2348
8e18257d
PZ
2349 return 1;
2350}
2351
2352static int separate_irq_context(struct task_struct *curr,
2353 struct held_lock *hlock)
2354{
2355 unsigned int depth = curr->lockdep_depth;
2356
2357 /*
2358 * Keep track of points where we cross into an interrupt context:
2359 */
2360 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2361 curr->softirq_context;
2362 if (depth) {
2363 struct held_lock *prev_hlock;
2364
2365 prev_hlock = curr->held_locks + depth-1;
2366 /*
2367 * If we cross into another context, reset the
2368 * hash key (this also prevents the checking and the
2369 * adding of the dependency to 'prev'):
2370 */
2371 if (prev_hlock->irq_context != hlock->irq_context)
2372 return 1;
2373 }
2374 return 0;
fbb9ce95
IM
2375}
2376
8e18257d 2377#else
fbb9ce95 2378
8e18257d
PZ
2379static inline
2380int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2381 enum lock_usage_bit new_bit)
fbb9ce95 2382{
8e18257d
PZ
2383 WARN_ON(1);
2384 return 1;
2385}
fbb9ce95 2386
8e18257d
PZ
2387static inline int mark_irqflags(struct task_struct *curr,
2388 struct held_lock *hlock)
2389{
2390 return 1;
2391}
fbb9ce95 2392
8e18257d
PZ
2393static inline int separate_irq_context(struct task_struct *curr,
2394 struct held_lock *hlock)
2395{
2396 return 0;
fbb9ce95
IM
2397}
2398
8e18257d 2399#endif
fbb9ce95
IM
2400
2401/*
8e18257d 2402 * Mark a lock with a usage bit, and validate the state transition:
fbb9ce95 2403 */
1d09daa5 2404static int mark_lock(struct task_struct *curr, struct held_lock *this,
0764d23c 2405 enum lock_usage_bit new_bit)
fbb9ce95 2406{
8e18257d 2407 unsigned int new_mask = 1 << new_bit, ret = 1;
fbb9ce95
IM
2408
2409 /*
8e18257d
PZ
2410 * If already set then do not dirty the cacheline,
2411 * nor do any checks:
fbb9ce95 2412 */
f82b217e 2413 if (likely(hlock_class(this)->usage_mask & new_mask))
8e18257d
PZ
2414 return 1;
2415
2416 if (!graph_lock())
2417 return 0;
fbb9ce95 2418 /*
8e18257d 2419 * Make sure we didnt race:
fbb9ce95 2420 */
f82b217e 2421 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
8e18257d
PZ
2422 graph_unlock();
2423 return 1;
2424 }
fbb9ce95 2425
f82b217e 2426 hlock_class(this)->usage_mask |= new_mask;
fbb9ce95 2427
f82b217e 2428 if (!save_trace(hlock_class(this)->usage_traces + new_bit))
8e18257d 2429 return 0;
fbb9ce95 2430
8e18257d 2431 switch (new_bit) {
5346417e
PZ
2432#define LOCKDEP_STATE(__STATE) \
2433 case LOCK_USED_IN_##__STATE: \
2434 case LOCK_USED_IN_##__STATE##_READ: \
2435 case LOCK_ENABLED_##__STATE: \
2436 case LOCK_ENABLED_##__STATE##_READ:
2437#include "lockdep_states.h"
2438#undef LOCKDEP_STATE
8e18257d
PZ
2439 ret = mark_lock_irq(curr, this, new_bit);
2440 if (!ret)
2441 return 0;
2442 break;
2443 case LOCK_USED:
8e18257d
PZ
2444 debug_atomic_dec(&nr_unused_locks);
2445 break;
2446 default:
2447 if (!debug_locks_off_graph_unlock())
2448 return 0;
2449 WARN_ON(1);
2450 return 0;
2451 }
fbb9ce95 2452
8e18257d
PZ
2453 graph_unlock();
2454
2455 /*
2456 * We must printk outside of the graph_lock:
2457 */
2458 if (ret == 2) {
2459 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2460 print_lock(this);
2461 print_irqtrace_events(curr);
2462 dump_stack();
2463 }
2464
2465 return ret;
2466}
fbb9ce95
IM
2467
2468/*
2469 * Initialize a lock instance's lock-class mapping info:
2470 */
2471void lockdep_init_map(struct lockdep_map *lock, const char *name,
4dfbb9d8 2472 struct lock_class_key *key, int subclass)
fbb9ce95
IM
2473{
2474 if (unlikely(!debug_locks))
2475 return;
2476
2477 if (DEBUG_LOCKS_WARN_ON(!key))
2478 return;
2479 if (DEBUG_LOCKS_WARN_ON(!name))
2480 return;
2481 /*
2482 * Sanity check, the lock-class key must be persistent:
2483 */
2484 if (!static_obj(key)) {
2485 printk("BUG: key %p not in .data!\n", key);
2486 DEBUG_LOCKS_WARN_ON(1);
2487 return;
2488 }
2489 lock->name = name;
2490 lock->key = key;
d6d897ce 2491 lock->class_cache = NULL;
96645678
PZ
2492#ifdef CONFIG_LOCK_STAT
2493 lock->cpu = raw_smp_processor_id();
2494#endif
4dfbb9d8
PZ
2495 if (subclass)
2496 register_lock_class(lock, subclass, 1);
fbb9ce95 2497}
fbb9ce95
IM
2498EXPORT_SYMBOL_GPL(lockdep_init_map);
2499
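/*
 * Lock primitives normally reach this through their init helpers; a
 * rough open-coded equivalent (names purely illustrative) would be:
 *
 *	static struct lock_class_key my_key;	(static: see the check above)
 *	struct lockdep_map my_dep_map;
 *
 *	lockdep_init_map(&my_dep_map, "my_lock", &my_key, 0);
 *
 * spin_lock_init() and mutex_init() do the same for their embedded
 * dep_map, passing a static per-callsite key, which is why every init
 * site gets its own lock class.
 */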
2500/*
2501 * This gets called for every mutex_lock*()/spin_lock*() operation.
2502 * We maintain the dependency maps and validate the locking attempt:
2503 */
2504static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2505 int trylock, int read, int check, int hardirqs_off,
7531e2f3 2506 struct lockdep_map *nest_lock, unsigned long ip)
fbb9ce95
IM
2507{
2508 struct task_struct *curr = current;
d6d897ce 2509 struct lock_class *class = NULL;
fbb9ce95 2510 struct held_lock *hlock;
fbb9ce95
IM
2511 unsigned int depth, id;
2512 int chain_head = 0;
2513 u64 chain_key;
2514
f20786ff
PZ
2515 if (!prove_locking)
2516 check = 1;
2517
fbb9ce95
IM
2518 if (unlikely(!debug_locks))
2519 return 0;
2520
2521 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2522 return 0;
2523
2524 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2525 debug_locks_off();
2526 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2527 printk("turning off the locking correctness validator.\n");
2528 return 0;
2529 }
2530
d6d897ce
IM
2531 if (!subclass)
2532 class = lock->class_cache;
2533 /*
2534 * Not cached yet or subclass?
2535 */
fbb9ce95 2536 if (unlikely(!class)) {
4dfbb9d8 2537 class = register_lock_class(lock, subclass, 0);
fbb9ce95
IM
2538 if (!class)
2539 return 0;
2540 }
2541 debug_atomic_inc((atomic_t *)&class->ops);
2542 if (very_verbose(class)) {
2543 printk("\nacquire class [%p] %s", class->key, class->name);
2544 if (class->name_version > 1)
2545 printk("#%d", class->name_version);
2546 printk("\n");
2547 dump_stack();
2548 }
2549
2550 /*
2551 * Add the lock to the list of currently held locks.
2552 * (we dont increase the depth just yet, up until the
2553 * dependency checks are done)
2554 */
2555 depth = curr->lockdep_depth;
2556 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2557 return 0;
2558
2559 hlock = curr->held_locks + depth;
f82b217e
DJ
2560 if (DEBUG_LOCKS_WARN_ON(!class))
2561 return 0;
2562 hlock->class_idx = class - lock_classes + 1;
fbb9ce95
IM
2563 hlock->acquire_ip = ip;
2564 hlock->instance = lock;
7531e2f3 2565 hlock->nest_lock = nest_lock;
fbb9ce95
IM
2566 hlock->trylock = trylock;
2567 hlock->read = read;
2568 hlock->check = check;
6951b12a 2569 hlock->hardirqs_off = !!hardirqs_off;
f20786ff
PZ
2570#ifdef CONFIG_LOCK_STAT
2571 hlock->waittime_stamp = 0;
2572 hlock->holdtime_stamp = sched_clock();
2573#endif
fbb9ce95 2574
8e18257d
PZ
2575 if (check == 2 && !mark_irqflags(curr, hlock))
2576 return 0;
2577
fbb9ce95 2578 /* mark it as used: */
4ff773bb 2579 if (!mark_lock(curr, hlock, LOCK_USED))
fbb9ce95 2580 return 0;
8e18257d 2581
fbb9ce95 2582 /*
17aacfb9 2583 * Calculate the chain hash: it's the combined hash of all the
fbb9ce95
IM
2584 * lock keys along the dependency chain. We save the hash value
2585 * at every step so that we can get the current hash easily
2586 * after unlock. The chain hash is then used to cache dependency
2587 * results.
2588 *
2589 * The 'key ID' (the class index) is the most compact key value we
2590 * can use to drive the hash, not class->key.
2591 */
2592 id = class - lock_classes;
2593 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2594 return 0;
2595
2596 chain_key = curr->curr_chain_key;
2597 if (!depth) {
2598 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2599 return 0;
2600 chain_head = 1;
2601 }
2602
2603 hlock->prev_chain_key = chain_key;
8e18257d
PZ
2604 if (separate_irq_context(curr, hlock)) {
2605 chain_key = 0;
2606 chain_head = 1;
fbb9ce95 2607 }
fbb9ce95 2608 chain_key = iterate_chain_key(chain_key, id);
fbb9ce95 2609
3aa416b0 2610 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
8e18257d 2611 return 0;
381a2292 2612
3aa416b0 2613 curr->curr_chain_key = chain_key;
fbb9ce95
IM
2614 curr->lockdep_depth++;
2615 check_chain_key(curr);
60e114d1
JP
2616#ifdef CONFIG_DEBUG_LOCKDEP
2617 if (unlikely(!debug_locks))
2618 return 0;
2619#endif
fbb9ce95
IM
2620 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2621 debug_locks_off();
2622 printk("BUG: MAX_LOCK_DEPTH too low!\n");
2623 printk("turning off the locking correctness validator.\n");
2624 return 0;
2625 }
381a2292 2626
fbb9ce95
IM
2627 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2628 max_lockdep_depth = curr->lockdep_depth;
2629
2630 return 1;
2631}
2632
2633static int
2634print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2635 unsigned long ip)
2636{
2637 if (!debug_locks_off())
2638 return 0;
2639 if (debug_locks_silent)
2640 return 0;
2641
2642 printk("\n=====================================\n");
2643 printk( "[ BUG: bad unlock balance detected! ]\n");
2644 printk( "-------------------------------------\n");
2645 printk("%s/%d is trying to release lock (",
ba25f9dc 2646 curr->comm, task_pid_nr(curr));
fbb9ce95
IM
2647 print_lockdep_cache(lock);
2648 printk(") at:\n");
2649 print_ip_sym(ip);
2650 printk("but there are no more locks to release!\n");
2651 printk("\nother info that might help us debug this:\n");
2652 lockdep_print_held_locks(curr);
2653
2654 printk("\nstack backtrace:\n");
2655 dump_stack();
2656
2657 return 0;
2658}
2659
2660/*
2661 * Common debugging checks for both nested and non-nested unlock:
2662 */
2663static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2664 unsigned long ip)
2665{
2666 if (unlikely(!debug_locks))
2667 return 0;
2668 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2669 return 0;
2670
2671 if (curr->lockdep_depth <= 0)
2672 return print_unlock_inbalance_bug(curr, lock, ip);
2673
2674 return 1;
2675}
2676
64aa348e 2677static int
00ef9f73
PZ
2678__lock_set_class(struct lockdep_map *lock, const char *name,
2679 struct lock_class_key *key, unsigned int subclass,
2680 unsigned long ip)
64aa348e
PZ
2681{
2682 struct task_struct *curr = current;
2683 struct held_lock *hlock, *prev_hlock;
2684 struct lock_class *class;
2685 unsigned int depth;
2686 int i;
2687
2688 depth = curr->lockdep_depth;
2689 if (DEBUG_LOCKS_WARN_ON(!depth))
2690 return 0;
2691
2692 prev_hlock = NULL;
2693 for (i = depth-1; i >= 0; i--) {
2694 hlock = curr->held_locks + i;
2695 /*
2696 * We must not cross into another context:
2697 */
2698 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2699 break;
2700 if (hlock->instance == lock)
2701 goto found_it;
2702 prev_hlock = hlock;
2703 }
2704 return print_unlock_inbalance_bug(curr, lock, ip);
2705
2706found_it:
00ef9f73 2707 lockdep_init_map(lock, name, key, 0);
64aa348e 2708 class = register_lock_class(lock, subclass, 0);
f82b217e 2709 hlock->class_idx = class - lock_classes + 1;
64aa348e
PZ
2710
2711 curr->lockdep_depth = i;
2712 curr->curr_chain_key = hlock->prev_chain_key;
2713
2714 for (; i < depth; i++) {
2715 hlock = curr->held_locks + i;
2716 if (!__lock_acquire(hlock->instance,
f82b217e 2717 hlock_class(hlock)->subclass, hlock->trylock,
64aa348e 2718 hlock->read, hlock->check, hlock->hardirqs_off,
7531e2f3 2719 hlock->nest_lock, hlock->acquire_ip))
64aa348e
PZ
2720 return 0;
2721 }
2722
2723 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2724 return 0;
2725 return 1;
2726}
2727
fbb9ce95
IM
2728/*
2729 * Remove the lock from the list of currently held locks in a
2730 * potentially non-nested (out of order) manner. This is a
2731 * relatively rare operation, as all the unlock APIs default
2732 * to nested mode (which uses lock_release()):
2733 */
2734static int
2735lock_release_non_nested(struct task_struct *curr,
2736 struct lockdep_map *lock, unsigned long ip)
2737{
2738 struct held_lock *hlock, *prev_hlock;
2739 unsigned int depth;
2740 int i;
2741
2742 /*
2743 * Check whether the lock exists in the current stack
2744 * of held locks:
2745 */
2746 depth = curr->lockdep_depth;
2747 if (DEBUG_LOCKS_WARN_ON(!depth))
2748 return 0;
2749
2750 prev_hlock = NULL;
2751 for (i = depth-1; i >= 0; i--) {
2752 hlock = curr->held_locks + i;
2753 /*
2754 * We must not cross into another context:
2755 */
2756 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2757 break;
2758 if (hlock->instance == lock)
2759 goto found_it;
2760 prev_hlock = hlock;
2761 }
2762 return print_unlock_inbalance_bug(curr, lock, ip);
2763
2764found_it:
f20786ff
PZ
2765 lock_release_holdtime(hlock);
2766
fbb9ce95
IM
2767 /*
2768 * We have the right lock to unlock, 'hlock' points to it.
2769 * Now we remove it from the stack, and add back the other
2770 * entries (if any), recalculating the hash along the way:
2771 */
2772 curr->lockdep_depth = i;
2773 curr->curr_chain_key = hlock->prev_chain_key;
2774
2775 for (i++; i < depth; i++) {
2776 hlock = curr->held_locks + i;
2777 if (!__lock_acquire(hlock->instance,
f82b217e 2778 hlock_class(hlock)->subclass, hlock->trylock,
fbb9ce95 2779 hlock->read, hlock->check, hlock->hardirqs_off,
7531e2f3 2780 hlock->nest_lock, hlock->acquire_ip))
fbb9ce95
IM
2781 return 0;
2782 }
2783
2784 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2785 return 0;
2786 return 1;
2787}
2788
2789/*
2790 * Remove the lock from the list of currently held locks - this gets
2791 * called on mutex_unlock()/spin_unlock*() (or on a failed
2792 * mutex_lock_interruptible()). This is done for unlocks that nest
2793 * perfectly. (i.e. the current top of the lock-stack is unlocked)
2794 */
2795static int lock_release_nested(struct task_struct *curr,
2796 struct lockdep_map *lock, unsigned long ip)
2797{
2798 struct held_lock *hlock;
2799 unsigned int depth;
2800
2801 /*
2802 * Pop off the top of the lock stack:
2803 */
2804 depth = curr->lockdep_depth - 1;
2805 hlock = curr->held_locks + depth;
2806
2807 /*
2808 * Is the unlock non-nested:
2809 */
2810 if (hlock->instance != lock)
2811 return lock_release_non_nested(curr, lock, ip);
2812 curr->lockdep_depth--;
2813
2814 if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2815 return 0;
2816
2817 curr->curr_chain_key = hlock->prev_chain_key;
2818
f20786ff
PZ
2819 lock_release_holdtime(hlock);
2820
fbb9ce95
IM
2821#ifdef CONFIG_DEBUG_LOCKDEP
2822 hlock->prev_chain_key = 0;
f82b217e 2823 hlock->class_idx = 0;
fbb9ce95
IM
2824 hlock->acquire_ip = 0;
2825 hlock->irq_context = 0;
2826#endif
2827 return 1;
2828}
2829
2830/*
2831 * Remove the lock from the list of currently held locks - this gets
2832 * called on mutex_unlock()/spin_unlock*() (or on a failed
2833 * mutex_lock_interruptible()), and dispatches to the nested or
2834 * non-nested helper above depending on the unlock order.
2835 */
2836static void
2837__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2838{
2839 struct task_struct *curr = current;
2840
2841 if (!check_unlock(curr, lock, ip))
2842 return;
2843
2844 if (nested) {
2845 if (!lock_release_nested(curr, lock, ip))
2846 return;
2847 } else {
2848 if (!lock_release_non_nested(curr, lock, ip))
2849 return;
2850 }
2851
2852 check_chain_key(curr);
2853}
2854
2855/*
2856 * Check whether we follow the irq-flags state precisely:
2857 */
1d09daa5 2858static void check_flags(unsigned long flags)
fbb9ce95 2859{
992860e9
IM
2860#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
2861 defined(CONFIG_TRACE_IRQFLAGS)
fbb9ce95
IM
2862 if (!debug_locks)
2863 return;
2864
5f9fa8a6
IM
2865 if (irqs_disabled_flags(flags)) {
2866 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
2867 printk("possible reason: unannotated irqs-off.\n");
2868 }
2869 } else {
2870 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
2871 printk("possible reason: unannotated irqs-on.\n");
2872 }
2873 }
fbb9ce95
IM
2874
2875 /*
2876 * We dont accurately track softirq state in e.g.
2877 * hardirq contexts (such as on 4KSTACKS), so only
2878 * check if not in hardirq contexts:
2879 */
2880 if (!hardirq_count()) {
2881 if (softirq_count())
2882 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2883 else
2884 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2885 }
2886
2887 if (!debug_locks)
2888 print_irqtrace_events(current);
2889#endif
2890}
2891
00ef9f73
PZ
2892void lock_set_class(struct lockdep_map *lock, const char *name,
2893 struct lock_class_key *key, unsigned int subclass,
2894 unsigned long ip)
64aa348e
PZ
2895{
2896 unsigned long flags;
2897
2898 if (unlikely(current->lockdep_recursion))
2899 return;
2900
2901 raw_local_irq_save(flags);
2902 current->lockdep_recursion = 1;
2903 check_flags(flags);
00ef9f73 2904 if (__lock_set_class(lock, name, key, subclass, ip))
64aa348e
PZ
2905 check_chain_key(current);
2906 current->lockdep_recursion = 0;
2907 raw_local_irq_restore(flags);
2908}
00ef9f73 2909EXPORT_SYMBOL_GPL(lock_set_class);
64aa348e 2910
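/*
 * The common wrapper around this is lock_set_subclass() in
 * include/linux/lockdep.h, roughly:
 *
 *	static inline void lock_set_subclass(struct lockdep_map *lock,
 *			unsigned int subclass, unsigned long ip)
 *	{
 *		lock_set_class(lock, lock->name, lock->key, subclass, ip);
 *	}
 *
 * used when an already-held lock has to be re-annotated with a different
 * (sub)class without ever being released as far as lockdep is concerned.
 */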
fbb9ce95
IM
2911/*
2912 * We are not always called with irqs disabled - do that here,
2913 * and also avoid lockdep recursion:
2914 */
1d09daa5 2915void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
7531e2f3
PZ
2916 int trylock, int read, int check,
2917 struct lockdep_map *nest_lock, unsigned long ip)
fbb9ce95
IM
2918{
2919 unsigned long flags;
2920
2921 if (unlikely(current->lockdep_recursion))
2922 return;
2923
2924 raw_local_irq_save(flags);
2925 check_flags(flags);
2926
2927 current->lockdep_recursion = 1;
2928 __lock_acquire(lock, subclass, trylock, read, check,
7531e2f3 2929 irqs_disabled_flags(flags), nest_lock, ip);
fbb9ce95
IM
2930 current->lockdep_recursion = 0;
2931 raw_local_irq_restore(flags);
2932}
fbb9ce95
IM
2933EXPORT_SYMBOL_GPL(lock_acquire);
2934
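/*
 * For reference, the locking primitives reach lock_acquire() through
 * thin wrappers in include/linux/lockdep.h; with CONFIG_PROVE_LOCKING
 * spin_acquire() is roughly:
 *
 *	#define spin_acquire(l, s, t, i)  lock_acquire(l, s, t, 0, 2, NULL, i)
 *	#define spin_release(l, n, i)     lock_release(l, n, i)
 *
 * i.e. a plain spin_lock() is a non-read, non-trylock, check == 2
 * acquisition with no nest_lock and the caller's return address as ip.
 */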
1d09daa5 2935void lock_release(struct lockdep_map *lock, int nested,
0764d23c 2936 unsigned long ip)
fbb9ce95
IM
2937{
2938 unsigned long flags;
2939
2940 if (unlikely(current->lockdep_recursion))
2941 return;
2942
2943 raw_local_irq_save(flags);
2944 check_flags(flags);
2945 current->lockdep_recursion = 1;
2946 __lock_release(lock, nested, ip);
2947 current->lockdep_recursion = 0;
2948 raw_local_irq_restore(flags);
2949}
fbb9ce95
IM
2950EXPORT_SYMBOL_GPL(lock_release);
2951
cf40bd16
NP
2952void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
2953{
2954 current->lockdep_reclaim_gfp = gfp_mask;
2955}
2956
2957void lockdep_clear_current_reclaim_state(void)
2958{
2959 current->lockdep_reclaim_gfp = 0;
2960}
2961
f20786ff
PZ
2962#ifdef CONFIG_LOCK_STAT
2963static int
2964print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
2965 unsigned long ip)
2966{
2967 if (!debug_locks_off())
2968 return 0;
2969 if (debug_locks_silent)
2970 return 0;
2971
2972 printk("\n=================================\n");
2973 printk( "[ BUG: bad contention detected! ]\n");
2974 printk( "---------------------------------\n");
2975 printk("%s/%d is trying to contend lock (",
ba25f9dc 2976 curr->comm, task_pid_nr(curr));
f20786ff
PZ
2977 print_lockdep_cache(lock);
2978 printk(") at:\n");
2979 print_ip_sym(ip);
2980 printk("but there are no locks held!\n");
2981 printk("\nother info that might help us debug this:\n");
2982 lockdep_print_held_locks(curr);
2983
2984 printk("\nstack backtrace:\n");
2985 dump_stack();
2986
2987 return 0;
2988}
2989
2990static void
2991__lock_contended(struct lockdep_map *lock, unsigned long ip)
2992{
2993 struct task_struct *curr = current;
2994 struct held_lock *hlock, *prev_hlock;
2995 struct lock_class_stats *stats;
2996 unsigned int depth;
c7e78cff 2997 int i, contention_point, contending_point;
f20786ff
PZ
2998
2999 depth = curr->lockdep_depth;
3000 if (DEBUG_LOCKS_WARN_ON(!depth))
3001 return;
3002
3003 prev_hlock = NULL;
3004 for (i = depth-1; i >= 0; i--) {
3005 hlock = curr->held_locks + i;
3006 /*
3007 * We must not cross into another context:
3008 */
3009 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3010 break;
3011 if (hlock->instance == lock)
3012 goto found_it;
3013 prev_hlock = hlock;
3014 }
3015 print_lock_contention_bug(curr, lock, ip);
3016 return;
3017
3018found_it:
3019 hlock->waittime_stamp = sched_clock();
3020
c7e78cff
PZ
3021 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3022 contending_point = lock_point(hlock_class(hlock)->contending_point,
3023 lock->ip);
f20786ff 3024
f82b217e 3025 stats = get_lock_stats(hlock_class(hlock));
c7e78cff
PZ
3026 if (contention_point < LOCKSTAT_POINTS)
3027 stats->contention_point[contention_point]++;
3028 if (contending_point < LOCKSTAT_POINTS)
3029 stats->contending_point[contending_point]++;
96645678
PZ
3030 if (lock->cpu != smp_processor_id())
3031 stats->bounces[bounce_contended + !!hlock->read]++;
f20786ff
PZ
3032 put_lock_stats(stats);
3033}
3034
3035static void
c7e78cff 3036__lock_acquired(struct lockdep_map *lock, unsigned long ip)
f20786ff
PZ
3037{
3038 struct task_struct *curr = current;
3039 struct held_lock *hlock, *prev_hlock;
3040 struct lock_class_stats *stats;
3041 unsigned int depth;
3042 u64 now;
96645678
PZ
3043 s64 waittime = 0;
3044 int i, cpu;
f20786ff
PZ
3045
3046 depth = curr->lockdep_depth;
3047 if (DEBUG_LOCKS_WARN_ON(!depth))
3048 return;
3049
3050 prev_hlock = NULL;
3051 for (i = depth-1; i >= 0; i--) {
3052 hlock = curr->held_locks + i;
3053 /*
3054 * We must not cross into another context:
3055 */
3056 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3057 break;
3058 if (hlock->instance == lock)
3059 goto found_it;
3060 prev_hlock = hlock;
3061 }
3062 print_lock_contention_bug(curr, lock, _RET_IP_);
3063 return;
3064
3065found_it:
96645678
PZ
3066 cpu = smp_processor_id();
3067 if (hlock->waittime_stamp) {
3068 now = sched_clock();
3069 waittime = now - hlock->waittime_stamp;
3070 hlock->holdtime_stamp = now;
3071 }
f20786ff 3072
f82b217e 3073 stats = get_lock_stats(hlock_class(hlock));
96645678
PZ
3074 if (waittime) {
3075 if (hlock->read)
3076 lock_time_inc(&stats->read_waittime, waittime);
3077 else
3078 lock_time_inc(&stats->write_waittime, waittime);
3079 }
3080 if (lock->cpu != cpu)
3081 stats->bounces[bounce_acquired + !!hlock->read]++;
f20786ff 3082 put_lock_stats(stats);
96645678
PZ
3083
3084 lock->cpu = cpu;
c7e78cff 3085 lock->ip = ip;
f20786ff
PZ
3086}
3087
3088void lock_contended(struct lockdep_map *lock, unsigned long ip)
3089{
3090 unsigned long flags;
3091
3092 if (unlikely(!lock_stat))
3093 return;
3094
3095 if (unlikely(current->lockdep_recursion))
3096 return;
3097
3098 raw_local_irq_save(flags);
3099 check_flags(flags);
3100 current->lockdep_recursion = 1;
3101 __lock_contended(lock, ip);
3102 current->lockdep_recursion = 0;
3103 raw_local_irq_restore(flags);
3104}
3105EXPORT_SYMBOL_GPL(lock_contended);
3106
c7e78cff 3107void lock_acquired(struct lockdep_map *lock, unsigned long ip)
f20786ff
PZ
3108{
3109 unsigned long flags;
3110
3111 if (unlikely(!lock_stat))
3112 return;
3113
3114 if (unlikely(current->lockdep_recursion))
3115 return;
3116
3117 raw_local_irq_save(flags);
3118 check_flags(flags);
3119 current->lockdep_recursion = 1;
c7e78cff 3120 __lock_acquired(lock, ip);
f20786ff
PZ
3121 current->lockdep_recursion = 0;
3122 raw_local_irq_restore(flags);
3123}
3124EXPORT_SYMBOL_GPL(lock_acquired);
3125#endif
3126
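/*
 * A rough sketch of how a sleeping lock's slow path drives these two
 * hooks (see the mutex implementation for the real thing):
 *
 *	if (the lock could not be taken immediately) {
 *		lock_contended(&lock->dep_map, ip);	...start of the wait
 *		wait until the lock is ours;
 *	}
 *	lock_acquired(&lock->dep_map, ip);		...wait time -> stats
 *
 * __lock_contended() stamps waittime_stamp and records the contention
 * point; __lock_acquired() turns the stamp into read/write wait time and
 * counts a bounce if the lock last changed hands on another CPU.
 */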
fbb9ce95
IM
3127/*
3128 * Used by the testsuite, sanitize the validator state
3129 * after a simulated failure:
3130 */
3131
3132void lockdep_reset(void)
3133{
3134 unsigned long flags;
23d95a03 3135 int i;
fbb9ce95
IM
3136
3137 raw_local_irq_save(flags);
3138 current->curr_chain_key = 0;
3139 current->lockdep_depth = 0;
3140 current->lockdep_recursion = 0;
3141 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3142 nr_hardirq_chains = 0;
3143 nr_softirq_chains = 0;
3144 nr_process_chains = 0;
3145 debug_locks = 1;
23d95a03
IM
3146 for (i = 0; i < CHAINHASH_SIZE; i++)
3147 INIT_LIST_HEAD(chainhash_table + i);
fbb9ce95
IM
3148 raw_local_irq_restore(flags);
3149}
3150
3151static void zap_class(struct lock_class *class)
3152{
3153 int i;
3154
3155 /*
3156 * Remove all dependencies this lock is
3157 * involved in:
3158 */
3159 for (i = 0; i < nr_list_entries; i++) {
3160 if (list_entries[i].class == class)
3161 list_del_rcu(&list_entries[i].entry);
3162 }
3163 /*
3164 * Unhash the class and remove it from the all_lock_classes list:
3165 */
3166 list_del_rcu(&class->hash_entry);
3167 list_del_rcu(&class->lock_entry);
3168
8bfe0298 3169 class->key = NULL;
fbb9ce95
IM
3170}
3171
fabe874a 3172static inline int within(const void *addr, void *start, unsigned long size)
fbb9ce95
IM
3173{
3174 return addr >= start && addr < start + size;
3175}
3176
3177void lockdep_free_key_range(void *start, unsigned long size)
3178{
3179 struct lock_class *class, *next;
3180 struct list_head *head;
3181 unsigned long flags;
3182 int i;
5a26db5b 3183 int locked;
fbb9ce95
IM
3184
3185 raw_local_irq_save(flags);
5a26db5b 3186 locked = graph_lock();
fbb9ce95
IM
3187
3188 /*
3189 * Unhash all classes that were created by this module:
3190 */
3191 for (i = 0; i < CLASSHASH_SIZE; i++) {
3192 head = classhash_table + i;
3193 if (list_empty(head))
3194 continue;
fabe874a 3195 list_for_each_entry_safe(class, next, head, hash_entry) {
fbb9ce95
IM
3196 if (within(class->key, start, size))
3197 zap_class(class);
fabe874a
AV
3198 else if (within(class->name, start, size))
3199 zap_class(class);
3200 }
fbb9ce95
IM
3201 }
3202
5a26db5b
NP
3203 if (locked)
3204 graph_unlock();
fbb9ce95
IM
3205 raw_local_irq_restore(flags);
3206}
3207
3208void lockdep_reset_lock(struct lockdep_map *lock)
3209{
d6d897ce 3210 struct lock_class *class, *next;
fbb9ce95
IM
3211 struct list_head *head;
3212 unsigned long flags;
3213 int i, j;
5a26db5b 3214 int locked;
fbb9ce95
IM
3215
3216 raw_local_irq_save(flags);
fbb9ce95
IM
3217
3218 /*
d6d897ce
IM
3219 * Remove all classes this lock might have:
3220 */
3221 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3222 /*
3223 * If the class exists we look it up and zap it:
3224 */
3225 class = look_up_lock_class(lock, j);
3226 if (class)
3227 zap_class(class);
3228 }
3229 /*
3230 * Debug check: in the end all mapped classes should
3231 * be gone.
fbb9ce95 3232 */
5a26db5b 3233 locked = graph_lock();
fbb9ce95
IM
3234 for (i = 0; i < CLASSHASH_SIZE; i++) {
3235 head = classhash_table + i;
3236 if (list_empty(head))
3237 continue;
3238 list_for_each_entry_safe(class, next, head, hash_entry) {
d6d897ce 3239 if (unlikely(class == lock->class_cache)) {
74c383f1
IM
3240 if (debug_locks_off_graph_unlock())
3241 WARN_ON(1);
d6d897ce 3242 goto out_restore;
fbb9ce95
IM
3243 }
3244 }
3245 }
5a26db5b
NP
3246 if (locked)
3247 graph_unlock();
d6d897ce
IM
3248
3249out_restore:
fbb9ce95
IM
3250 raw_local_irq_restore(flags);
3251}
3252
1499993c 3253void lockdep_init(void)
fbb9ce95
IM
3254{
3255 int i;
3256
3257 /*
3258 * Some architectures have their own start_kernel()
3259 * code which calls lockdep_init(), while we also
3260 * call lockdep_init() from the start_kernel() itself,
3261 * and we want to initialize the hashes only once:
3262 */
3263 if (lockdep_initialized)
3264 return;
3265
3266 for (i = 0; i < CLASSHASH_SIZE; i++)
3267 INIT_LIST_HEAD(classhash_table + i);
3268
3269 for (i = 0; i < CHAINHASH_SIZE; i++)
3270 INIT_LIST_HEAD(chainhash_table + i);
3271
3272 lockdep_initialized = 1;
3273}
3274
3275void __init lockdep_info(void)
3276{
3277 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3278
b0788caf 3279 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
fbb9ce95
IM
3280 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
3281 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
b0788caf 3282 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
fbb9ce95
IM
3283 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
3284 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
3285 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
3286
3287 printk(" memory used by lock dependency info: %lu kB\n",
3288 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3289 sizeof(struct list_head) * CLASSHASH_SIZE +
3290 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3291 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3292 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
3293
3294 printk(" per task-struct memory footprint: %lu bytes\n",
3295 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3296
3297#ifdef CONFIG_DEBUG_LOCKDEP
c71063c9
JB
3298 if (lockdep_init_error) {
3299 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3300 printk("Call stack leading to lockdep invocation was:\n");
3301 print_stack_trace(&lockdep_init_trace, 0);
3302 }
fbb9ce95
IM
3303#endif
3304}
3305
fbb9ce95
IM
3306static void
3307print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
55794a41 3308 const void *mem_to, struct held_lock *hlock)
fbb9ce95
IM
3309{
3310 if (!debug_locks_off())
3311 return;
3312 if (debug_locks_silent)
3313 return;
3314
3315 printk("\n=========================\n");
3316 printk( "[ BUG: held lock freed! ]\n");
3317 printk( "-------------------------\n");
3318 printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
ba25f9dc 3319 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
55794a41 3320 print_lock(hlock);
fbb9ce95
IM
3321 lockdep_print_held_locks(curr);
3322
3323 printk("\nstack backtrace:\n");
3324 dump_stack();
3325}
3326
54561783
ON
3327static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3328 const void* lock_from, unsigned long lock_len)
3329{
3330 return lock_from + lock_len <= mem_from ||
3331 mem_from + mem_len <= lock_from;
3332}
3333
fbb9ce95
IM
3334/*
3335 * Called when kernel memory is freed (or unmapped), or if a lock
3336 * is destroyed or reinitialized - this code checks whether there is
3337 * any held lock in the memory range of <from> to <to>:
3338 */
3339void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3340{
fbb9ce95
IM
3341 struct task_struct *curr = current;
3342 struct held_lock *hlock;
3343 unsigned long flags;
3344 int i;
3345
3346 if (unlikely(!debug_locks))
3347 return;
3348
3349 local_irq_save(flags);
3350 for (i = 0; i < curr->lockdep_depth; i++) {
3351 hlock = curr->held_locks + i;
3352
54561783
ON
3353 if (not_in_range(mem_from, mem_len, hlock->instance,
3354 sizeof(*hlock->instance)))
fbb9ce95
IM
3355 continue;
3356
54561783 3357 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
fbb9ce95
IM
3358 break;
3359 }
3360 local_irq_restore(flags);
3361}
ed07536e 3362EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
fbb9ce95
IM
3363
3364static void print_held_locks_bug(struct task_struct *curr)
3365{
3366 if (!debug_locks_off())
3367 return;
3368 if (debug_locks_silent)
3369 return;
3370
3371 printk("\n=====================================\n");
3372 printk( "[ BUG: lock held at task exit time! ]\n");
3373 printk( "-------------------------------------\n");
3374 printk("%s/%d is exiting with locks still held!\n",
ba25f9dc 3375 curr->comm, task_pid_nr(curr));
fbb9ce95
IM
3376 lockdep_print_held_locks(curr);
3377
3378 printk("\nstack backtrace:\n");
3379 dump_stack();
3380}
3381
3382void debug_check_no_locks_held(struct task_struct *task)
3383{
3384 if (unlikely(task->lockdep_depth > 0))
3385 print_held_locks_bug(task);
3386}
3387
3388void debug_show_all_locks(void)
3389{
3390 struct task_struct *g, *p;
3391 int count = 10;
3392 int unlock = 1;
3393
9c35dd7f
JP
3394 if (unlikely(!debug_locks)) {
3395 printk("INFO: lockdep is turned off.\n");
3396 return;
3397 }
fbb9ce95
IM
3398 printk("\nShowing all locks held in the system:\n");
3399
3400 /*
3401 * Here we try to get the tasklist_lock as hard as possible,
3402 * if not successful after 2 seconds we ignore it (but keep
3403 * trying). This is to enable a debug printout even if a
3404 * tasklist_lock-holding task deadlocks or crashes.
3405 */
3406retry:
3407 if (!read_trylock(&tasklist_lock)) {
3408 if (count == 10)
3409 printk("hm, tasklist_lock locked, retrying... ");
3410 if (count) {
3411 count--;
3412 printk(" #%d", 10-count);
3413 mdelay(200);
3414 goto retry;
3415 }
3416 printk(" ignoring it.\n");
3417 unlock = 0;
46fec7ac 3418 } else {
3419 if (count != 10)
3420 printk(KERN_CONT " locked it.\n");
fbb9ce95 3421 }
fbb9ce95
IM
3422
3423 do_each_thread(g, p) {
85684873
IM
3424 /*
3425 * It's not reliable to print a task's held locks
3426 * if it's not sleeping (or if it's not the current
3427 * task):
3428 */
3429 if (p->state == TASK_RUNNING && p != current)
3430 continue;
fbb9ce95
IM
3431 if (p->lockdep_depth)
3432 lockdep_print_held_locks(p);
3433 if (!unlock)
3434 if (read_trylock(&tasklist_lock))
3435 unlock = 1;
3436 } while_each_thread(g, p);
3437
3438 printk("\n");
3439 printk("=============================================\n\n");
3440
3441 if (unlock)
3442 read_unlock(&tasklist_lock);
3443}
fbb9ce95
IM
3444EXPORT_SYMBOL_GPL(debug_show_all_locks);
3445
82a1fcb9
IM
3446/*
3447 * Careful: only use this function if you are sure that
3448 * the task cannot run in parallel!
3449 */
3450void __debug_show_held_locks(struct task_struct *task)
fbb9ce95 3451{
9c35dd7f
JP
3452 if (unlikely(!debug_locks)) {
3453 printk("INFO: lockdep is turned off.\n");
3454 return;
3455 }
fbb9ce95
IM
3456 lockdep_print_held_locks(task);
3457}
82a1fcb9
IM
3458EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3459
3460void debug_show_held_locks(struct task_struct *task)
3461{
3462 __debug_show_held_locks(task);
3463}
fbb9ce95 3464EXPORT_SYMBOL_GPL(debug_show_held_locks);
b351d164
PZ
3465
3466void lockdep_sys_exit(void)
3467{
3468 struct task_struct *curr = current;
3469
3470 if (unlikely(curr->lockdep_depth)) {
3471 if (!debug_locks_off())
3472 return;
3473 printk("\n================================================\n");
3474 printk( "[ BUG: lock held when returning to user space! ]\n");
3475 printk( "------------------------------------------------\n");
3476 printk("%s/%d is leaving the kernel with locks still held!\n",
3477 curr->comm, curr->pid);
3478 lockdep_print_held_locks(curr);
3479 }
3480}