locking/lockdep: Avoid creating redundant links
author     Peter Zijlstra <peterz@infradead.org>
           Fri, 3 Mar 2017 09:13:38 +0000 (10:13 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Thu, 10 Aug 2017 10:29:04 +0000 (12:29 +0200)
Two boots + a make defconfig; the first kernel didn't have the redundant-link
check in, the second did:

 lock-classes:                         1168       1169 [max: 8191]
 direct dependencies:                  7688       5812 [max: 32768]
 indirect dependencies:               25492      25937
 all direct dependencies:            220113     217512
 dependency chains:                    9005       9008 [max: 65536]
 dependency chain hlocks:             34450      34366 [max: 327680]
 in-hardirq chains:                      55         51
 in-softirq chains:                     371        378
 in-process chains:                    8579       8579
 stack-trace entries:                108073      88474 [max: 524288]
 combined max dependencies:       178738560  169094640

 max locking depth:                      15         15
 max bfs queue depth:                   320        329

 cyclic checks:                        9123       9190

 redundant checks:                                5046
 redundant links:                                 1828

 find-mask forwards checks:            2564       2599
 find-mask backwards checks:          39521      39789

So it saves nearly 2k direct links and a fair chunk of stack-trace entries but,
as expected, makes no real difference to the indirect dependencies.
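
For illustration, here is a toy userspace sketch of the idea (all names below
are invented for this example; the kernel side works on lock_list/lock_class
entries and uses __bfs_forwards()/class_equal, as in the patch): a new direct
link is only recorded if the target is not already reachable through existing
edges.

  /*
   * Toy model of the redundancy test: a small adjacency-matrix graph
   * plus a forward BFS reachability query.
   */
  #include <stdbool.h>
  #include <stdio.h>

  #define NR_NODES 4

  static bool edge[NR_NODES][NR_NODES];

  /* Forward BFS: is @target already reachable from @src? */
  static bool reachable(int src, int target)
  {
          int queue[NR_NODES], head = 0, tail = 0;
          bool seen[NR_NODES] = { false };

          seen[src] = true;
          queue[tail++] = src;

          while (head < tail) {
                  int n = queue[head++];

                  if (n == target)
                          return true;

                  for (int m = 0; m < NR_NODES; m++) {
                          if (edge[n][m] && !seen[m]) {
                                  seen[m] = true;
                                  queue[tail++] = m;
                          }
                  }
          }
          return false;
  }

  /* Only record prev -> next if the graph doesn't already imply it. */
  static void add_link(int prev, int next)
  {
          if (reachable(prev, next)) {
                  printf("%d -> %d is redundant, not added\n", prev, next);
                  return;
          }
          edge[prev][next] = true;
  }

  int main(void)
  {
          add_link(0, 1);         /* A -> B */
          add_link(1, 2);         /* B -> C */
          add_link(0, 2);         /* A -> C: implied by A -> B -> C, skipped */
          return 0;
  }

In the patch the check runs before the dependency and its stack trace would be
recorded, which is where the direct-dependency and stack-trace savings in the
table above come from.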

At the same time, you see the max BFS depth increase, which is also
expected, although it could easily be boot variance -- these numbers are
not entirely stable between boots.

The downside is that the cycles in the graph become larger and thus the
reports become harder to read.

XXX: do we want this as a CONFIG variable, implied by LOCKDEP_SMALL?
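
If it were, one minimal shape would be to compile the walk out when the option
is off and let LOCKDEP_SMALL imply it from Kconfig. A rough sketch only;
CONFIG_LOCKDEP_REDUNDANT_LINKS is an invented name, not part of this patch:

  static noinline int
  check_redundant(struct lock_list *root, struct lock_class *target,
                  struct lock_list **target_entry)
  {
  #ifdef CONFIG_LOCKDEP_REDUNDANT_LINKS   /* hypothetical symbol */
          debug_atomic_inc(nr_redundant_checks);

          return __bfs_forwards(root, target, class_equal, target_entry);
  #else
          return 1;       /* "not found": always add the new link */
  #endif
  }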

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Nikolay Borisov <nborisov@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: iamjoonsoo.kim@lge.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Link: http://lkml.kernel.org/r/20170303091338.GH6536@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/lockdep.c
kernel/locking/lockdep_internals.h
kernel/locking/lockdep_proc.c

kernel/locking/lockdep.c
index 986f2fa79dbb4cf6b870cf449d0dd558373571fc..b2dd313951cebb365fd8c62988e3c0cce26f57ef 100644
@@ -1307,6 +1307,19 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
        return result;
 }
 
+static noinline int
+check_redundant(struct lock_list *root, struct lock_class *target,
+               struct lock_list **target_entry)
+{
+       int result;
+
+       debug_atomic_inc(nr_redundant_checks);
+
+       result = __bfs_forwards(root, target, class_equal, target_entry);
+
+       return result;
+}
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
@@ -1872,6 +1885,20 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
                }
        }
 
+       /*
+        * Is the <prev> -> <next> link redundant?
+        */
+       this.class = hlock_class(prev);
+       this.parent = NULL;
+       ret = check_redundant(&this, hlock_class(next), &target_entry);
+       if (!ret) {
+               debug_atomic_inc(nr_redundant);
+               return 2;
+       }
+       if (ret < 0)
+               return print_bfs_bug(ret);
+
+
        if (!*stack_saved) {
                if (!save_trace(&trace))
                        return 0;
kernel/locking/lockdep_internals.h
index c08fbd2f5ba9fa2a806f326a3a85f5d021d74027..1da4669d57a7a62c330c8d24ca9d30819a892a2a 100644
@@ -143,6 +143,8 @@ struct lockdep_stats {
        int     redundant_softirqs_on;
        int     redundant_softirqs_off;
        int     nr_unused_locks;
+       int     nr_redundant_checks;
+       int     nr_redundant;
        int     nr_cyclic_checks;
        int     nr_cyclic_check_recursions;
        int     nr_find_usage_forwards_checks;
kernel/locking/lockdep_proc.c
index 6d1fcc786081a0cddb66cb2537cae22e1121b685..68d9e267ccd46df87d88bef89a809a40a05ed9bd 100644
@@ -201,6 +201,10 @@ static void lockdep_stats_debug_show(struct seq_file *m)
                debug_atomic_read(chain_lookup_hits));
        seq_printf(m, " cyclic checks:                 %11llu\n",
                debug_atomic_read(nr_cyclic_checks));
+       seq_printf(m, " redundant checks:              %11llu\n",
+               debug_atomic_read(nr_redundant_checks));
+       seq_printf(m, " redundant links:               %11llu\n",
+               debug_atomic_read(nr_redundant));
        seq_printf(m, " find-mask forwards checks:     %11llu\n",
                debug_atomic_read(nr_find_usage_forwards_checks));
        seq_printf(m, " find-mask backwards checks:    %11llu\n",