slab: destroy a slab without holding any alien cache lock
Author:     Joonsoo Kim <iamjoonsoo.kim@lge.com>
AuthorDate: Wed, 6 Aug 2014 23:04:33 +0000 (16:04 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Thu, 7 Aug 2014 01:01:14 +0000 (18:01 -0700)
I haven't heard that the alien cache lock is actually contended, but
reducing the chance of contention is generally worthwhile.  To that end,
__drain_alien_cache() no longer calls slabs_destroy() with the alien
cache lock held; instead it collects the slabs to free on a
caller-provided list, and each caller destroys them after dropping the
lock.  This change also lets us simplify the complex lockdep annotation
in the slab code; that simplification is implemented in the following
patch.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
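
To make the pattern concrete, here is a minimal userspace sketch of the
technique (plain C with pthreads; every name in it is illustrative, none
of it is kernel code): destruction is moved out from under the lock by
detaching the victims onto a local list while the lock is held and
freeing them only after unlocking.

/*
 * Minimal userspace analogue of the pattern used by this patch:
 * never destroy objects while the cache lock is held; detach them
 * onto a local list under the lock and destroy them after unlocking.
 */
#include <pthread.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *cache_head;		/* objects owned by the cache */

/* Detach every cached object; caller must hold cache_lock. */
static struct obj *detach_all_locked(void)
{
	struct obj *list = cache_head;

	cache_head = NULL;
	return list;
}

/* The expensive part, done without any lock held. */
static void destroy_all(struct obj *list)
{
	while (list) {
		struct obj *next = list->next;

		free(list);
		list = next;
	}
}

static void drain_cache(void)
{
	struct obj *list;

	pthread_mutex_lock(&cache_lock);
	list = detach_all_locked();	/* cheap: pointer moves only */
	pthread_mutex_unlock(&cache_lock);
	destroy_all(list);		/* slow path runs lock-free */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct obj *o = calloc(1, sizeof(*o));

		if (!o)
			return 1;
		pthread_mutex_lock(&cache_lock);
		o->next = cache_head;
		cache_head = o;
		pthread_mutex_unlock(&cache_lock);
	}
	drain_cache();
	return 0;
}

The kernel-side equivalent is the local LIST_HEAD(list) that each caller
now provides: __drain_alien_cache() only fills the list under the locks,
and slabs_destroy() runs after the locks are released.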
mm/slab.c

index e4ce73c32a7a509a3e31206cc10985254ccc4c9a..e4dc0896b891c07ed0063417533d80641fa95e5c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1050,10 +1050,10 @@ static void free_alien_cache(struct alien_cache **alc_ptr)
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
-                               struct array_cache *ac, int node)
+                               struct array_cache *ac, int node,
+                               struct list_head *list)
 {
        struct kmem_cache_node *n = get_node(cachep, node);
-       LIST_HEAD(list);
 
        if (ac->avail) {
                spin_lock(&n->list_lock);
@@ -1065,10 +1065,9 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
                if (n->shared)
                        transfer_objects(n->shared, ac, ac->limit);
 
-               free_block(cachep, ac->entry, ac->avail, node, &list);
+               free_block(cachep, ac->entry, ac->avail, node, list);
                ac->avail = 0;
                spin_unlock(&n->list_lock);
-               slabs_destroy(cachep, &list);
        }
 }
 
@@ -1086,8 +1085,11 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
                if (alc) {
                        ac = &alc->ac;
                        if (ac->avail && spin_trylock_irq(&alc->lock)) {
-                               __drain_alien_cache(cachep, ac, node);
+                               LIST_HEAD(list);
+
+                               __drain_alien_cache(cachep, ac, node, &list);
                                spin_unlock_irq(&alc->lock);
+                               slabs_destroy(cachep, &list);
                        }
                }
        }
@@ -1104,10 +1106,13 @@ static void drain_alien_cache(struct kmem_cache *cachep,
        for_each_online_node(i) {
                alc = alien[i];
                if (alc) {
+                       LIST_HEAD(list);
+
                        ac = &alc->ac;
                        spin_lock_irqsave(&alc->lock, flags);
-                       __drain_alien_cache(cachep, ac, i);
+                       __drain_alien_cache(cachep, ac, i, &list);
                        spin_unlock_irqrestore(&alc->lock, flags);
+                       slabs_destroy(cachep, &list);
                }
        }
 }
@@ -1138,10 +1143,11 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
                spin_lock(&alien->lock);
                if (unlikely(ac->avail == ac->limit)) {
                        STATS_INC_ACOVERFLOW(cachep);
-                       __drain_alien_cache(cachep, ac, nodeid);
+                       __drain_alien_cache(cachep, ac, nodeid, &list);
                }
                ac_put_obj(cachep, ac, objp);
                spin_unlock(&alien->lock);
+               slabs_destroy(cachep, &list);
        } else {
                n = get_node(cachep, nodeid);
                spin_lock(&n->list_lock);
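
For readability, this is the resulting __drain_alien_cache() stitched
together from the first two hunks above (the unchanged lines that gitweb
skips between the hunks are marked as elided).  The function now only
queues the freed objects on the caller-supplied list under n->list_lock;
destroying the slabs is the caller's job once the alien cache lock has
been dropped:

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/* ... unchanged lines elided ... */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

All three callers follow the same sequence: set up a local LIST_HEAD(list)
(reap_alien() and drain_alien_cache() declare it in the hunks above;
cache_free_alien() reuses the list already declared earlier in that
function), take the alien cache lock, drain into the list, drop the lock,
then call slabs_destroy(cachep, &list).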