mm/slab.c: use list_first_entry_or_null()
Author:     Geliang Tang <geliangtang@163.com>
AuthorDate: Thu, 14 Jan 2016 23:17:56 +0000 (15:17 -0800)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Fri, 15 Jan 2016 00:00:49 +0000 (16:00 -0800)
Simplify the code with list_first_entry_or_null(): replace the open-coded
comparisons of ->next against the list head with a helper that returns the
first entry, or NULL when the list is empty.
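
For reference, the helper folds the empty-list check and list_entry() into a
single expression; at the time of this commit it is defined in
include/linux/list.h essentially as:

	#define list_first_entry_or_null(ptr, type, member) \
		(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)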

Signed-off-by: Geliang Tang <geliangtang@163.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

diff --git a/mm/slab.c b/mm/slab.c
index 4765c97ce6900d98b8ce2968cf9ff62a176f6e42..6bb046649450626abbcb0796c23084f2e9ac6584 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2791,18 +2791,18 @@ retry:
        }
 
        while (batchcount > 0) {
-               struct list_head *entry;
                struct page *page;
                /* Get the slab that the alloc is to come from. */
-               entry = n->slabs_partial.next;
-               if (entry == &n->slabs_partial) {
+               page = list_first_entry_or_null(&n->slabs_partial,
+                               struct page, lru);
+               if (!page) {
                        n->free_touched = 1;
-                       entry = n->slabs_free.next;
-                       if (entry == &n->slabs_free)
+                       page = list_first_entry_or_null(&n->slabs_free,
+                                       struct page, lru);
+                       if (!page)
                                goto must_grow;
                }
 
-               page = list_entry(entry, struct page, lru);
                check_spinlock_acquired(cachep);
 
                /*
@@ -3085,7 +3085,6 @@ retry:
 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
                                int nodeid)
 {
-       struct list_head *entry;
        struct page *page;
        struct kmem_cache_node *n;
        void *obj;
@@ -3098,15 +3097,16 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 retry:
        check_irq_off();
        spin_lock(&n->list_lock);
-       entry = n->slabs_partial.next;
-       if (entry == &n->slabs_partial) {
+       page = list_first_entry_or_null(&n->slabs_partial,
+                       struct page, lru);
+       if (!page) {
                n->free_touched = 1;
-               entry = n->slabs_free.next;
-               if (entry == &n->slabs_free)
+               page = list_first_entry_or_null(&n->slabs_free,
+                               struct page, lru);
+               if (!page)
                        goto must_grow;
        }
 
-       page = list_entry(entry, struct page, lru);
        check_spinlock_acquired_node(cachep, nodeid);
 
        STATS_INC_NODEALLOCS(cachep);
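
Below is a minimal, self-contained userspace sketch of the idiom this patch
adopts. The toy struct slab (standing in for struct page), the hand-rolled
list helpers, and main() are illustrative assumptions, not kernel code; only
list_first_entry_or_null() mirrors the kernel definition:

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Recover the containing struct from a pointer to its embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member) container_of(ptr, type, member)

#define list_empty(head) ((head)->next == (head))

#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)

/* First entry of the list, or NULL if the list is empty. */
#define list_first_entry_or_null(ptr, type, member) \
	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)

struct slab {				/* stand-in for struct page */
	struct list_head lru;
	int id;
};

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

int main(void)
{
	struct list_head partial = LIST_HEAD_INIT(partial);
	struct slab s = { .id = 42 };
	struct slab *first;

	/* Empty list: one call replaces the open-coded head comparison. */
	first = list_first_entry_or_null(&partial, struct slab, lru);
	printf("empty list -> %s\n", first ? "entry" : "NULL");

	list_add_tail(&s.lru, &partial);
	first = list_first_entry_or_null(&partial, struct slab, lru);
	printf("first id -> %d\n", first ? first->id : -1);
	return 0;
}

Built with any C compiler, this prints "NULL" and then 42, matching the
two !page checks in the patched hunks above.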