mm/page_owner: initialize page owner without holding the zone lock
author: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Tue, 26 Jul 2016 22:23:43 +0000 (15:23 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Jul 2016 23:19:19 +0000 (16:19 -0700)
It's not necessary to initialize page_owner while holding the zone lock.
It would cause more contention on the zone lock although it's not a big
problem since it is just a debug feature.  But, it is better than before
so do it.  This is also preparation step to use stackdepot in page owner
feature.  Stackdepot allocates new pages when there is no reserved space
and holding the zone lock in this case will cause deadlock.

Link: http://lkml.kernel.org/r/1464230275-25791-2-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/compaction.c
mm/page_alloc.c
mm/page_isolation.c

index 3cda95451d93e3932f3bcfb902ea5e7c9f316c9c..4ae1294068a8652df2a365aefb15cb748d7d40a6 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kasan.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -79,6 +80,8 @@ static void map_pages(struct list_head *list)
                arch_alloc_page(page, order);
                kernel_map_pages(page, nr_pages, 1);
                kasan_alloc_pages(page, order);
+
+               set_page_owner(page, order, __GFP_MOVABLE);
                if (order)
                        split_page(page, order);
 
index 44cee1e1d65b5502b6c722c46031cf20ff20c460..f07552fc43e1c90c421b3ec0a7392f254a528721 100644 (file)
@@ -2509,8 +2509,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
        zone->free_area[order].nr_free--;
        rmv_page_order(page);
 
-       set_page_owner(page, order, __GFP_MOVABLE);
-
        /* Set the pageblock if the isolated page is at least a pageblock */
        if (order >= pageblock_order - 1) {
                struct page *endpage = page + (1 << order) - 1;
index 612122bf6a4236ff57a8bacf1d3dcd600c02c821..927f5ee24c879a96e5c604b8b6c1a328d9c5d8c8 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -108,8 +109,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
                        if (pfn_valid_within(page_to_pfn(buddy)) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
-                               kernel_map_pages(page, (1 << order), 1);
-                               set_page_refcounted(page);
                                isolated_page = page;
                        }
                }
@@ -128,8 +127,12 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
        zone->nr_isolate_pageblock--;
 out:
        spin_unlock_irqrestore(&zone->lock, flags);
-       if (isolated_page)
+       if (isolated_page) {
+               kernel_map_pages(page, (1 << order), 1);
+               set_page_refcounted(page);
+               set_page_owner(page, order, __GFP_MOVABLE);
                __free_pages(isolated_page, order);
+       }
 }
 
 static inline struct page *