mm: page_alloc: clear PG_mlocked before checking flags on free
author Johannes Weiner <hannes@cmpxchg.org>
Fri, 19 Jun 2009 17:30:56 +0000 (19:30 +0200)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 20 Jun 2009 23:08:22 +0000 (16:08 -0700)
da456f1 "page allocator: do not disable interrupts in free_page_mlock()" moved
the PG_mlocked clearing after the flag sanity checking which makes mlocked
pages always trigger 'bad page'.  Fix this by clearing the bit up front.

Reported-and-debugged-by: Peter Chubb <peter.chubb@nicta.com.au>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Tested-by: Maxim Levitsky <maximlevitsky@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
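
For context: PG_mlocked is one of the flags that must already be clear when a
page reaches the free path, so a page still carrying the bit at sanity-check
time is reported as bad. A condensed paraphrase of the 2.6.30-era code (the
real mask contains more bits, elided here, and PG_mlocked is only part of it
when the architecture supports the bit):

	/* include/linux/page-flags.h: must-be-clear-at-free mask */
	#define PAGE_FLAGS_CHECK_AT_FREE \
		(1 << PG_lru | 1 << PG_locked | /* ... */ __PG_MLOCKED)

	/* mm/page_alloc.c: after da456f1 this ran before free_page_mlock(),
	 * so a still-mlocked page tripped it on every free */
	static inline int free_pages_check(struct page *page)
	{
		if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
			bad_page(page);
			return 1;
		}
		return 0;
	}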
mm/page_alloc.c

index 6f0753fe694c682978cc91b8863359dcda5b823f..30d5093a099dec7f5739a2e78be65f440206ac51 100644
@@ -488,7 +488,6 @@ static inline void __free_one_page(struct page *page,
  */
 static inline void free_page_mlock(struct page *page)
 {
-       __ClearPageMlocked(page);
        __dec_zone_page_state(page, NR_MLOCK);
        __count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
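
With this hunk applied, free_page_mlock() does pure accounting; both clearing
the bit and deciding whether to call the function move to the callers via
TestClearPageMlocked(). The resulting function, read straight from the
context lines above:

	static inline void free_page_mlock(struct page *page)
	{
		__dec_zone_page_state(page, NR_MLOCK);
		__count_vm_event(UNEVICTABLE_MLOCKFREED);
	}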
@@ -558,7 +557,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        unsigned long flags;
        int i;
        int bad = 0;
-       int clearMlocked = PageMlocked(page);
+       int wasMlocked = TestClearPageMlocked(page);
 
        kmemcheck_free_shadow(page, order);
 
@@ -576,7 +575,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        kernel_map_pages(page, 1 << order, 0);
 
        local_irq_save(flags);
-       if (unlikely(clearMlocked))
+       if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order,
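
The wasMlocked flag works because the test-and-clear page-flag accessor
returns the old bit value. Paraphrasing the macro-generated helper from
include/linux/page-flags.h (produced by TESTSCFLAG in this era):

	static inline int TestClearPageMlocked(struct page *page)
	{
		/* atomically clears PG_mlocked and returns its old value:
		 * the flag is gone before free_pages_check() runs, but the
		 * accounting decision is preserved for free_page_mlock() */
		return test_and_clear_bit(PG_mlocked, &page->flags);
	}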
@@ -1022,7 +1021,7 @@ static void free_hot_cold_page(struct page *page, int cold)
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
-       int clearMlocked = PageMlocked(page);
+       int wasMlocked = TestClearPageMlocked(page);
 
        kmemcheck_free_shadow(page, 0);
 
@@ -1041,7 +1040,7 @@ static void free_hot_cold_page(struct page *page, int cold)
        pcp = &zone_pcp(zone, get_cpu())->pcp;
        set_page_private(page, get_pageblock_migratetype(page));
        local_irq_save(flags);
-       if (unlikely(clearMlocked))
+       if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_event(PGFREE);
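
The per-CPU path above follows the same clear-early, account-late order. As a
standalone illustration of why the order matters, here is a minimal userspace
model of the fix (illustrative names only, not kernel code):

	#include <stdio.h>

	#define PG_MLOCKED		(1UL << 0)
	#define FLAGS_CHECK_AT_FREE	PG_MLOCKED	/* stand-in mask */

	struct page { unsigned long flags; };

	static int free_pages_check(struct page *p)
	{
		if (p->flags & FLAGS_CHECK_AT_FREE) {
			printf("bad page\n");
			return 1;
		}
		return 0;
	}

	static int test_clear_mlocked(struct page *p)
	{
		int was = !!(p->flags & PG_MLOCKED);
		p->flags &= ~PG_MLOCKED;
		return was;
	}

	int main(void)
	{
		struct page page = { .flags = PG_MLOCKED };

		/* fixed order: test-and-clear first, sanity-check second */
		int was_mlocked = test_clear_mlocked(&page);

		if (free_pages_check(&page))
			return 1;	/* no longer taken after the fix */
		if (was_mlocked)
			printf("adjust NR_MLOCK accounting\n");
		return 0;
	}

Clearing the bit before the check is exactly what the da456f1 reordering
broke; switching to TestClearPageMlocked() restores it while still calling
free_page_mlock() from inside the existing local_irq_save() region, which
preserves the point of da456f1.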