mm, vmscan: move lru_lock to the node
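
With the LRU lock moving from struct zone up to the node (pg_data_t),
every site in mm/swap.c that took &zone->lru_lock directly is converted
to go through a zone_lru_lock() accessor. The accessor itself is
introduced outside this file; as a minimal sketch, assuming the lock
now lives on zone->zone_pgdat, it presumably reduces to:

	/* Sketch of the accessor the converted call sites rely on.
	 * Every zone of a node resolves to the same per-node lock,
	 * so callers can keep passing a zone unchanged.
	 */
	static inline spinlock_t *zone_lru_lock(struct zone *zone)
	{
		return &zone->zone_pgdat->lru_lock;
	}

The activate_page() hunks additionally canonicalise the page with
compound_head() before looking at LRU state, since the LRU linkage and
PG_active of a compound page are maintained on its head page.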
diff --git a/mm/swap.c b/mm/swap.c
index 90530ff8ed161864de461f208b8c511c369a167a..bf37e5cfae812f6325af2cb5c0cb6933ebab2f7c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -62,12 +62,12 @@ static void __page_cache_release(struct page *page)
                struct lruvec *lruvec;
                unsigned long flags;
 
-               spin_lock_irqsave(&zone->lru_lock, flags);
+               spin_lock_irqsave(zone_lru_lock(zone), flags);
                lruvec = mem_cgroup_page_lruvec(page, zone);
                VM_BUG_ON_PAGE(!PageLRU(page), page);
                __ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, page_off_lru(page));
-               spin_unlock_irqrestore(&zone->lru_lock, flags);
+               spin_unlock_irqrestore(zone_lru_lock(zone), flags);
        }
        mem_cgroup_uncharge(page);
 }
@@ -189,16 +189,16 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
                if (pagezone != zone) {
                        if (zone)
-                               spin_unlock_irqrestore(&zone->lru_lock, flags);
+                               spin_unlock_irqrestore(zone_lru_lock(zone), flags);
                        zone = pagezone;
-                       spin_lock_irqsave(&zone->lru_lock, flags);
+                       spin_lock_irqsave(zone_lru_lock(zone), flags);
                }
 
                lruvec = mem_cgroup_page_lruvec(page, zone);
                (*move_fn)(page, lruvec, arg);
        }
        if (zone)
-               spin_unlock_irqrestore(&zone->lru_lock, flags);
+               spin_unlock_irqrestore(zone_lru_lock(zone), flags);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
 }
@@ -292,6 +292,7 @@ static bool need_activate_page_drain(int cpu)
 
 void activate_page(struct page *page)
 {
+       page = compound_head(page);
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 
@@ -316,9 +317,10 @@ void activate_page(struct page *page)
 {
        struct zone *zone = page_zone(page);
 
-       spin_lock_irq(&zone->lru_lock);
+       page = compound_head(page);
+       spin_lock_irq(zone_lru_lock(zone));
        __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
-       spin_unlock_irq(&zone->lru_lock);
+       spin_unlock_irq(zone_lru_lock(zone));
 }
 #endif
 
@@ -446,13 +448,13 @@ void add_page_to_unevictable_list(struct page *page)
        struct zone *zone = page_zone(page);
        struct lruvec *lruvec;
 
-       spin_lock_irq(&zone->lru_lock);
+       spin_lock_irq(zone_lru_lock(zone));
        lruvec = mem_cgroup_page_lruvec(page, zone);
        ClearPageActive(page);
        SetPageUnevictable(page);
        SetPageLRU(page);
        add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
-       spin_unlock_irq(&zone->lru_lock);
+       spin_unlock_irq(zone_lru_lock(zone));
 }
 
 /**
@@ -742,7 +744,7 @@ void release_pages(struct page **pages, int nr, bool cold)
                 * same zone. The lock is held only if zone != NULL.
                 */
                if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
-                       spin_unlock_irqrestore(&zone->lru_lock, flags);
+                       spin_unlock_irqrestore(zone_lru_lock(zone), flags);
                        zone = NULL;
                }
 
@@ -757,7 +759,7 @@ void release_pages(struct page **pages, int nr, bool cold)
 
                if (PageCompound(page)) {
                        if (zone) {
-                               spin_unlock_irqrestore(&zone->lru_lock, flags);
+                               spin_unlock_irqrestore(zone_lru_lock(zone), flags);
                                zone = NULL;
                        }
                        __put_compound_page(page);
@@ -769,11 +771,11 @@ void release_pages(struct page **pages, int nr, bool cold)
 
                        if (pagezone != zone) {
                                if (zone)
-                                       spin_unlock_irqrestore(&zone->lru_lock,
+                                       spin_unlock_irqrestore(zone_lru_lock(zone),
                                                                        flags);
                                lock_batch = 0;
                                zone = pagezone;
-                               spin_lock_irqsave(&zone->lru_lock, flags);
+                               spin_lock_irqsave(zone_lru_lock(zone), flags);
                        }
 
                        lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -788,7 +790,7 @@ void release_pages(struct page **pages, int nr, bool cold)
                list_add(&page->lru, &pages_to_free);
        }
        if (zone)
-               spin_unlock_irqrestore(&zone->lru_lock, flags);
+               spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 
        mem_cgroup_uncharge_list(&pages_to_free);
        free_hot_cold_page_list(&pages_to_free, cold);
@@ -824,7 +826,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
        VM_BUG_ON_PAGE(PageCompound(page_tail), page);
        VM_BUG_ON_PAGE(PageLRU(page_tail), page);
        VM_BUG_ON(NR_CPUS != 1 &&
-                 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
+                 !spin_is_locked(zone_lru_lock(lruvec_zone(lruvec))));
 
        if (!list)
                SetPageLRU(page_tail);
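
A hedged illustration of why the compound_head() canonicalisation in
activate_page() matters (hypothetical caller, not part of this diff):
without it, a THP tail page handed to activate_page() would itself be
queued in the per-cpu pagevec, even though only the head page is linked
on the LRU lists.

	/* Hypothetical caller sketch: i > 0 selects a tail page of a
	 * THP. After this change activate_page() resolves it to the
	 * head page before queueing it, so __activate_page() always
	 * manipulates the page that actually sits on an LRU list.
	 */
	static void touch_thp_subpage(struct page *head, unsigned int i)
	{
		activate_page(head + i);
	}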