mm: shrink_inactive_list() nr_scan accounting fix fix
authorKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Tue, 22 Sep 2009 00:01:36 +0000 (17:01 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Sep 2009 14:17:28 +0000 (07:17 -0700)
If sc->isolate_pages() returns 0, we don't need to call shrink_page_list().
Previously, shrink_inactive_list() handled this properly.

But commit fb8d14e1 (a three-year-old commit!) broke it.  The current
shrink_inactive_list() always calls shrink_page_list() even when
isolate_pages() returns 0.

This patch restores the proper return value check.

Requirements:
  o "nr_taken == 0" condition should stay before calling shrink_page_list().
  o "nr_taken == 0" condition should stay after nr_scan related statistics
     modification.

Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmscan.c

index 45a150a3a442eb5c17f40909707945a885e2a3b4..d86a91f8c16bab842e1abf5d458d4ef4227e5f33 100644 (file)
@@ -1076,6 +1076,20 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                nr_taken = sc->isolate_pages(sc->swap_cluster_max,
                             &page_list, &nr_scan, sc->order, mode,
                                zone, sc->mem_cgroup, 0, file);
+
+               if (scanning_global_lru(sc)) {
+                       zone->pages_scanned += nr_scan;
+                       if (current_is_kswapd())
+                               __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+                                                      nr_scan);
+                       else
+                               __count_zone_vm_events(PGSCAN_DIRECT, zone,
+                                                      nr_scan);
+               }
+
+               if (nr_taken == 0)
+                       goto done;
+
                nr_active = clear_active_flags(&page_list, count);
                __count_vm_events(PGDEACTIVATE, nr_active);
 
@@ -1088,8 +1102,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                __mod_zone_page_state(zone, NR_INACTIVE_ANON,
                                                -count[LRU_INACTIVE_ANON]);
 
-               if (scanning_global_lru(sc))
-                       zone->pages_scanned += nr_scan;
 
                reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
                reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
@@ -1123,18 +1135,12 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                }
 
                nr_reclaimed += nr_freed;
+
                local_irq_disable();
-               if (current_is_kswapd()) {
-                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+               if (current_is_kswapd())
                        __count_vm_events(KSWAPD_STEAL, nr_freed);
-               } else if (scanning_global_lru(sc))
-                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-
                __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
-               if (nr_taken == 0)
-                       goto done;
-
                spin_lock(&zone->lru_lock);
                /*
                 * Put back any unfreeable pages.
@@ -1164,9 +1170,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        }
                }
        } while (nr_scanned < max_scan);
-       spin_unlock(&zone->lru_lock);
+
 done:
-       local_irq_enable();
+       spin_unlock_irq(&zone->lru_lock);
        pagevec_release(&pvec);
        return nr_reclaimed;
 }