mm, vmscan: remove highmem_file_pages
author Mel Gorman <mgorman@techsingularity.net>
Thu, 28 Jul 2016 22:47:29 +0000 (15:47 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Jul 2016 23:07:41 +0000 (16:07 -0700)
With the reintroduction of per-zone LRU stats, highmem_file_pages is
redundant, so remove it.
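
As an illustration (not part of the commit): once NR_ZONE_INACTIVE_FILE
and NR_ZONE_ACTIVE_FILE exist as per-zone counters, the total that the
old highmem_file_pages atomic tracked can be derived on demand by
walking the highmem zones.  A minimal sketch in kernel style follows;
the helper name highmem_file_pages_total is hypothetical, while the
node/zone iteration and stat accessors mirror the diff below.

	#include <linux/mmzone.h>
	#include <linux/nodemask.h>
	#include <linux/vmstat.h>

	/* Hypothetical helper, for illustration only. */
	static unsigned long highmem_file_pages_total(void)
	{
		unsigned long pages = 0;
		int node, i;

		for_each_node_state(node, N_HIGH_MEMORY) {
			/* Highmem zones sit above ZONE_NORMAL. */
			for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
				struct zone *z =
					&NODE_DATA(node)->node_zones[i];

				if (!is_highmem(z) || !populated_zone(z))
					continue;

				/* Per-zone LRU counters replace the atomic. */
				pages += zone_page_state(z,
						NR_ZONE_INACTIVE_FILE);
				pages += zone_page_state(z,
						NR_ZONE_ACTIVE_FILE);
			}
		}
		return pages;
	}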

[mgorman@techsingularity.net: wrong stat is being accumulated in highmem_dirtyable_memory]
Link: http://lkml.kernel.org/r/20160725092324.GM10438@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm_inline.h
mm/page-writeback.c

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 9cc130f5feb29c18c9038c6f107af9b673f5fd80..71613e8a720f99b6870f1e3f8957d2761a6ac1a0 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -4,22 +4,6 @@
 #include <linux/huge_mm.h>
 #include <linux/swap.h>
 
-#ifdef CONFIG_HIGHMEM
-extern atomic_t highmem_file_pages;
-
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
-                                                       int nr_pages)
-{
-       if (is_highmem_idx(zid) && is_file_lru(lru))
-               atomic_add(nr_pages, &highmem_file_pages);
-}
-#else
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
-                                                       int nr_pages)
-{
-}
-#endif
-
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
  * @page: the page to test
@@ -47,7 +31,6 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
        __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
        __mod_zone_page_state(&pgdat->node_zones[zid],
                                NR_ZONE_LRU_BASE + lru, nr_pages);
-       acct_highmem_file_pages(zid, lru, nr_pages);
 }
 
 static __always_inline void update_lru_size(struct lruvec *lruvec,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 573d138fa7a592e5ff2637d1636ec0c87b00585e..7b5920a3500f865577abd4743afa8e5e712d92d4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -299,17 +299,13 @@ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 
        return nr_pages;
 }
-#ifdef CONFIG_HIGHMEM
-atomic_t highmem_file_pages;
-#endif
 
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
        int node;
-       unsigned long x;
+       unsigned long x = 0;
        int i;
-       unsigned long dirtyable = 0;
 
        for_each_node_state(node, N_HIGH_MEMORY) {
                for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
@@ -326,12 +322,12 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
                        nr_pages = zone_page_state(z, NR_FREE_PAGES);
                        /* watch for underflows */
                        nr_pages -= min(nr_pages, high_wmark_pages(z));
-                       dirtyable += nr_pages;
+                       nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
+                       nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+                       x += nr_pages;
                }
        }
 
-       x = dirtyable + atomic_read(&highmem_file_pages);
-
        /*
         * Unreclaimable memory (kernel memory or anonymous memory
         * without swap) can bring down the dirtyable pages below