mm: delete NR_PAGES_SCANNED and pgdat_reclaimable()
author Johannes Weiner <hannes@cmpxchg.org>
Wed, 3 May 2017 21:52:10 +0000 (14:52 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 3 May 2017 22:52:08 +0000 (15:52 -0700)
NR_PAGES_SCANNED counts the number of pages scanned since the last page free
event in the allocator.  This was used primarily to measure the
reclaimability of zones and nodes, and determine when reclaim should
give up on them.  In that role, it has been replaced in the preceding
patches by a different mechanism.
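
For context, the heuristic being retired is the one deleted from
mm/vmscan.c below: a node counted as reclaimable as long as fewer than
six times its reclaimable pages had been scanned since the last page
free event.

    bool pgdat_reclaimable(struct pglist_data *pgdat)
    {
            return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
                    pgdat_reclaimable_pages(pgdat) * 6;
    }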

Being implemented as an efficient vmstat counter, it was automatically
exported to userspace as well.  However, it is unlikely that anyone
outside the kernel is using this counter in any meaningful way.
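
Being a vmstat counter, it showed up as "nr_pages_scanned" in
/proc/vmstat and as a per-node "node_scanned" field in /proc/zoneinfo;
both output strings are dropped by the mm/vmstat.c hunks below.  As a
minimal sketch, this is the kind of userspace read that stops matching
once the patch is applied:

    #include <stdio.h>
    #include <string.h>

    /* Print the "nr_pages_scanned" line from /proc/vmstat; after
     * this patch the loop simply finds no match. */
    int main(void)
    {
            char line[128];
            FILE *f = fopen("/proc/vmstat", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    if (!strncmp(line, "nr_pages_scanned", 16))
                            fputs(line, stdout);
            fclose(f);
            return 0;
    }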

Remove the counter and the unused pgdat_reclaimable().
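
The different mechanism referred to above is visible in the
show_free_areas() hunk in mm/page_alloc.c: reclaimability is now judged
by whether kswapd keeps failing on the node.  A sketch of that check,
using a hypothetical helper name (the field and limit are the ones used
below):

    /* A node is given up on once kswapd has failed
     * MAX_RECLAIM_RETRIES consecutive attempts to reclaim
     * anything from it. */
    static bool pgdat_unreclaimable(struct pglist_data *pgdat)
    {
            return pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES;
    }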

Link: http://lkml.kernel.org/r/20170228214007.5621-8-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jia He <hejianet@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mmzone.h
mm/internal.h
mm/page_alloc.c
mm/vmscan.c
mm/vmstat.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d2c50ab6ae40e579a8fb7f7a32e8442e16440f3f..04e0969966f67a98130de3e3062d83ebc6f63509 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -149,7 +149,6 @@ enum node_stat_item {
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
-       NR_PAGES_SCANNED,       /* pages scanned since last reclaim */
        WORKINGSET_REFAULT,
        WORKINGSET_ACTIVATE,
        WORKINGSET_NODERECLAIM,
diff --git a/mm/internal.h b/mm/internal.h
index e5a0e0ec2177bc661e785021a604efe9bd691a44..a36719572eb9e59a493871238fff112b9b7a2486 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -91,7 +91,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern bool pgdat_reclaimable(struct pglist_data *pgdat);
 
 /*
  * in mm/rmap.c:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 42c0543e46c3f2ba5eb206f6767cf26d08799f92..6994f28f769c48a91b37a808923e28e864673564 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1090,14 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
-       unsigned long nr_scanned;
        bool isolated_pageblocks;
 
        spin_lock(&zone->lock);
        isolated_pageblocks = has_isolate_pageblock(zone);
-       nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-       if (nr_scanned)
-               __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
 
        while (count) {
                struct page *page;
@@ -1150,12 +1146,7 @@ static void free_one_page(struct zone *zone,
                                unsigned int order,
                                int migratetype)
 {
-       unsigned long nr_scanned;
        spin_lock(&zone->lock);
-       nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-       if (nr_scanned)
-               __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-
        if (unlikely(has_isolate_pageblock(zone) ||
                is_migrate_isolate(migratetype))) {
                migratetype = get_pfnblock_migratetype(page, pfn);
@@ -4504,7 +4495,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
                        " writeback_tmp:%lukB"
                        " unstable:%lukB"
-                       " pages_scanned:%lu"
                        " all_unreclaimable? %s"
                        "\n",
                        pgdat->node_id,
@@ -4527,7 +4517,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
                        K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
                        K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
-                       node_page_state(pgdat, NR_PAGES_SCANNED),
                        pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
                                "yes" : "no");
        }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9117ae8d49eed25939fedcddfcc11c278bca6bd4..02f2eb51b33e8e2f89d152c99c4c0f3ddd29f8ad 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -230,12 +230,6 @@ unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
        return nr;
 }
 
-bool pgdat_reclaimable(struct pglist_data *pgdat)
-{
-       return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
-               pgdat_reclaimable_pages(pgdat) * 6;
-}
-
 /**
  * lruvec_lru_size -  Returns the number of pages on the given LRU list.
  * @lruvec: lru vector
@@ -1750,7 +1744,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        reclaim_stat->recent_scanned[file] += nr_taken;
 
        if (global_reclaim(sc)) {
-               __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
                if (current_is_kswapd())
                        __count_vm_events(PGSCAN_KSWAPD, nr_scanned);
                else
@@ -1953,8 +1946,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
        reclaim_stat->recent_scanned[file] += nr_taken;
 
-       if (global_reclaim(sc))
-               __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
        __count_vm_events(PGREFILL, nr_scanned);
 
        spin_unlock_irq(&pgdat->lru_lock);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index baee70dafba898f4a8d4387460d29eaf66a3ddb0..c8d15051616b4b94329af25a914ce833d221302d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -954,7 +954,6 @@ const char * const vmstat_text[] = {
        "nr_unevictable",
        "nr_isolated_anon",
        "nr_isolated_file",
-       "nr_pages_scanned",
        "workingset_refault",
        "workingset_activate",
        "workingset_nodereclaim",
@@ -1378,7 +1377,6 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        min      %lu"
                   "\n        low      %lu"
                   "\n        high     %lu"
-                  "\n   node_scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu"
                   "\n        managed  %lu",
@@ -1386,7 +1384,6 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
-                  node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED),
                   zone->spanned_pages,
                   zone->present_pages,
                   zone->managed_pages);
@@ -1586,22 +1583,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
                val = atomic_long_read(&vm_zone_stat[i]);
                if (val < 0) {
-                       switch (i) {
-                       case NR_PAGES_SCANNED:
-                               /*
-                                * This is often seen to go negative in
-                                * recent kernels, but not to go permanently
-                                * negative.  Whilst it would be nicer not to
-                                * have exceptions, rooting them out would be
-                                * another task, of rather low priority.
-                                */
-                               break;
-                       default:
-                               pr_warn("%s: %s %ld\n",
-                                       __func__, vmstat_text[i], val);
-                               err = -EINVAL;
-                               break;
-                       }
+                       pr_warn("%s: %s %ld\n",
+                               __func__, vmstat_text[i], val);
+                       err = -EINVAL;
                }
        }
        if (err)