mm, vmstat: print node-based stats in zoneinfo file
diff --git a/mm/vmstat.c b/mm/vmstat.c
index de0c17076270b6a60e3ae544152f4c276285e551..ac509572a50b6adebe64e611dbb8171c37858949 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -446,11 +446,6 @@ void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 }
 EXPORT_SYMBOL(mod_zone_page_state);
 
-void inc_zone_state(struct zone *zone, enum zone_stat_item item)
-{
-       mod_zone_state(zone, item, 1, 1);
-}
-
 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 {
        mod_zone_state(page_zone(page), item, 1, 1);
@@ -539,15 +534,6 @@ void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 }
 EXPORT_SYMBOL(mod_zone_page_state);
 
-void inc_zone_state(struct zone *zone, enum zone_stat_item item)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __inc_zone_state(zone, item);
-       local_irq_restore(flags);
-}
-
 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 {
        unsigned long flags;
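Both variants of inc_zone_state() are dropped by the two hunks above: the cmpxchg-based one built on mod_zone_state() and the irq-disabling one built on __inc_zone_state(). The page-based wrapper survives as the entry point for zone counters; a minimal caller-side sketch (the call site is hypothetical, not part of this patch):

	/* Hypothetical call site: bump a per-zone counter for a page.
	 * inc_zone_page_state() derives the zone via page_zone() and
	 * takes the same update path the removed helper used. */
	inc_zone_page_state(page, NR_MLOCK);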
@@ -935,27 +921,15 @@ int fragmentation_index(struct zone *zone, unsigned int order)
 const char * const vmstat_text[] = {
        /* enum zone_stat_item counters */
        "nr_free_pages",
-       "nr_alloc_batch",
        "nr_zone_anon_lru",
        "nr_zone_file_lru",
+       "nr_zone_write_pending",
        "nr_mlock",
-       "nr_anon_pages",
-       "nr_mapped",
-       "nr_file_pages",
-       "nr_dirty",
-       "nr_writeback",
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_kernel_stack",
-       "nr_unstable",
        "nr_bounce",
-       "nr_vmscan_write",
-       "nr_vmscan_immediate_reclaim",
-       "nr_writeback_temp",
-       "nr_shmem",
-       "nr_dirtied",
-       "nr_written",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
        "nr_zspages",
 #endif
@@ -967,12 +941,6 @@ const char * const vmstat_text[] = {
        "numa_local",
        "numa_other",
 #endif
-       "workingset_refault",
-       "workingset_activate",
-       "workingset_nodereclaim",
-       "nr_anon_transparent_hugepages",
-       "nr_shmem_hugepages",
-       "nr_shmem_pmdmapped",
        "nr_free_cma",
 
        /* Node-based counters */
@@ -984,6 +952,24 @@ const char * const vmstat_text[] = {
        "nr_isolated_anon",
        "nr_isolated_file",
        "nr_pages_scanned",
+       "workingset_refault",
+       "workingset_activate",
+       "workingset_nodereclaim",
+       "nr_anon_pages",
+       "nr_mapped",
+       "nr_file_pages",
+       "nr_dirty",
+       "nr_writeback",
+       "nr_writeback_temp",
+       "nr_shmem",
+       "nr_shmem_hugepages",
+       "nr_shmem_pmdmapped",
+       "nr_anon_transparent_hugepages",
+       "nr_unstable",
+       "nr_vmscan_write",
+       "nr_vmscan_immediate_reclaim",
+       "nr_dirtied",
+       "nr_written",
 
        /* enum writeback_stat_item counters */
        "nr_dirty_threshold",
@@ -997,6 +983,8 @@ const char * const vmstat_text[] = {
        "pswpout",
 
        TEXTS_FOR_ZONES("pgalloc")
+       TEXTS_FOR_ZONES("allocstall")
+       TEXTS_FOR_ZONES("pgskip")
 
        "pgfree",
        "pgactivate",
@@ -1022,7 +1010,6 @@ const char * const vmstat_text[] = {
        "kswapd_low_wmark_hit_quickly",
        "kswapd_high_wmark_hit_quickly",
        "pageoutrun",
-       "allocstall",
 
        "pgrotated",
 
@@ -1428,11 +1415,35 @@ static const struct file_operations pagetypeinfo_file_ops = {
        .release        = seq_release,
 };
 
+static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
+{
+       int zid;
+
+       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+               struct zone *compare = &pgdat->node_zones[zid];
+
+               if (populated_zone(compare))
+                       return zone == compare;
+       }
+
+       /* The zone must be somewhere! */
+       WARN_ON_ONCE(1);
+       return false;
+}
+
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                        struct zone *zone)
 {
        int i;
        seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
+       if (is_zone_first_populated(pgdat, zone)) {
+               seq_printf(m, "\n  per-node stats");
+               for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
+                       seq_printf(m, "\n      %-12s %lu",
+                               vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
+                               node_page_state(pgdat, i));
+               }
+       }
        seq_printf(m,
                   "\n  pages free     %lu"
                   "\n        min      %lu"
@@ -1645,10 +1656,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
                val = atomic_long_read(&vm_zone_stat[i]);
                if (val < 0) {
                        switch (i) {
-                       case NR_ALLOC_BATCH:
                        case NR_PAGES_SCANNED:
                                /*
-                                * These are often seen to go negative in
+                                * This is often seen to go negative in
                                 * recent kernels, but not to go permanently
                                 * negative.  Whilst it would be nicer not to
                                 * have exceptions, rooting them out would be