1 /*
2  *  linux/mm/vmstat.c
3  *
4  *  Manages VM statistics
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  zoned VM statistics
8  *  Copyright (C) 2006 Silicon Graphics, Inc.,
9  *              Christoph Lameter <christoph@lameter.com>
10  *  Copyright (C) 2008-2014 Christoph Lameter
11  */
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/err.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/cpu.h>
18 #include <linux/cpumask.h>
19 #include <linux/vmstat.h>
20 #include <linux/proc_fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/debugfs.h>
23 #include <linux/sched.h>
24 #include <linux/math64.h>
25 #include <linux/writeback.h>
26 #include <linux/compaction.h>
27 #include <linux/mm_inline.h>
28 #include <linux/page_ext.h>
29 #include <linux/page_owner.h>
30
31 #include "internal.h"
32
33 #ifdef CONFIG_VM_EVENT_COUNTERS
34 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
35 EXPORT_PER_CPU_SYMBOL(vm_event_states);
36
37 static void sum_vm_events(unsigned long *ret)
38 {
39         int cpu;
40         int i;
41
42         memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
43
44         for_each_online_cpu(cpu) {
45                 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
46
47                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
48                         ret[i] += this->event[i];
49         }
50 }
51
52 /*
53  * Accumulate the vm event counters across all CPUs.
54  * The result is unavoidably approximate - it can change
55  * during and after execution of this function.
56  */
57 void all_vm_events(unsigned long *ret)
58 {
59         get_online_cpus();
60         sum_vm_events(ret);
61         put_online_cpus();
62 }
63 EXPORT_SYMBOL_GPL(all_vm_events);
64
65 /*
66  * Fold the foreign cpu events into our own.
67  *
68  * This is adding to the events on one processor
69  * but keeps the global counts constant.
70  */
71 void vm_events_fold_cpu(int cpu)
72 {
73         struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
74         int i;
75
76         for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
77                 count_vm_events(i, fold_state->event[i]);
78                 fold_state->event[i] = 0;
79         }
80 }
81
82 #endif /* CONFIG_VM_EVENT_COUNTERS */
83
84 /*
85  * Manage combined zone based / global counters
86  *
87  * vm_stat contains the global counters
88  */
89 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90 EXPORT_SYMBOL(vm_stat);
91
92 #ifdef CONFIG_SMP
93
94 int calculate_pressure_threshold(struct zone *zone)
95 {
96         int threshold;
97         int watermark_distance;
98
99         /*
100          * As vmstats are not up to date, there is drift between the estimated
101          * and real values. For high thresholds and a high number of CPUs, it
102          * is possible for the min watermark to be breached while the estimated
103          * value looks fine. The pressure threshold is a reduced value such
104          * that even the maximum amount of drift will not accidentally breach
105          * the min watermark
106          */
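        /*
         * Illustrative example (hypothetical figures, not from this file):
         * with low_wmark - min_wmark = 512 pages and 8 online CPUs, the
         * pressure threshold becomes max(1, 512 / 8) = 64, which is then
         * capped at 125 below.
         */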
107         watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
108         threshold = max(1, (int)(watermark_distance / num_online_cpus()));
109
110         /*
111          * Maximum threshold is 125
112          */
113         threshold = min(125, threshold);
114
115         return threshold;
116 }
117
118 int calculate_normal_threshold(struct zone *zone)
119 {
120         int threshold;
121         int mem;        /* memory in 128 MB units */
122
123         /*
124          * The threshold scales with the number of processors and the amount
125          * of memory per zone. More memory means that we can defer updates for
126          * longer, more processors could lead to more contention.
127          * fls() is used to have a cheap way of logarithmic scaling.
128          *
129          * Some sample thresholds:
130          *
131          * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
132          * ------------------------------------------------------------------
133          * 8            1               1       0.9-1 GB        4
134          * 16           2               2       0.9-1 GB        4
135          * 20           2               2       1-2 GB          5
136          * 24           2               2       2-4 GB          6
137          * 28           2               2       4-8 GB          7
138          * 32           2               2       8-16 GB         8
139          * 4            2               2       <128M           1
140          * 30           4               3       2-4 GB          5
141          * 48           4               3       8-16 GB         8
142          * 32           8               4       1-2 GB          4
143          * 32           8               4       0.9-1GB         4
144          * 10           16              5       <128M           1
145          * 40           16              5       900M            4
146          * 70           64              7       2-4 GB          5
147          * 84           64              7       4-8 GB          6
148          * 108          512             9       4-8 GB          6
149          * 125          1024            10      8-16 GB         8
150          * 125          1024            10      16-32 GB        9
151          */
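        /*
         * Worked example (illustrative figures): a zone with roughly 3 GB of
         * managed memory gives mem = 3 GB / 128 MB = 24, so fls(mem) = 5.
         * With 16 online CPUs, fls(16) = 5, and the formula below yields
         * threshold = 2 * 5 * (1 + 5) = 60, well under the 125 cap.
         */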
152
153         mem = zone->managed_pages >> (27 - PAGE_SHIFT);
154
155         threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
156
157         /*
158          * Maximum threshold is 125
159          */
160         threshold = min(125, threshold);
161
162         return threshold;
163 }
164
165 /*
166  * Refresh the thresholds for each zone.
167  */
168 void refresh_zone_stat_thresholds(void)
169 {
170         struct zone *zone;
171         int cpu;
172         int threshold;
173
174         for_each_populated_zone(zone) {
175                 unsigned long max_drift, tolerate_drift;
176
177                 threshold = calculate_normal_threshold(zone);
178
179                 for_each_online_cpu(cpu)
180                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
181                                                         = threshold;
182
183                 /*
184                  * Only set percpu_drift_mark if there is a danger that
185                  * NR_FREE_PAGES reports the low watermark is ok when in fact
186                  * the min watermark could be breached by an allocation
187                  */
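                /*
                 * Hypothetical example: with threshold = 60 and 16 online
                 * CPUs, the estimate of NR_FREE_PAGES can be off by up to
                 * 16 * 60 = 960 pages. If low_wmark - min_wmark is smaller
                 * than that, percpu_drift_mark is set so that the exact
                 * (but slower) counter snapshot is used near the watermarks.
                 */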
188                 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
189                 max_drift = num_online_cpus() * threshold;
190                 if (max_drift > tolerate_drift)
191                         zone->percpu_drift_mark = high_wmark_pages(zone) +
192                                         max_drift;
193         }
194 }
195
196 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
197                                 int (*calculate_pressure)(struct zone *))
198 {
199         struct zone *zone;
200         int cpu;
201         int threshold;
202         int i;
203
204         for (i = 0; i < pgdat->nr_zones; i++) {
205                 zone = &pgdat->node_zones[i];
206                 if (!zone->percpu_drift_mark)
207                         continue;
208
209                 threshold = (*calculate_pressure)(zone);
210                 for_each_online_cpu(cpu)
211                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
212                                                         = threshold;
213         }
214 }
215
216 /*
217  * For use when we know that interrupts are disabled,
218  * or when we know that preemption is disabled and that
219  * particular counter cannot be updated from interrupt context.
220  */
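/*
 * Illustrative call, not quoted from any particular caller: a path that
 * already runs with interrupts disabled might do
 *
 *        __mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr_pages);
 *
 * whereas contexts without such guarantees must use mod_zone_page_state().
 */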
221 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
222                            long delta)
223 {
224         struct per_cpu_pageset __percpu *pcp = zone->pageset;
225         s8 __percpu *p = pcp->vm_stat_diff + item;
226         long x;
227         long t;
228
229         x = delta + __this_cpu_read(*p);
230
231         t = __this_cpu_read(pcp->stat_threshold);
232
233         if (unlikely(x > t || x < -t)) {
234                 zone_page_state_add(x, zone, item);
235                 x = 0;
236         }
237         __this_cpu_write(*p, x);
238 }
239 EXPORT_SYMBOL(__mod_zone_page_state);
240
241 /*
242  * Optimized increment and decrement functions.
243  *
244  * These are only for a single page and therefore can take a struct page *
245  * argument instead of struct zone *. This allows the inclusion of the code
246  * generated for page_zone(page) into the optimized functions.
247  *
248  * No overflow check is necessary and therefore the differential can be
249  * incremented or decremented in place which may allow the compilers to
250  * generate better code.
251  * The increment or decrement is known and therefore one boundary check can
252  * be omitted.
253  *
254  * NOTE: These functions are very performance sensitive. Change only
255  * with care.
256  *
257  * Some processors have inc/dec instructions that are atomic vs an interrupt.
258  * However, the code must first determine the differential location in a zone
259  * based on the processor number and then inc/dec the counter. There is no
260  * guarantee without disabling preemption that the processor will not change
261  * in between and therefore the atomicity vs. interrupt cannot be exploited
262  * in a useful way here.
263  */
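/*
 * Numeric sketch of the overstep behaviour (hypothetical threshold): with
 * stat_threshold t = 32, once the per-cpu diff reaches 33 the code below
 * folds 33 + 16 = 49 into the zone counter and resets the diff to -16.
 * The folded value plus the new diff (49 - 16 = 33) still matches the real
 * count, and starting from -overstep spaces out the next fold.
 */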
264 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
265 {
266         struct per_cpu_pageset __percpu *pcp = zone->pageset;
267         s8 __percpu *p = pcp->vm_stat_diff + item;
268         s8 v, t;
269
270         v = __this_cpu_inc_return(*p);
271         t = __this_cpu_read(pcp->stat_threshold);
272         if (unlikely(v > t)) {
273                 s8 overstep = t >> 1;
274
275                 zone_page_state_add(v + overstep, zone, item);
276                 __this_cpu_write(*p, -overstep);
277         }
278 }
279
280 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
281 {
282         __inc_zone_state(page_zone(page), item);
283 }
284 EXPORT_SYMBOL(__inc_zone_page_state);
285
286 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
287 {
288         struct per_cpu_pageset __percpu *pcp = zone->pageset;
289         s8 __percpu *p = pcp->vm_stat_diff + item;
290         s8 v, t;
291
292         v = __this_cpu_dec_return(*p);
293         t = __this_cpu_read(pcp->stat_threshold);
294         if (unlikely(v < -t)) {
295                 s8 overstep = t >> 1;
296
297                 zone_page_state_add(v - overstep, zone, item);
298                 __this_cpu_write(*p, overstep);
299         }
300 }
301
302 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
303 {
304         __dec_zone_state(page_zone(page), item);
305 }
306 EXPORT_SYMBOL(__dec_zone_page_state);
307
308 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
309 /*
310  * If we have cmpxchg_local support then we do not need to incur the overhead
311  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
312  *
313  * mod_state() modifies the zone counter state through atomic per cpu
314  * operations.
315  *
316  * Overstep mode specifies how overstep should be handled:
317  *     0       No overstepping
318  *     1       Overstepping half of threshold
319  *     -1      Overstepping minus half of threshold
320  */
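/*
 * For instance (illustrative numbers): with t = 32 and overstep_mode = 1,
 * an increment that pushes the per-cpu diff to 33 folds 33 + 16 = 49 into
 * the zone counter and leaves the diff at -16; with overstep_mode = 0,
 * exactly 33 would be folded and the diff reset to 0.
 */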
321 static inline void mod_state(struct zone *zone, enum zone_stat_item item,
322                              long delta, int overstep_mode)
323 {
324         struct per_cpu_pageset __percpu *pcp = zone->pageset;
325         s8 __percpu *p = pcp->vm_stat_diff + item;
326         long o, n, t, z;
327
328         do {
329                 z = 0;  /* overflow to zone counters */
330
331                 /*
332                  * The fetching of the stat_threshold is racy. We may apply
333          * a counter threshold to the wrong cpu if we get
334                  * rescheduled while executing here. However, the next
335                  * counter update will apply the threshold again and
336                  * therefore bring the counter under the threshold again.
337                  *
338          * Most of the time the thresholds are the same anyway
339                  * for all cpus in a zone.
340                  */
341                 t = this_cpu_read(pcp->stat_threshold);
342
343                 o = this_cpu_read(*p);
344                 n = delta + o;
345
346                 if (n > t || n < -t) {
347                         int os = overstep_mode * (t >> 1);
348
349                         /* Overflow must be added to zone counters */
350                         z = n + os;
351                         n = -os;
352                 }
353         } while (this_cpu_cmpxchg(*p, o, n) != o);
354
355         if (z)
356                 zone_page_state_add(z, zone, item);
357 }
358
359 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
360                          long delta)
361 {
362         mod_state(zone, item, delta, 0);
363 }
364 EXPORT_SYMBOL(mod_zone_page_state);
365
366 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
367 {
368         mod_state(zone, item, 1, 1);
369 }
370
371 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
372 {
373         mod_state(page_zone(page), item, 1, 1);
374 }
375 EXPORT_SYMBOL(inc_zone_page_state);
376
377 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
378 {
379         mod_state(page_zone(page), item, -1, -1);
380 }
381 EXPORT_SYMBOL(dec_zone_page_state);
382 #else
383 /*
384  * Use interrupt disable to serialize counter updates
385  */
386 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
387                          long delta)
388 {
389         unsigned long flags;
390
391         local_irq_save(flags);
392         __mod_zone_page_state(zone, item, delta);
393         local_irq_restore(flags);
394 }
395 EXPORT_SYMBOL(mod_zone_page_state);
396
397 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
398 {
399         unsigned long flags;
400
401         local_irq_save(flags);
402         __inc_zone_state(zone, item);
403         local_irq_restore(flags);
404 }
405
406 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
407 {
408         unsigned long flags;
409         struct zone *zone;
410
411         zone = page_zone(page);
412         local_irq_save(flags);
413         __inc_zone_state(zone, item);
414         local_irq_restore(flags);
415 }
416 EXPORT_SYMBOL(inc_zone_page_state);
417
418 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
419 {
420         unsigned long flags;
421
422         local_irq_save(flags);
423         __dec_zone_page_state(page, item);
424         local_irq_restore(flags);
425 }
426 EXPORT_SYMBOL(dec_zone_page_state);
427 #endif
428
429
430 /*
431  * Fold a differential into the global counters.
432  * Returns the number of counters updated.
433  */
434 static int fold_diff(int *diff)
435 {
436         int i;
437         int changes = 0;
438
439         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
440                 if (diff[i]) {
441                         atomic_long_add(diff[i], &vm_stat[i]);
442                         changes++;
443                 }
444         return changes;
445 }
446
447 /*
448  * Update the zone counters for the current cpu.
449  *
450  * Note that refresh_cpu_vm_stats strives to only access
451  * node local memory. The per cpu pagesets on remote zones are placed
452  * in the memory local to the processor using that pageset. So the
453  * loop over all zones will access a series of cachelines local to
454  * the processor.
455  *
456  * The call to zone_page_state_add updates the cachelines with the
457  * statistics in the remote zone struct as well as the global cachelines
458  * with the global counters. This can cause remote node cache line
459  * bouncing, so it is only done when necessary.
460  *
461  * The function returns the number of global counters updated.
462  */
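/*
 * Rough sketch of the NUMA expiry logic below: whenever this cpu has counter
 * updates for a remote zone, p->expire is re-armed to 3; each later pass
 * without updates decrements it, so a remote pcp list is only drained after
 * this cpu has left it untouched for about three update intervals
 * (roughly three seconds with the default sysctl_stat_interval of HZ).
 */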
463 static int refresh_cpu_vm_stats(bool do_pagesets)
464 {
465         struct zone *zone;
466         int i;
467         int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
468         int changes = 0;
469
470         for_each_populated_zone(zone) {
471                 struct per_cpu_pageset __percpu *p = zone->pageset;
472
473                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
474                         int v;
475
476                         v = this_cpu_xchg(p->vm_stat_diff[i], 0);
477                         if (v) {
478
479                                 atomic_long_add(v, &zone->vm_stat[i]);
480                                 global_diff[i] += v;
481 #ifdef CONFIG_NUMA
482                                 /* 3 seconds idle till flush */
483                                 __this_cpu_write(p->expire, 3);
484 #endif
485                         }
486                 }
487 #ifdef CONFIG_NUMA
488                 if (do_pagesets) {
489                         cond_resched();
490                         /*
491                          * Deal with draining the remote pageset of this
492                          * processor
493                          *
494                          * Check if there are pages remaining in this pageset;
495                          * if not, there is nothing to expire.
496                          */
497                         if (!__this_cpu_read(p->expire) ||
498                                !__this_cpu_read(p->pcp.count))
499                                 continue;
500
501                         /*
502                          * We never drain zones local to this processor.
503                          */
504                         if (zone_to_nid(zone) == numa_node_id()) {
505                                 __this_cpu_write(p->expire, 0);
506                                 continue;
507                         }
508
509                         if (__this_cpu_dec_return(p->expire))
510                                 continue;
511
512                         if (__this_cpu_read(p->pcp.count)) {
513                                 drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
514                                 changes++;
515                         }
516                 }
517 #endif
518         }
519         changes += fold_diff(global_diff);
520         return changes;
521 }
522
523 /*
524  * Fold the data for an offline cpu into the global array.
525  * There cannot be any access by the offline cpu and therefore
526  * synchronization is simplified.
527  */
528 void cpu_vm_stats_fold(int cpu)
529 {
530         struct zone *zone;
531         int i;
532         int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
533
534         for_each_populated_zone(zone) {
535                 struct per_cpu_pageset *p;
536
537                 p = per_cpu_ptr(zone->pageset, cpu);
538
539                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
540                         if (p->vm_stat_diff[i]) {
541                                 int v;
542
543                                 v = p->vm_stat_diff[i];
544                                 p->vm_stat_diff[i] = 0;
545                                 atomic_long_add(v, &zone->vm_stat[i]);
546                                 global_diff[i] += v;
547                         }
548         }
549
550         fold_diff(global_diff);
551 }
552
553 /*
554  * This is only called if !populated_zone(zone), which implies no other users of
555  * pset->vm_stat_diff[] exist.
556  */
557 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
558 {
559         int i;
560
561         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
562                 if (pset->vm_stat_diff[i]) {
563                         int v = pset->vm_stat_diff[i];
564                         pset->vm_stat_diff[i] = 0;
565                         atomic_long_add(v, &zone->vm_stat[i]);
566                         atomic_long_add(v, &vm_stat[i]);
567                 }
568 }
569 #endif
570
571 #ifdef CONFIG_NUMA
572 /*
573  * zonelist = the list of zones passed to the allocator
574  * z        = the zone from which the allocation occurred.
575  *
576  * Must be called with interrupts disabled.
577  *
578  * When __GFP_OTHER_NODE is set, assume the node of the preferred
579  * zone is the local node. This is useful for daemons that allocate
580  * memory on behalf of other processes.
581  */
582 void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
583 {
584         if (z->zone_pgdat == preferred_zone->zone_pgdat) {
585                 __inc_zone_state(z, NUMA_HIT);
586         } else {
587                 __inc_zone_state(z, NUMA_MISS);
588                 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
589         }
590         if (z->node == ((flags & __GFP_OTHER_NODE) ?
591                         preferred_zone->node : numa_node_id()))
592                 __inc_zone_state(z, NUMA_LOCAL);
593         else
594                 __inc_zone_state(z, NUMA_OTHER);
595 }
596
597 /*
598  * Determine the per node value of a stat item.
599  */
600 unsigned long node_page_state(int node, enum zone_stat_item item)
601 {
602         struct zone *zones = NODE_DATA(node)->node_zones;
603
604         return
605 #ifdef CONFIG_ZONE_DMA
606                 zone_page_state(&zones[ZONE_DMA], item) +
607 #endif
608 #ifdef CONFIG_ZONE_DMA32
609                 zone_page_state(&zones[ZONE_DMA32], item) +
610 #endif
611 #ifdef CONFIG_HIGHMEM
612                 zone_page_state(&zones[ZONE_HIGHMEM], item) +
613 #endif
614                 zone_page_state(&zones[ZONE_NORMAL], item) +
615                 zone_page_state(&zones[ZONE_MOVABLE], item);
616 }
617
618 #endif
619
620 #ifdef CONFIG_COMPACTION
621
622 struct contig_page_info {
623         unsigned long free_pages;
624         unsigned long free_blocks_total;
625         unsigned long free_blocks_suitable;
626 };
627
628 /*
629  * Calculate the number of free pages in a zone, how many contiguous
630  * pages are free and how many are large enough to satisfy an allocation of
631  * the target size. Note that this function makes no attempt to estimate
632  * how many suitable free blocks there *might* be if MOVABLE pages were
633  * migrated. Calculating that is possible, but expensive and can be
634  * figured out from userspace
635  */
636 static void fill_contig_page_info(struct zone *zone,
637                                 unsigned int suitable_order,
638                                 struct contig_page_info *info)
639 {
640         unsigned int order;
641
642         info->free_pages = 0;
643         info->free_blocks_total = 0;
644         info->free_blocks_suitable = 0;
645
646         for (order = 0; order < MAX_ORDER; order++) {
647                 unsigned long blocks;
648
649                 /* Count number of free blocks */
650                 blocks = zone->free_area[order].nr_free;
651                 info->free_blocks_total += blocks;
652
653                 /* Count free base pages */
654                 info->free_pages += blocks << order;
655
656                 /* Count the suitable free blocks */
657                 if (order >= suitable_order)
658                         info->free_blocks_suitable += blocks <<
659                                                 (order - suitable_order);
660         }
661 }
662
663 /*
664  * A fragmentation index only makes sense if an allocation of a requested
665  * size would fail. If that is true, the fragmentation index indicates
666  * whether external fragmentation or a lack of memory was the problem.
667  * The value can be used to determine if page reclaim or compaction
668  * should be used
669  */
670 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
671 {
672         unsigned long requested = 1UL << order;
673
674         if (!info->free_blocks_total)
675                 return 0;
676
677         /* Fragmentation index only makes sense when a request would fail */
678         if (info->free_blocks_suitable)
679                 return -1000;
680
681         /*
682          * Index is between 0 and 1 so return within 3 decimal places
683          *
684          * 0 => allocation would fail due to lack of memory
685          * 1 => allocation would fail due to fragmentation
686          */
687         return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
688 }
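/*
 * Worked example (made-up numbers): for an order-3 request (8 pages) with
 * free_pages = 1000 spread over free_blocks_total = 400 blocks, none large
 * enough (free_blocks_suitable = 0), the index is
 * 1000 - (1000 + 1000 * 1000 / 8) / 400 = 1000 - 315 = 685, i.e. 0.685,
 * pointing at fragmentation rather than an outright lack of memory.
 */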
689
690 /* Same as __fragmentation_index but allocates contig_page_info on stack */
691 int fragmentation_index(struct zone *zone, unsigned int order)
692 {
693         struct contig_page_info info;
694
695         fill_contig_page_info(zone, order, &info);
696         return __fragmentation_index(order, &info);
697 }
698 #endif
699
700 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
701 #ifdef CONFIG_ZONE_DMA
702 #define TEXT_FOR_DMA(xx) xx "_dma",
703 #else
704 #define TEXT_FOR_DMA(xx)
705 #endif
706
707 #ifdef CONFIG_ZONE_DMA32
708 #define TEXT_FOR_DMA32(xx) xx "_dma32",
709 #else
710 #define TEXT_FOR_DMA32(xx)
711 #endif
712
713 #ifdef CONFIG_HIGHMEM
714 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
715 #else
716 #define TEXT_FOR_HIGHMEM(xx)
717 #endif
718
719 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
720                                         TEXT_FOR_HIGHMEM(xx) xx "_movable",
721
722 const char * const vmstat_text[] = {
723         /* enum zone_stat_item counters */
724         "nr_free_pages",
725         "nr_alloc_batch",
726         "nr_inactive_anon",
727         "nr_active_anon",
728         "nr_inactive_file",
729         "nr_active_file",
730         "nr_unevictable",
731         "nr_mlock",
732         "nr_anon_pages",
733         "nr_mapped",
734         "nr_file_pages",
735         "nr_dirty",
736         "nr_writeback",
737         "nr_slab_reclaimable",
738         "nr_slab_unreclaimable",
739         "nr_page_table_pages",
740         "nr_kernel_stack",
741         "nr_unstable",
742         "nr_bounce",
743         "nr_vmscan_write",
744         "nr_vmscan_immediate_reclaim",
745         "nr_writeback_temp",
746         "nr_isolated_anon",
747         "nr_isolated_file",
748         "nr_shmem",
749         "nr_dirtied",
750         "nr_written",
751         "nr_pages_scanned",
752
753 #ifdef CONFIG_NUMA
754         "numa_hit",
755         "numa_miss",
756         "numa_foreign",
757         "numa_interleave",
758         "numa_local",
759         "numa_other",
760 #endif
761         "workingset_refault",
762         "workingset_activate",
763         "workingset_nodereclaim",
764         "nr_anon_transparent_hugepages",
765         "nr_free_cma",
766
767         /* enum writeback_stat_item counters */
768         "nr_dirty_threshold",
769         "nr_dirty_background_threshold",
770
771 #ifdef CONFIG_VM_EVENT_COUNTERS
772         /* enum vm_event_item counters */
773         "pgpgin",
774         "pgpgout",
775         "pswpin",
776         "pswpout",
777
778         TEXTS_FOR_ZONES("pgalloc")
779
780         "pgfree",
781         "pgactivate",
782         "pgdeactivate",
783
784         "pgfault",
785         "pgmajfault",
786         "pglazyfreed",
787
788         TEXTS_FOR_ZONES("pgrefill")
789         TEXTS_FOR_ZONES("pgsteal_kswapd")
790         TEXTS_FOR_ZONES("pgsteal_direct")
791         TEXTS_FOR_ZONES("pgscan_kswapd")
792         TEXTS_FOR_ZONES("pgscan_direct")
793         "pgscan_direct_throttle",
794
795 #ifdef CONFIG_NUMA
796         "zone_reclaim_failed",
797 #endif
798         "pginodesteal",
799         "slabs_scanned",
800         "kswapd_inodesteal",
801         "kswapd_low_wmark_hit_quickly",
802         "kswapd_high_wmark_hit_quickly",
803         "pageoutrun",
804         "allocstall",
805
806         "pgrotated",
807
808         "drop_pagecache",
809         "drop_slab",
810
811 #ifdef CONFIG_NUMA_BALANCING
812         "numa_pte_updates",
813         "numa_huge_pte_updates",
814         "numa_hint_faults",
815         "numa_hint_faults_local",
816         "numa_pages_migrated",
817 #endif
818 #ifdef CONFIG_MIGRATION
819         "pgmigrate_success",
820         "pgmigrate_fail",
821 #endif
822 #ifdef CONFIG_COMPACTION
823         "compact_migrate_scanned",
824         "compact_free_scanned",
825         "compact_isolated",
826         "compact_stall",
827         "compact_fail",
828         "compact_success",
829 #endif
830
831 #ifdef CONFIG_HUGETLB_PAGE
832         "htlb_buddy_alloc_success",
833         "htlb_buddy_alloc_fail",
834 #endif
835         "unevictable_pgs_culled",
836         "unevictable_pgs_scanned",
837         "unevictable_pgs_rescued",
838         "unevictable_pgs_mlocked",
839         "unevictable_pgs_munlocked",
840         "unevictable_pgs_cleared",
841         "unevictable_pgs_stranded",
842
843 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
844         "thp_fault_alloc",
845         "thp_fault_fallback",
846         "thp_collapse_alloc",
847         "thp_collapse_alloc_failed",
848         "thp_split_page",
849         "thp_split_page_failed",
850         "thp_split_pmd",
851         "thp_zero_page_alloc",
852         "thp_zero_page_alloc_failed",
853 #endif
854 #ifdef CONFIG_MEMORY_BALLOON
855         "balloon_inflate",
856         "balloon_deflate",
857 #ifdef CONFIG_BALLOON_COMPACTION
858         "balloon_migrate",
859 #endif
860 #endif /* CONFIG_MEMORY_BALLOON */
861 #ifdef CONFIG_DEBUG_TLBFLUSH
862 #ifdef CONFIG_SMP
863         "nr_tlb_remote_flush",
864         "nr_tlb_remote_flush_received",
865 #endif /* CONFIG_SMP */
866         "nr_tlb_local_flush_all",
867         "nr_tlb_local_flush_one",
868 #endif /* CONFIG_DEBUG_TLBFLUSH */
869
870 #ifdef CONFIG_DEBUG_VM_VMACACHE
871         "vmacache_find_calls",
872         "vmacache_find_hits",
873         "vmacache_full_flushes",
874 #endif
875 #endif /* CONFIG_VM_EVENT_COUNTERS */
876 };
877 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
878
879
880 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
881      defined(CONFIG_PROC_FS)
882 static void *frag_start(struct seq_file *m, loff_t *pos)
883 {
884         pg_data_t *pgdat;
885         loff_t node = *pos;
886
887         for (pgdat = first_online_pgdat();
888              pgdat && node;
889              pgdat = next_online_pgdat(pgdat))
890                 --node;
891
892         return pgdat;
893 }
894
895 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
896 {
897         pg_data_t *pgdat = (pg_data_t *)arg;
898
899         (*pos)++;
900         return next_online_pgdat(pgdat);
901 }
902
903 static void frag_stop(struct seq_file *m, void *arg)
904 {
905 }
906
907 /* Walk all the zones in a node and print using a callback */
908 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
909                 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
910 {
911         struct zone *zone;
912         struct zone *node_zones = pgdat->node_zones;
913         unsigned long flags;
914
915         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
916                 if (!populated_zone(zone))
917                         continue;
918
919                 spin_lock_irqsave(&zone->lock, flags);
920                 print(m, pgdat, zone);
921                 spin_unlock_irqrestore(&zone->lock, flags);
922         }
923 }
924 #endif
925
926 #ifdef CONFIG_PROC_FS
927 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
928                                                 struct zone *zone)
929 {
930         int order;
931
932         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
933         for (order = 0; order < MAX_ORDER; ++order)
934                 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
935         seq_putc(m, '\n');
936 }
937
938 /*
939  * This walks the free areas for each zone.
940  */
941 static int frag_show(struct seq_file *m, void *arg)
942 {
943         pg_data_t *pgdat = (pg_data_t *)arg;
944         walk_zones_in_node(m, pgdat, frag_show_print);
945         return 0;
946 }
947
948 static void pagetypeinfo_showfree_print(struct seq_file *m,
949                                         pg_data_t *pgdat, struct zone *zone)
950 {
951         int order, mtype;
952
953         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
954                 seq_printf(m, "Node %4d, zone %8s, type %12s ",
955                                         pgdat->node_id,
956                                         zone->name,
957                                         migratetype_names[mtype]);
958                 for (order = 0; order < MAX_ORDER; ++order) {
959                         unsigned long freecount = 0;
960                         struct free_area *area;
961                         struct list_head *curr;
962
963                         area = &(zone->free_area[order]);
964
965                         list_for_each(curr, &area->free_list[mtype])
966                                 freecount++;
967                         seq_printf(m, "%6lu ", freecount);
968                 }
969                 seq_putc(m, '\n');
970         }
971 }
972
973 /* Print out the free pages at each order for each migratetype */
974 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
975 {
976         int order;
977         pg_data_t *pgdat = (pg_data_t *)arg;
978
979         /* Print header */
980         seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
981         for (order = 0; order < MAX_ORDER; ++order)
982                 seq_printf(m, "%6d ", order);
983         seq_putc(m, '\n');
984
985         walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
986
987         return 0;
988 }
989
990 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
991                                         pg_data_t *pgdat, struct zone *zone)
992 {
993         int mtype;
994         unsigned long pfn;
995         unsigned long start_pfn = zone->zone_start_pfn;
996         unsigned long end_pfn = zone_end_pfn(zone);
997         unsigned long count[MIGRATE_TYPES] = { 0, };
998
999         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1000                 struct page *page;
1001
1002                 if (!pfn_valid(pfn))
1003                         continue;
1004
1005                 page = pfn_to_page(pfn);
1006
1007                 /* Watch for unexpected holes punched in the memmap */
1008                 if (!memmap_valid_within(pfn, page, zone))
1009                         continue;
1010
1011                 mtype = get_pageblock_migratetype(page);
1012
1013                 if (mtype < MIGRATE_TYPES)
1014                         count[mtype]++;
1015         }
1016
1017         /* Print counts */
1018         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1019         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1020                 seq_printf(m, "%12lu ", count[mtype]);
1021         seq_putc(m, '\n');
1022 }
1023
1024 /* Print out the number of pageblocks for each migratetype */
1025 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1026 {
1027         int mtype;
1028         pg_data_t *pgdat = (pg_data_t *)arg;
1029
1030         seq_printf(m, "\n%-23s", "Number of blocks type ");
1031         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1032                 seq_printf(m, "%12s ", migratetype_names[mtype]);
1033         seq_putc(m, '\n');
1034         walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1035
1036         return 0;
1037 }
1038
1039 #ifdef CONFIG_PAGE_OWNER
1040 static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
1041                                                         pg_data_t *pgdat,
1042                                                         struct zone *zone)
1043 {
1044         struct page *page;
1045         struct page_ext *page_ext;
1046         unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
1047         unsigned long end_pfn = pfn + zone->spanned_pages;
1048         unsigned long count[MIGRATE_TYPES] = { 0, };
1049         int pageblock_mt, page_mt;
1050         int i;
1051
1052         /* Scan block by block. First and last block may be incomplete */
1053         pfn = zone->zone_start_pfn;
1054
1055         /*
1056          * Walk the zone in pageblock_nr_pages steps. If a page block spans
1057          * a zone boundary, it will be double counted between zones. This does
1058          * not matter as the mixed block count will still be correct
1059          */
1060         for (; pfn < end_pfn; ) {
1061                 if (!pfn_valid(pfn)) {
1062                         pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
1063                         continue;
1064                 }
1065
1066                 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
1067                 block_end_pfn = min(block_end_pfn, end_pfn);
1068
1069                 page = pfn_to_page(pfn);
1070                 pageblock_mt = get_pfnblock_migratetype(page, pfn);
1071
1072                 for (; pfn < block_end_pfn; pfn++) {
1073                         if (!pfn_valid_within(pfn))
1074                                 continue;
1075
1076                         page = pfn_to_page(pfn);
1077                         if (PageBuddy(page)) {
1078                                 pfn += (1UL << page_order(page)) - 1;
1079                                 continue;
1080                         }
1081
1082                         if (PageReserved(page))
1083                                 continue;
1084
1085                         page_ext = lookup_page_ext(page);
1086
1087                         if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
1088                                 continue;
1089
1090                         page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
1091                         if (pageblock_mt != page_mt) {
1092                                 if (is_migrate_cma(pageblock_mt))
1093                                         count[MIGRATE_MOVABLE]++;
1094                                 else
1095                                         count[pageblock_mt]++;
1096
1097                                 pfn = block_end_pfn;
1098                                 break;
1099                         }
1100                         pfn += (1UL << page_ext->order) - 1;
1101                 }
1102         }
1103
1104         /* Print counts */
1105         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1106         for (i = 0; i < MIGRATE_TYPES; i++)
1107                 seq_printf(m, "%12lu ", count[i]);
1108         seq_putc(m, '\n');
1109 }
1110 #endif /* CONFIG_PAGE_OWNER */
1111
1112 /*
1113  * Print out the number of pageblocks for each migratetype that contain pages
1114  * of other types. This gives an indication of how well fallbacks are being
1115  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1116  * to determine what is going on
1117  */
1118 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1119 {
1120 #ifdef CONFIG_PAGE_OWNER
1121         int mtype;
1122
1123         if (!static_branch_unlikely(&page_owner_inited))
1124                 return;
1125
1126         drain_all_pages(NULL);
1127
1128         seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1129         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1130                 seq_printf(m, "%12s ", migratetype_names[mtype]);
1131         seq_putc(m, '\n');
1132
1133         walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1134 #endif /* CONFIG_PAGE_OWNER */
1135 }
1136
1137 /*
1138  * This prints out statistics in relation to grouping pages by mobility.
1139  * It is expensive to collect so do not constantly read the file.
1140  */
1141 static int pagetypeinfo_show(struct seq_file *m, void *arg)
1142 {
1143         pg_data_t *pgdat = (pg_data_t *)arg;
1144
1145         /* check memoryless node */
1146         if (!node_state(pgdat->node_id, N_MEMORY))
1147                 return 0;
1148
1149         seq_printf(m, "Page block order: %d\n", pageblock_order);
1150         seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1151         seq_putc(m, '\n');
1152         pagetypeinfo_showfree(m, pgdat);
1153         pagetypeinfo_showblockcount(m, pgdat);
1154         pagetypeinfo_showmixedcount(m, pgdat);
1155
1156         return 0;
1157 }
1158
1159 static const struct seq_operations fragmentation_op = {
1160         .start  = frag_start,
1161         .next   = frag_next,
1162         .stop   = frag_stop,
1163         .show   = frag_show,
1164 };
1165
1166 static int fragmentation_open(struct inode *inode, struct file *file)
1167 {
1168         return seq_open(file, &fragmentation_op);
1169 }
1170
1171 static const struct file_operations fragmentation_file_operations = {
1172         .open           = fragmentation_open,
1173         .read           = seq_read,
1174         .llseek         = seq_lseek,
1175         .release        = seq_release,
1176 };
1177
1178 static const struct seq_operations pagetypeinfo_op = {
1179         .start  = frag_start,
1180         .next   = frag_next,
1181         .stop   = frag_stop,
1182         .show   = pagetypeinfo_show,
1183 };
1184
1185 static int pagetypeinfo_open(struct inode *inode, struct file *file)
1186 {
1187         return seq_open(file, &pagetypeinfo_op);
1188 }
1189
1190 static const struct file_operations pagetypeinfo_file_ops = {
1191         .open           = pagetypeinfo_open,
1192         .read           = seq_read,
1193         .llseek         = seq_lseek,
1194         .release        = seq_release,
1195 };
1196
1197 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1198                                                         struct zone *zone)
1199 {
1200         int i;
1201         seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1202         seq_printf(m,
1203                    "\n  pages free     %lu"
1204                    "\n        min      %lu"
1205                    "\n        low      %lu"
1206                    "\n        high     %lu"
1207                    "\n        scanned  %lu"
1208                    "\n        spanned  %lu"
1209                    "\n        present  %lu"
1210                    "\n        managed  %lu",
1211                    zone_page_state(zone, NR_FREE_PAGES),
1212                    min_wmark_pages(zone),
1213                    low_wmark_pages(zone),
1214                    high_wmark_pages(zone),
1215                    zone_page_state(zone, NR_PAGES_SCANNED),
1216                    zone->spanned_pages,
1217                    zone->present_pages,
1218                    zone->managed_pages);
1219
1220         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1221                 seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
1222                                 zone_page_state(zone, i));
1223
1224         seq_printf(m,
1225                    "\n        protection: (%ld",
1226                    zone->lowmem_reserve[0]);
1227         for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1228                 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1229         seq_printf(m,
1230                    ")"
1231                    "\n  pagesets");
1232         for_each_online_cpu(i) {
1233                 struct per_cpu_pageset *pageset;
1234
1235                 pageset = per_cpu_ptr(zone->pageset, i);
1236                 seq_printf(m,
1237                            "\n    cpu: %i"
1238                            "\n              count: %i"
1239                            "\n              high:  %i"
1240                            "\n              batch: %i",
1241                            i,
1242                            pageset->pcp.count,
1243                            pageset->pcp.high,
1244                            pageset->pcp.batch);
1245 #ifdef CONFIG_SMP
1246                 seq_printf(m, "\n  vm stats threshold: %d",
1247                                 pageset->stat_threshold);
1248 #endif
1249         }
1250         seq_printf(m,
1251                    "\n  all_unreclaimable: %u"
1252                    "\n  start_pfn:         %lu"
1253                    "\n  inactive_ratio:    %u",
1254                    !zone_reclaimable(zone),
1255                    zone->zone_start_pfn,
1256                    zone->inactive_ratio);
1257         seq_putc(m, '\n');
1258 }
1259
1260 /*
1261  * Output information about zones in @pgdat.
1262  */
1263 static int zoneinfo_show(struct seq_file *m, void *arg)
1264 {
1265         pg_data_t *pgdat = (pg_data_t *)arg;
1266         walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1267         return 0;
1268 }
1269
1270 static const struct seq_operations zoneinfo_op = {
1271         .start  = frag_start, /* iterate over all zones. The same as in
1272                                * fragmentation. */
1273         .next   = frag_next,
1274         .stop   = frag_stop,
1275         .show   = zoneinfo_show,
1276 };
1277
1278 static int zoneinfo_open(struct inode *inode, struct file *file)
1279 {
1280         return seq_open(file, &zoneinfo_op);
1281 }
1282
1283 static const struct file_operations proc_zoneinfo_file_operations = {
1284         .open           = zoneinfo_open,
1285         .read           = seq_read,
1286         .llseek         = seq_lseek,
1287         .release        = seq_release,
1288 };
1289
1290 enum writeback_stat_item {
1291         NR_DIRTY_THRESHOLD,
1292         NR_DIRTY_BG_THRESHOLD,
1293         NR_VM_WRITEBACK_STAT_ITEMS,
1294 };
1295
1296 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1297 {
1298         unsigned long *v;
1299         int i, stat_items_size;
1300
1301         if (*pos >= ARRAY_SIZE(vmstat_text))
1302                 return NULL;
1303         stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1304                           NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1305
1306 #ifdef CONFIG_VM_EVENT_COUNTERS
1307         stat_items_size += sizeof(struct vm_event_state);
1308 #endif
1309
1310         v = kmalloc(stat_items_size, GFP_KERNEL);
1311         m->private = v;
1312         if (!v)
1313                 return ERR_PTR(-ENOMEM);
1314         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1315                 v[i] = global_page_state(i);
1316         v += NR_VM_ZONE_STAT_ITEMS;
1317
1318         global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1319                             v + NR_DIRTY_THRESHOLD);
1320         v += NR_VM_WRITEBACK_STAT_ITEMS;
1321
1322 #ifdef CONFIG_VM_EVENT_COUNTERS
1323         all_vm_events(v);
1324         v[PGPGIN] /= 2;         /* sectors -> kbytes */
1325         v[PGPGOUT] /= 2;
1326 #endif
1327         return (unsigned long *)m->private + *pos;
1328 }
1329
1330 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1331 {
1332         (*pos)++;
1333         if (*pos >= ARRAY_SIZE(vmstat_text))
1334                 return NULL;
1335         return (unsigned long *)m->private + *pos;
1336 }
1337
1338 static int vmstat_show(struct seq_file *m, void *arg)
1339 {
1340         unsigned long *l = arg;
1341         unsigned long off = l - (unsigned long *)m->private;
1342
1343         seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1344         return 0;
1345 }
1346
1347 static void vmstat_stop(struct seq_file *m, void *arg)
1348 {
1349         kfree(m->private);
1350         m->private = NULL;
1351 }
1352
1353 static const struct seq_operations vmstat_op = {
1354         .start  = vmstat_start,
1355         .next   = vmstat_next,
1356         .stop   = vmstat_stop,
1357         .show   = vmstat_show,
1358 };
1359
1360 static int vmstat_open(struct inode *inode, struct file *file)
1361 {
1362         return seq_open(file, &vmstat_op);
1363 }
1364
1365 static const struct file_operations proc_vmstat_file_operations = {
1366         .open           = vmstat_open,
1367         .read           = seq_read,
1368         .llseek         = seq_lseek,
1369         .release        = seq_release,
1370 };
1371 #endif /* CONFIG_PROC_FS */
1372
1373 #ifdef CONFIG_SMP
1374 static struct workqueue_struct *vmstat_wq;
1375 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1376 int sysctl_stat_interval __read_mostly = HZ;
1377 static cpumask_var_t cpu_stat_off;
1378
1379 static void vmstat_update(struct work_struct *w)
1380 {
1381         if (refresh_cpu_vm_stats(true)) {
1382                 /*
1383                  * Counters were updated so we expect more updates
1384                  * to occur in the future. Keep on running the
1385                  * update worker thread.
1386                  * If we were marked on cpu_stat_off clear the flag
1387                  * so that vmstat_shepherd doesn't schedule us again.
1388                  */
1389                 if (!cpumask_test_and_clear_cpu(smp_processor_id(),
1390                                                 cpu_stat_off)) {
1391                         queue_delayed_work_on(smp_processor_id(), vmstat_wq,
1392                                 this_cpu_ptr(&vmstat_work),
1393                                 round_jiffies_relative(sysctl_stat_interval));
1394                 }
1395         } else {
1396                 /*
1397                  * We did not update any counters so the app may be in
1398                  * a mode where it does not cause counter updates.
1399                  * We may be uselessly running vmstat_update.
1400                  * Defer the checking for differentials to the
1401                  * shepherd thread on a different processor.
1402                  */
1403                 cpumask_set_cpu(smp_processor_id(), cpu_stat_off);
1404         }
1405 }
1406
1407 /*
1408  * quiet_vmstat() below switches off vmstat processing and then folds all the
1409  * remaining differentials until the diffs stay at zero. It is used by NOHZ
1410  * and can only be invoked when tick processing is not active.
1411  */
1412 /*
1413  * Check if the diffs for a certain cpu indicate that
1414  * an update is needed.
1415  */
1416 static bool need_update(int cpu)
1417 {
1418         struct zone *zone;
1419
1420         for_each_populated_zone(zone) {
1421                 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1422
1423                 BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1424                 /*
1425                  * The fast way of checking if there are any vmstat diffs.
1426                  * This works because the diffs are byte sized items.
1427                  */
1428                 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1429                         return true;
1430
1431         }
1432         return false;
1433 }
1434
1435 void quiet_vmstat(void)
1436 {
1437         if (system_state != SYSTEM_RUNNING)
1438                 return;
1439
1440         /*
1441          * If we are already in hands of the shepherd then there
1442          * is nothing for us to do here.
1443          */
1444         if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
1445                 return;
1446
1447         if (!need_update(smp_processor_id()))
1448                 return;
1449
1450         /*
1451          * Just refresh counters and do not care about the pending delayed
1452          * vmstat_update. It doesn't fire that often to matter and canceling
1453          * it would be too expensive from this path.
1454          * vmstat_shepherd will take care about that for us.
1455          */
1456         refresh_cpu_vm_stats(false);
1457 }
1458
1459
1460 /*
1461  * Shepherd worker thread that checks the
1462  * differentials of processors that have their worker
1463  * threads for vm statistics updates disabled because of
1464  * inactivity.
1465  */
1466 static void vmstat_shepherd(struct work_struct *w);
1467
1468 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1469
1470 static void vmstat_shepherd(struct work_struct *w)
1471 {
1472         int cpu;
1473
1474         get_online_cpus();
1475         /* Check processors whose vmstat worker threads have been disabled */
1476         for_each_cpu(cpu, cpu_stat_off) {
1477                 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1478
1479                 if (need_update(cpu)) {
1480                         if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
1481                                 queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
1482                 } else {
1483                         /*
1484                          * Cancel the work if quiet_vmstat has put this
1485                          * cpu on cpu_stat_off because the work item might
1486                          * be still scheduled
1487                          */
1488                         cancel_delayed_work(dw);
1489                 }
1490         }
1491         put_online_cpus();
1492
1493         schedule_delayed_work(&shepherd,
1494                 round_jiffies_relative(sysctl_stat_interval));
1495 }
1496
1497 static void __init start_shepherd_timer(void)
1498 {
1499         int cpu;
1500
1501         for_each_possible_cpu(cpu)
1502                 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1503                         vmstat_update);
1504
1505         if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
1506                 BUG();
1507         cpumask_copy(cpu_stat_off, cpu_online_mask);
1508
1509         vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1510         schedule_delayed_work(&shepherd,
1511                 round_jiffies_relative(sysctl_stat_interval));
1512 }
1513
1514 static void vmstat_cpu_dead(int node)
1515 {
1516         int cpu;
1517
1518         get_online_cpus();
1519         for_each_online_cpu(cpu)
1520                 if (cpu_to_node(cpu) == node)
1521                         goto end;
1522
1523         node_clear_state(node, N_CPU);
1524 end:
1525         put_online_cpus();
1526 }
1527
1528 /*
1529  * Use the cpu notifier to ensure that the thresholds are recalculated
1530  * when necessary.
1531  */
1532 static int vmstat_cpuup_callback(struct notifier_block *nfb,
1533                 unsigned long action,
1534                 void *hcpu)
1535 {
1536         long cpu = (long)hcpu;
1537
1538         switch (action) {
1539         case CPU_ONLINE:
1540         case CPU_ONLINE_FROZEN:
1541                 refresh_zone_stat_thresholds();
1542                 node_set_state(cpu_to_node(cpu), N_CPU);
1543                 cpumask_set_cpu(cpu, cpu_stat_off);
1544                 break;
1545         case CPU_DOWN_PREPARE:
1546         case CPU_DOWN_PREPARE_FROZEN:
1547                 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1548                 cpumask_clear_cpu(cpu, cpu_stat_off);
1549                 break;
1550         case CPU_DOWN_FAILED:
1551         case CPU_DOWN_FAILED_FROZEN:
1552                 cpumask_set_cpu(cpu, cpu_stat_off);
1553                 break;
1554         case CPU_DEAD:
1555         case CPU_DEAD_FROZEN:
1556                 refresh_zone_stat_thresholds();
1557                 vmstat_cpu_dead(cpu_to_node(cpu));
1558                 break;
1559         default:
1560                 break;
1561         }
1562         return NOTIFY_OK;
1563 }
1564
1565 static struct notifier_block vmstat_notifier =
1566         { &vmstat_cpuup_callback, NULL, 0 };
1567 #endif
1568
1569 static int __init setup_vmstat(void)
1570 {
1571 #ifdef CONFIG_SMP
1572         cpu_notifier_register_begin();
1573         __register_cpu_notifier(&vmstat_notifier);
1574
1575         start_shepherd_timer();
1576         cpu_notifier_register_done();
1577 #endif
1578 #ifdef CONFIG_PROC_FS
1579         proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1580         proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1581         proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1582         proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1583 #endif
1584         return 0;
1585 }
1586 module_init(setup_vmstat)
1587
1588 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1589
1590 /*
1591  * Return an index indicating how much of the available free memory is
1592  * unusable for an allocation of the requested size.
1593  */
1594 static int unusable_free_index(unsigned int order,
1595                                 struct contig_page_info *info)
1596 {
1597         /* No free memory is interpreted as all free memory is unusable */
1598         if (info->free_pages == 0)
1599                 return 1000;
1600
1601         /*
1602          * Index should be a value between 0 and 1. Return a value to 3
1603          * decimal places.
1604          *
1605          * 0 => no fragmentation
1606          * 1 => high fragmentation
1607          */
1608         return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1609
1610 }
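/*
 * Worked example (made-up numbers): for order = 2 (4-page) requests with
 * free_pages = 1000, of which 100 blocks are at least order 2, the usable
 * portion is 100 << 2 = 400 pages, so the index is
 * (1000 - 400) * 1000 / 1000 = 600, i.e. 0.600 of the free memory is
 * unusable for such an allocation.
 */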
1611
1612 static void unusable_show_print(struct seq_file *m,
1613                                         pg_data_t *pgdat, struct zone *zone)
1614 {
1615         unsigned int order;
1616         int index;
1617         struct contig_page_info info;
1618
1619         seq_printf(m, "Node %d, zone %8s ",
1620                                 pgdat->node_id,
1621                                 zone->name);
1622         for (order = 0; order < MAX_ORDER; ++order) {
1623                 fill_contig_page_info(zone, order, &info);
1624                 index = unusable_free_index(order, &info);
1625                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1626         }
1627
1628         seq_putc(m, '\n');
1629 }
1630
1631 /*
1632  * Display unusable free space index
1633  *
1634  * The unusable free space index measures how much of the available free
1635  * memory cannot be used to satisfy an allocation of a given size and is a
1636  * value between 0 and 1. The higher the value, the more of free memory is
1637  * unusable and by implication, the worse the external fragmentation is. This
1638  * can be expressed as a percentage by multiplying by 100.
1639  */
1640 static int unusable_show(struct seq_file *m, void *arg)
1641 {
1642         pg_data_t *pgdat = (pg_data_t *)arg;
1643
1644         /* check memoryless node */
1645         if (!node_state(pgdat->node_id, N_MEMORY))
1646                 return 0;
1647
1648         walk_zones_in_node(m, pgdat, unusable_show_print);
1649
1650         return 0;
1651 }
1652
1653 static const struct seq_operations unusable_op = {
1654         .start  = frag_start,
1655         .next   = frag_next,
1656         .stop   = frag_stop,
1657         .show   = unusable_show,
1658 };
1659
1660 static int unusable_open(struct inode *inode, struct file *file)
1661 {
1662         return seq_open(file, &unusable_op);
1663 }
1664
1665 static const struct file_operations unusable_file_ops = {
1666         .open           = unusable_open,
1667         .read           = seq_read,
1668         .llseek         = seq_lseek,
1669         .release        = seq_release,
1670 };
1671
1672 static void extfrag_show_print(struct seq_file *m,
1673                                         pg_data_t *pgdat, struct zone *zone)
1674 {
1675         unsigned int order;
1676         int index;
1677
1678         /* Alloc on stack as interrupts are disabled for zone walk */
1679         struct contig_page_info info;
1680
1681         seq_printf(m, "Node %d, zone %8s ",
1682                                 pgdat->node_id,
1683                                 zone->name);
1684         for (order = 0; order < MAX_ORDER; ++order) {
1685                 fill_contig_page_info(zone, order, &info);
1686                 index = __fragmentation_index(order, &info);
1687                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1688         }
1689
1690         seq_putc(m, '\n');
1691 }
1692
1693 /*
1694  * Display fragmentation index for orders that allocations would fail for
1695  */
1696 static int extfrag_show(struct seq_file *m, void *arg)
1697 {
1698         pg_data_t *pgdat = (pg_data_t *)arg;
1699
1700         walk_zones_in_node(m, pgdat, extfrag_show_print);
1701
1702         return 0;
1703 }
1704
1705 static const struct seq_operations extfrag_op = {
1706         .start  = frag_start,
1707         .next   = frag_next,
1708         .stop   = frag_stop,
1709         .show   = extfrag_show,
1710 };
1711
1712 static int extfrag_open(struct inode *inode, struct file *file)
1713 {
1714         return seq_open(file, &extfrag_op);
1715 }
1716
1717 static const struct file_operations extfrag_file_ops = {
1718         .open           = extfrag_open,
1719         .read           = seq_read,
1720         .llseek         = seq_lseek,
1721         .release        = seq_release,
1722 };
1723
1724 static int __init extfrag_debug_init(void)
1725 {
1726         struct dentry *extfrag_debug_root;
1727
1728         extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1729         if (!extfrag_debug_root)
1730                 return -ENOMEM;
1731
1732         if (!debugfs_create_file("unusable_index", 0444,
1733                         extfrag_debug_root, NULL, &unusable_file_ops))
1734                 goto fail;
1735
1736         if (!debugfs_create_file("extfrag_index", 0444,
1737                         extfrag_debug_root, NULL, &extfrag_file_ops))
1738                 goto fail;
1739
1740         return 0;
1741 fail:
1742         debugfs_remove_recursive(extfrag_debug_root);
1743         return -ENOMEM;
1744 }
1745
1746 module_init(extfrag_debug_init);
1747 #endif