f6ac2354
CL
1/*
2 * linux/mm/vmstat.c
3 *
4 * Manages VM statistics
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
2244b95a
CL
6 *
7 * zoned VM statistics
8 * Copyright (C) 2006 Silicon Graphics, Inc.,
9 * Christoph Lameter <christoph@lameter.com>
7cc36bbd 10 * Copyright (C) 2008-2014 Christoph Lameter
f6ac2354 11 */
8f32f7e5 12#include <linux/fs.h>
f6ac2354 13#include <linux/mm.h>
4e950f6f 14#include <linux/err.h>
2244b95a 15#include <linux/module.h>
5a0e3ad6 16#include <linux/slab.h>
df9ecaba 17#include <linux/cpu.h>
7cc36bbd 18#include <linux/cpumask.h>
c748e134 19#include <linux/vmstat.h>
e8edc6e0 20#include <linux/sched.h>
f1a5ab12 21#include <linux/math64.h>
79da826a 22#include <linux/writeback.h>
36deb0be 23#include <linux/compaction.h>
6e543d57 24#include <linux/mm_inline.h>
48c96a36
JK
25#include <linux/page_ext.h>
26#include <linux/page_owner.h>
6e543d57
LD
27
28#include "internal.h"
f6ac2354 29
f8891e5e
CL
30#ifdef CONFIG_VM_EVENT_COUNTERS
31DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
32EXPORT_PER_CPU_SYMBOL(vm_event_states);
33
31f961a8 34static void sum_vm_events(unsigned long *ret)
f8891e5e 35{
9eccf2a8 36 int cpu;
f8891e5e
CL
37 int i;
38
39 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
40
31f961a8 41 for_each_online_cpu(cpu) {
f8891e5e
CL
42 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
43
f8891e5e
CL
44 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
45 ret[i] += this->event[i];
46 }
47}
48
49/*
50 * Accumulate the vm event counters across all CPUs.
51 * The result is unavoidably approximate - it can change
52 * during and after execution of this function.
53 */
54void all_vm_events(unsigned long *ret)
55{
b5be1132 56 get_online_cpus();
31f961a8 57 sum_vm_events(ret);
b5be1132 58 put_online_cpus();
f8891e5e 59}
32dd66fc 60EXPORT_SYMBOL_GPL(all_vm_events);
f8891e5e 61
f8891e5e
CL
62/*
63 * Fold the foreign cpu events into our own.
64 *
65 * This is adding to the events on one processor
66 * but keeps the global counts constant.
67 */
68void vm_events_fold_cpu(int cpu)
69{
70 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
71 int i;
72
73 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
74 count_vm_events(i, fold_state->event[i]);
75 fold_state->event[i] = 0;
76 }
77}
f8891e5e
CL
78
79#endif /* CONFIG_VM_EVENT_COUNTERS */
80
2244b95a
CL
81/*
82 * Manage combined zone based / global counters
83 *
84 * vm_stat contains the global counters
85 */
a1cb2c60 86atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
2244b95a
CL
87EXPORT_SYMBOL(vm_stat);
88
89#ifdef CONFIG_SMP
90
b44129b3 91int calculate_pressure_threshold(struct zone *zone)
88f5acf8
MG
92{
93 int threshold;
94 int watermark_distance;
95
96 /*
97 * As vmstats are not up to date, there is drift between the estimated
98 * and real values. For high thresholds and a high number of CPUs, it
99 * is possible for the min watermark to be breached while the estimated
100 * value looks fine. The pressure threshold is a reduced value such
101 * that even the maximum amount of drift will not accidentally breach
102 * the min watermark
103 */
104 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
105 threshold = max(1, (int)(watermark_distance / num_online_cpus()));
106
107 /*
108 * Maximum threshold is 125
109 */
110 threshold = min(125, threshold);
111
112 return threshold;
113}
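To make the arithmetic above concrete, here is a minimal user-space sketch of the same pressure-threshold calculation. The watermark values and CPU count are hypothetical, chosen only for illustration.

#include <stdio.h>

int main(void)
{
	int low_wmark = 1200;	/* hypothetical low_wmark_pages(zone) */
	int min_wmark = 1000;	/* hypothetical min_wmark_pages(zone) */
	int online_cpus = 16;
	int watermark_distance, threshold;

	/* Spread the low-to-min gap across the online CPUs. */
	watermark_distance = low_wmark - min_wmark;		/* 200 pages */
	threshold = watermark_distance / online_cpus;		/* 12 */

	if (threshold < 1)
		threshold = 1;
	if (threshold > 125)
		threshold = 125;

	printf("pressure threshold: %d\n", threshold);		/* prints 12 */
	return 0;
}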
114
b44129b3 115int calculate_normal_threshold(struct zone *zone)
df9ecaba
CL
116{
117 int threshold;
118 int mem; /* memory in 128 MB units */
119
120 /*
121 * The threshold scales with the number of processors and the amount
122 * of memory per zone. More memory means that we can defer updates for
123 * longer, more processors could lead to more contention.
124 * fls() is used to have a cheap way of logarithmic scaling.
125 *
126 * Some sample thresholds:
127 *
128 * Threshold Processors (fls) Zonesize fls(mem+1)
129 * ------------------------------------------------------------------
130 * 8 1 1 0.9-1 GB 4
131 * 16 2 2 0.9-1 GB 4
132 * 20 2 2 1-2 GB 5
133 * 24 2 2 2-4 GB 6
134 * 28 2 2 4-8 GB 7
135 * 32 2 2 8-16 GB 8
136 * 4 2 2 <128M 1
137 * 30 4 3 2-4 GB 5
138 * 48 4 3 8-16 GB 8
139 * 32 8 4 1-2 GB 4
140 * 32 8 4 0.9-1GB 4
141 * 10 16 5 <128M 1
142 * 40 16 5 900M 4
143 * 70 64 7 2-4 GB 5
144 * 84 64 7 4-8 GB 6
145 * 108 512 9 4-8 GB 6
146 * 125 1024 10 8-16 GB 8
147 * 125 1024 10 16-32 GB 9
148 */
149
b40da049 150 mem = zone->managed_pages >> (27 - PAGE_SHIFT);
df9ecaba
CL
151
152 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
153
154 /*
155 * Maximum threshold is 125
156 */
157 threshold = min(125, threshold);
158
159 return threshold;
160}
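A worked example of the fls()-based scaling may help. The sketch below reproduces threshold = 2 * fls(cpus) * (1 + fls(mem)) in user space for a hypothetical 2-CPU system with a 1 GB zone; a plain bit-scan loop stands in for the kernel's fls().

#include <stdio.h>

/* User-space stand-in for the kernel's fls(): index of the highest set bit,
 * counting from 1, or 0 if the argument is zero. */
static int fls_sketch(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int online_cpus = 2;
	unsigned int mem = 1024 / 128;	/* hypothetical 1 GB zone in 128 MB units: 8 */
	int threshold;

	threshold = 2 * fls_sketch(online_cpus) * (1 + fls_sketch(mem));
	if (threshold > 125)
		threshold = 125;

	/* 2 * fls(2) * (1 + fls(8)) = 2 * 2 * 5 = 20, matching the sample table. */
	printf("normal threshold: %d\n", threshold);
	return 0;
}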
2244b95a
CL
161
162/*
df9ecaba 163 * Refresh the thresholds for each zone.
2244b95a 164 */
a6cccdc3 165void refresh_zone_stat_thresholds(void)
2244b95a 166{
df9ecaba
CL
167 struct zone *zone;
168 int cpu;
169 int threshold;
170
ee99c71c 171 for_each_populated_zone(zone) {
aa454840
CL
172 unsigned long max_drift, tolerate_drift;
173
b44129b3 174 threshold = calculate_normal_threshold(zone);
df9ecaba
CL
175
176 for_each_online_cpu(cpu)
99dcc3e5
CL
177 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
178 = threshold;
aa454840
CL
179
180 /*
181 * Only set percpu_drift_mark if there is a danger that
182 * NR_FREE_PAGES reports the low watermark is ok when in fact
183 * the min watermark could be breached by an allocation
184 */
185 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
186 max_drift = num_online_cpus() * threshold;
187 if (max_drift > tolerate_drift)
188 zone->percpu_drift_mark = high_wmark_pages(zone) +
189 max_drift;
df9ecaba 190 }
2244b95a
CL
191}
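The drift mark only matters when the combined per-cpu slack can exceed the low-to-min watermark gap. A small sketch of that decision with hypothetical watermark, CPU and threshold values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values, chosen only to illustrate the check above. */
	unsigned long low_wmark = 1200, min_wmark = 1000, high_wmark = 1400;
	unsigned long online_cpus = 8, threshold = 40;
	unsigned long percpu_drift_mark = 0;

	unsigned long tolerate_drift = low_wmark - min_wmark;	/* 200 */
	unsigned long max_drift = online_cpus * threshold;	/* 320 */

	/* 320 > 200: NR_FREE_PAGES could look fine while the min watermark is
	 * already breached, so a drift mark above the high watermark is set. */
	if (max_drift > tolerate_drift)
		percpu_drift_mark = high_wmark + max_drift;

	printf("percpu_drift_mark: %lu\n", percpu_drift_mark);	/* 1720 */
	return 0;
}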
192
b44129b3
MG
193void set_pgdat_percpu_threshold(pg_data_t *pgdat,
194 int (*calculate_pressure)(struct zone *))
88f5acf8
MG
195{
196 struct zone *zone;
197 int cpu;
198 int threshold;
199 int i;
200
88f5acf8
MG
201 for (i = 0; i < pgdat->nr_zones; i++) {
202 zone = &pgdat->node_zones[i];
203 if (!zone->percpu_drift_mark)
204 continue;
205
b44129b3 206 threshold = (*calculate_pressure)(zone);
bb0b6dff 207 for_each_online_cpu(cpu)
88f5acf8
MG
208 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
209 = threshold;
210 }
88f5acf8
MG
211}
212
2244b95a 213/*
bea04b07
JZ
214 * For use when we know that interrupts are disabled,
215 * or when we know that preemption is disabled and that
216 * particular counter cannot be updated from interrupt context.
2244b95a
CL
217 */
218void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
219 int delta)
220{
12938a92
CL
221 struct per_cpu_pageset __percpu *pcp = zone->pageset;
222 s8 __percpu *p = pcp->vm_stat_diff + item;
2244b95a 223 long x;
12938a92
CL
224 long t;
225
226 x = delta + __this_cpu_read(*p);
2244b95a 227
12938a92 228 t = __this_cpu_read(pcp->stat_threshold);
2244b95a 229
12938a92 230 if (unlikely(x > t || x < -t)) {
2244b95a
CL
231 zone_page_state_add(x, zone, item);
232 x = 0;
233 }
12938a92 234 __this_cpu_write(*p, x);
2244b95a
CL
235}
236EXPORT_SYMBOL(__mod_zone_page_state);
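As the comment above says, the __mod variant may only be used when interrupts are disabled (or when the counter cannot be updated from interrupt context). A minimal kernel-context sketch of a correct caller, mirroring the irq-save pattern the non-cmpxchg mod_zone_page_state() uses later in this file; the helper name, the item and the delta are hypothetical examples.

/* Sketch only: assumes a kernel context with a valid zone pointer. */
static void example_account_file_page(struct zone *zone)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Safe: interrupts are off, so the per-cpu differential cannot be
	 * modified from interrupt context on this processor. */
	__mod_zone_page_state(zone, NR_FILE_PAGES, 1);
	local_irq_restore(flags);
}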
237
2244b95a
CL
238/*
239 * Optimized increment and decrement functions.
240 *
241 * These are only for a single page and therefore can take a struct page *
242 * argument instead of struct zone *. This allows the inclusion of the code
243 * generated for page_zone(page) into the optimized functions.
244 *
245 * No overflow check is necessary and therefore the differential can be
246 * incremented or decremented in place which may allow the compilers to
247 * generate better code.
2244b95a
CL
248 * The increment or decrement is known and therefore one boundary check can
249 * be omitted.
250 *
df9ecaba
CL
251 * NOTE: These functions are very performance sensitive. Change only
252 * with care.
253 *
2244b95a
CL
254 * Some processors have inc/dec instructions that are atomic vs an interrupt.
255 * However, the code must first determine the differential location in a zone
256 * based on the processor number and then inc/dec the counter. There is no
257 * guarantee without disabling preemption that the processor will not change
258 * in between and therefore the atomicity vs. interrupt cannot be exploited
259 * in a useful way here.
260 */
c8785385 261void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
2244b95a 262{
12938a92
CL
263 struct per_cpu_pageset __percpu *pcp = zone->pageset;
264 s8 __percpu *p = pcp->vm_stat_diff + item;
265 s8 v, t;
2244b95a 266
908ee0f1 267 v = __this_cpu_inc_return(*p);
12938a92
CL
268 t = __this_cpu_read(pcp->stat_threshold);
269 if (unlikely(v > t)) {
270 s8 overstep = t >> 1;
df9ecaba 271
12938a92
CL
272 zone_page_state_add(v + overstep, zone, item);
273 __this_cpu_write(*p, -overstep);
2244b95a
CL
274 }
275}
ca889e6c
CL
276
277void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
278{
279 __inc_zone_state(page_zone(page), item);
280}
2244b95a
CL
281EXPORT_SYMBOL(__inc_zone_page_state);
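The half-threshold overstep means that once a per-cpu differential crosses the threshold, slightly more than the accumulated amount is pushed to the zone counter and the differential is left negative, so the next few increments stay local. A user-space sketch of that folding on plain integers, with a hypothetical threshold:

#include <stdio.h>

int main(void)
{
	int threshold = 32;	/* hypothetical stat_threshold */
	int zone_counter = 0;	/* stands in for the atomic zone counter */
	int cpu_diff = 0;	/* stands in for the per-cpu vm_stat_diff entry */
	int i;

	for (i = 0; i < 100; i++) {
		int v = ++cpu_diff;

		if (v > threshold) {
			int overstep = threshold >> 1;

			/* Fold v plus half a threshold into the zone counter
			 * and leave the per-cpu differential at -overstep. */
			zone_counter += v + overstep;
			cpu_diff = -overstep;
		}
	}
	/* zone_counter + cpu_diff always equals the number of increments. */
	printf("zone=%d diff=%d sum=%d\n", zone_counter, cpu_diff,
	       zone_counter + cpu_diff);
	return 0;
}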
282
c8785385 283void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
2244b95a 284{
12938a92
CL
285 struct per_cpu_pageset __percpu *pcp = zone->pageset;
286 s8 __percpu *p = pcp->vm_stat_diff + item;
287 s8 v, t;
2244b95a 288
908ee0f1 289 v = __this_cpu_dec_return(*p);
12938a92
CL
290 t = __this_cpu_read(pcp->stat_threshold);
291 if (unlikely(v < - t)) {
292 s8 overstep = t >> 1;
2244b95a 293
12938a92
CL
294 zone_page_state_add(v - overstep, zone, item);
295 __this_cpu_write(*p, overstep);
2244b95a
CL
296 }
297}
c8785385
CL
298
299void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
300{
301 __dec_zone_state(page_zone(page), item);
302}
2244b95a
CL
303EXPORT_SYMBOL(__dec_zone_page_state);
304
4156153c 305#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
7c839120
CL
306/*
307 * If we have cmpxchg_local support then we do not need to incur the overhead
308 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
309 *
310 * mod_state() modifies the zone counter state through atomic per cpu
311 * operations.
312 *
313 * Overstep mode specifies how overstep should be handled:
314 * 0 No overstepping
315 * 1 Overstepping half of threshold
316 * -1 Overstepping minus half of threshold
317 */
318static inline void mod_state(struct zone *zone,
319 enum zone_stat_item item, int delta, int overstep_mode)
320{
321 struct per_cpu_pageset __percpu *pcp = zone->pageset;
322 s8 __percpu *p = pcp->vm_stat_diff + item;
323 long o, n, t, z;
324
325 do {
326 z = 0; /* overflow to zone counters */
327
328 /*
329 * The fetching of the stat_threshold is racy. We may apply
330 * a counter threshold to the wrong cpu if we get
d3bc2367
CL
331 * rescheduled while executing here. However, the next
332 * counter update will apply the threshold again and
333 * therefore bring the counter under the threshold again.
334 *
335 * Most of the time the thresholds are the same anyway
336 * for all cpus in a zone.
7c839120
CL
337 */
338 t = this_cpu_read(pcp->stat_threshold);
339
340 o = this_cpu_read(*p);
341 n = delta + o;
342
343 if (n > t || n < -t) {
344 int os = overstep_mode * (t >> 1);
345
346 /* Overflow must be added to zone counters */
347 z = n + os;
348 n = -os;
349 }
350 } while (this_cpu_cmpxchg(*p, o, n) != o);
351
352 if (z)
353 zone_page_state_add(z, zone, item);
354}
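The lockless path above is a classic read/compute/compare-exchange retry loop: the old differential is read, the new value computed, and the store only takes effect if nothing changed the slot in between. A user-space analogy using C11 atomics, with a hypothetical threshold and overstep handling reduced to mode 0 (the kernel's this_cpu_cmpxchg is additionally per-cpu, which this sketch cannot model):

#include <stdatomic.h>
#include <stdio.h>

static void mod_state_sketch(_Atomic int *diff, long *global, int delta, int t)
{
	int o, n, z;

	do {
		z = 0;
		o = atomic_load(diff);
		n = delta + o;
		if (n > t || n < -t) {
			z = n;	/* overflow goes to the global counter */
			n = 0;
		}
	} while (!atomic_compare_exchange_weak(diff, &o, n));

	if (z)
		*global += z;
}

int main(void)
{
	_Atomic int diff = 0;
	long global = 0;
	int i;

	for (i = 0; i < 1000; i++)
		mod_state_sketch(&diff, &global, 1, 125);

	/* global + diff equals the 1000 increments applied above. */
	printf("global=%ld diff=%d\n", global, atomic_load(&diff));
	return 0;
}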
355
356void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
357 int delta)
358{
359 mod_state(zone, item, delta, 0);
360}
361EXPORT_SYMBOL(mod_zone_page_state);
362
363void inc_zone_state(struct zone *zone, enum zone_stat_item item)
364{
365 mod_state(zone, item, 1, 1);
366}
367
368void inc_zone_page_state(struct page *page, enum zone_stat_item item)
369{
370 mod_state(page_zone(page), item, 1, 1);
371}
372EXPORT_SYMBOL(inc_zone_page_state);
373
374void dec_zone_page_state(struct page *page, enum zone_stat_item item)
375{
376 mod_state(page_zone(page), item, -1, -1);
377}
378EXPORT_SYMBOL(dec_zone_page_state);
379#else
380/*
381 * Use interrupt disable to serialize counter updates
382 */
383void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
384 int delta)
385{
386 unsigned long flags;
387
388 local_irq_save(flags);
389 __mod_zone_page_state(zone, item, delta);
390 local_irq_restore(flags);
391}
392EXPORT_SYMBOL(mod_zone_page_state);
393
ca889e6c
CL
394void inc_zone_state(struct zone *zone, enum zone_stat_item item)
395{
396 unsigned long flags;
397
398 local_irq_save(flags);
399 __inc_zone_state(zone, item);
400 local_irq_restore(flags);
401}
402
2244b95a
CL
403void inc_zone_page_state(struct page *page, enum zone_stat_item item)
404{
405 unsigned long flags;
406 struct zone *zone;
2244b95a
CL
407
408 zone = page_zone(page);
409 local_irq_save(flags);
ca889e6c 410 __inc_zone_state(zone, item);
2244b95a
CL
411 local_irq_restore(flags);
412}
413EXPORT_SYMBOL(inc_zone_page_state);
414
415void dec_zone_page_state(struct page *page, enum zone_stat_item item)
416{
417 unsigned long flags;
2244b95a 418
2244b95a 419 local_irq_save(flags);
a302eb4e 420 __dec_zone_page_state(page, item);
2244b95a
CL
421 local_irq_restore(flags);
422}
423EXPORT_SYMBOL(dec_zone_page_state);
7c839120 424#endif
2244b95a 425
7cc36bbd
CL
426
427/*
428 * Fold a differential into the global counters.
429 * Returns the number of counters updated.
430 */
431static int fold_diff(int *diff)
4edb0748
CL
432{
433 int i;
7cc36bbd 434 int changes = 0;
4edb0748
CL
435
436 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
7cc36bbd 437 if (diff[i]) {
4edb0748 438 atomic_long_add(diff[i], &vm_stat[i]);
7cc36bbd
CL
439 changes++;
440 }
441 return changes;
4edb0748
CL
442}
443
2244b95a 444/*
2bb921e5 445 * Update the zone counters for the current cpu.
a7f75e25 446 *
4037d452
CL
447 * Note that refresh_cpu_vm_stats strives to only access
448 * node local memory. The per cpu pagesets on remote zones are placed
449 * in the memory local to the processor using that pageset. So the
450 * loop over all zones will access a series of cachelines local to
451 * the processor.
452 *
453 * The call to zone_page_state_add updates the cachelines with the
454 * statistics in the remote zone struct as well as the global cachelines
455 * with the global counters. These could cause remote node cache line
456 * bouncing, so they are only done when necessary.
7cc36bbd
CL
457 *
458 * The function returns the number of global counters updated.
2244b95a 459 */
7cc36bbd 460static int refresh_cpu_vm_stats(void)
2244b95a
CL
461{
462 struct zone *zone;
463 int i;
a7f75e25 464 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
7cc36bbd 465 int changes = 0;
2244b95a 466
ee99c71c 467 for_each_populated_zone(zone) {
fbc2edb0 468 struct per_cpu_pageset __percpu *p = zone->pageset;
2244b95a 469
fbc2edb0
CL
470 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
471 int v;
2244b95a 472
fbc2edb0
CL
473 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
474 if (v) {
a7f75e25 475
a7f75e25
CL
476 atomic_long_add(v, &zone->vm_stat[i]);
477 global_diff[i] += v;
4037d452
CL
478#ifdef CONFIG_NUMA
479 /* 3 seconds idle till flush */
fbc2edb0 480 __this_cpu_write(p->expire, 3);
4037d452 481#endif
2244b95a 482 }
fbc2edb0 483 }
468fd62e 484 cond_resched();
4037d452
CL
485#ifdef CONFIG_NUMA
486 /*
487 * Deal with draining the remote pageset of this
488 * processor
489 *
490 * Check if there are pages remaining in this pageset;
491 * if not, there is nothing to expire.
492 */
fbc2edb0
CL
493 if (!__this_cpu_read(p->expire) ||
494 !__this_cpu_read(p->pcp.count))
4037d452
CL
495 continue;
496
497 /*
498 * We never drain zones local to this processor.
499 */
500 if (zone_to_nid(zone) == numa_node_id()) {
fbc2edb0 501 __this_cpu_write(p->expire, 0);
4037d452
CL
502 continue;
503 }
504
fbc2edb0 505 if (__this_cpu_dec_return(p->expire))
4037d452
CL
506 continue;
507
7cc36bbd 508 if (__this_cpu_read(p->pcp.count)) {
7c8e0181 509 drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
7cc36bbd
CL
510 changes++;
511 }
4037d452 512#endif
2244b95a 513 }
7cc36bbd
CL
514 changes += fold_diff(global_diff);
515 return changes;
2244b95a
CL
516}
517
2bb921e5
CL
518/*
519 * Fold the data for an offline cpu into the global array.
520 * There cannot be any access by the offline cpu and therefore
521 * synchronization is simplified.
522 */
523void cpu_vm_stats_fold(int cpu)
524{
525 struct zone *zone;
526 int i;
527 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
528
529 for_each_populated_zone(zone) {
530 struct per_cpu_pageset *p;
531
532 p = per_cpu_ptr(zone->pageset, cpu);
533
534 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
535 if (p->vm_stat_diff[i]) {
536 int v;
537
538 v = p->vm_stat_diff[i];
539 p->vm_stat_diff[i] = 0;
540 atomic_long_add(v, &zone->vm_stat[i]);
541 global_diff[i] += v;
542 }
543 }
544
4edb0748 545 fold_diff(global_diff);
2bb921e5
CL
546}
547
40f4b1ea
CS
548/*
549 * this is only called if !populated_zone(zone), which implies no other users of
550 * pset->vm_stat_diff[] exsist.
551 */
5a883813
MK
552void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
553{
554 int i;
555
556 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
557 if (pset->vm_stat_diff[i]) {
558 int v = pset->vm_stat_diff[i];
559 pset->vm_stat_diff[i] = 0;
560 atomic_long_add(v, &zone->vm_stat[i]);
561 atomic_long_add(v, &vm_stat[i]);
562 }
563}
2244b95a
CL
564#endif
565
ca889e6c
CL
566#ifdef CONFIG_NUMA
567/*
568 * zonelist = the list of zones passed to the allocator
569 * z = the zone from which the allocation occurred.
570 *
571 * Must be called with interrupts disabled.
78afd561
AK
572 *
573 * When __GFP_OTHER_NODE is set assume the node of the preferred
574 * zone is the local node. This is useful for daemons who allocate
575 * memory on behalf of other processes.
ca889e6c 576 */
78afd561 577void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
ca889e6c 578{
18ea7e71 579 if (z->zone_pgdat == preferred_zone->zone_pgdat) {
ca889e6c
CL
580 __inc_zone_state(z, NUMA_HIT);
581 } else {
582 __inc_zone_state(z, NUMA_MISS);
18ea7e71 583 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
ca889e6c 584 }
78afd561
AK
585 if (z->node == ((flags & __GFP_OTHER_NODE) ?
586 preferred_zone->node : numa_node_id()))
ca889e6c
CL
587 __inc_zone_state(z, NUMA_LOCAL);
588 else
589 __inc_zone_state(z, NUMA_OTHER);
590}
591#endif
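The NUMA counters classify every allocation twice: hit/miss against the preferred zone's node (with a foreign count on the preferred node for misses) and local/other against the node the allocation actually ran on. A user-space sketch of that classification using plain node ids; __GFP_OTHER_NODE handling is omitted for brevity.

#include <stdio.h>

static void classify(int preferred_node, int zone_node, int running_node)
{
	printf("preferred=%d zone=%d cpu=%d -> ",
	       preferred_node, zone_node, running_node);

	if (zone_node == preferred_node)
		printf("NUMA_HIT ");
	else
		printf("NUMA_MISS (and NUMA_FOREIGN on node %d) ",
		       preferred_node);

	if (zone_node == running_node)
		printf("NUMA_LOCAL\n");
	else
		printf("NUMA_OTHER\n");
}

int main(void)
{
	classify(0, 0, 0);	/* hit, local */
	classify(0, 1, 0);	/* miss + foreign, other */
	classify(1, 1, 0);	/* hit, other */
	return 0;
}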
592
d7a5752c 593#ifdef CONFIG_COMPACTION
36deb0be 594
d7a5752c
MG
595struct contig_page_info {
596 unsigned long free_pages;
597 unsigned long free_blocks_total;
598 unsigned long free_blocks_suitable;
599};
600
601/*
602 * Calculate the number of free pages in a zone, how many contiguous
603 * pages are free and how many are large enough to satisfy an allocation of
604 * the target size. Note that this function makes no attempt to estimate
605 * how many suitable free blocks there *might* be if MOVABLE pages were
606 * migrated. Calculating that is possible, but expensive and can be
607 * figured out from userspace
608 */
609static void fill_contig_page_info(struct zone *zone,
610 unsigned int suitable_order,
611 struct contig_page_info *info)
612{
613 unsigned int order;
614
615 info->free_pages = 0;
616 info->free_blocks_total = 0;
617 info->free_blocks_suitable = 0;
618
619 for (order = 0; order < MAX_ORDER; order++) {
620 unsigned long blocks;
621
622 /* Count number of free blocks */
623 blocks = zone->free_area[order].nr_free;
624 info->free_blocks_total += blocks;
625
626 /* Count free base pages */
627 info->free_pages += blocks << order;
628
629 /* Count the suitable free blocks */
630 if (order >= suitable_order)
631 info->free_blocks_suitable += blocks <<
632 (order - suitable_order);
633 }
634}
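For a given suitable_order, every free block of order >= suitable_order counts as 2^(order - suitable_order) suitable blocks, since it could be split down to the requested size. A user-space sketch of that accounting over a hypothetical free_area layout:

#include <stdio.h>

#define MAX_ORDER_SKETCH 11

int main(void)
{
	/* Hypothetical nr_free per order, smallest order first. */
	unsigned long nr_free[MAX_ORDER_SKETCH] = { 50, 20, 8, 4, 2, 1, 0, 0, 0, 0, 0 };
	unsigned int suitable_order = 3;	/* e.g. an order-3 request */
	unsigned long free_pages = 0, blocks_total = 0, blocks_suitable = 0;
	unsigned int order;

	for (order = 0; order < MAX_ORDER_SKETCH; order++) {
		unsigned long blocks = nr_free[order];

		blocks_total += blocks;
		free_pages += blocks << order;	/* base pages in this order */
		if (order >= suitable_order)
			blocks_suitable += blocks << (order - suitable_order);
	}

	/* 4 order-3 + 2 order-4 (worth 4) + 1 order-5 (worth 4) = 12 suitable */
	printf("pages=%lu total=%lu suitable=%lu\n",
	       free_pages, blocks_total, blocks_suitable);
	return 0;
}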
f1a5ab12
MG
635
636/*
637 * A fragmentation index only makes sense if an allocation of a requested
638 * size would fail. If that is true, the fragmentation index indicates
639 * whether external fragmentation or a lack of memory was the problem.
640 * The value can be used to determine if page reclaim or compaction
641 * should be used
642 */
56de7263 643static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
f1a5ab12
MG
644{
645 unsigned long requested = 1UL << order;
646
647 if (!info->free_blocks_total)
648 return 0;
649
650 /* Fragmentation index only makes sense when a request would fail */
651 if (info->free_blocks_suitable)
652 return -1000;
653
654 /*
655 * Index is between 0 and 1 so return within 3 decimal places
656 *
657 * 0 => allocation would fail due to lack of memory
658 * 1 => allocation would fail due to fragmentation
659 */
660 return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
661}
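The index is only meaningful when no suitable block exists; it then compares how many blocks the free pages would form at the requested size against how many free blocks actually exist. A worked user-space version of the same division with hypothetical numbers; values near 0 indicate a genuine shortage of memory, values near 1 (scaled to 1000 here) indicate external fragmentation.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical zone state: plenty of order-0 pages, nothing large. */
	unsigned long requested = 1UL << 4;	/* an order-4 request */
	uint64_t free_pages = 1000;
	uint64_t free_blocks_total = 900;	/* almost all single pages */
	uint64_t free_blocks_suitable = 0;	/* the request would fail */
	long index;

	if (free_blocks_suitable) {
		index = -1000;	/* request would succeed: index not meaningful */
	} else {
		/* Same shape as __fragmentation_index(), to 3 decimal places. */
		index = 1000 - (1000 + free_pages * 1000 / requested)
				/ free_blocks_total;
	}

	/* 1000 - (1000 + 62500) / 900 = 930, i.e. 0.930: heavy fragmentation. */
	printf("fragmentation index: %ld.%03ld\n", index / 1000, index % 1000);
	return 0;
}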
56de7263
MG
662
663/* Same as __fragmentation index but allocs contig_page_info on stack */
664int fragmentation_index(struct zone *zone, unsigned int order)
665{
666 struct contig_page_info info;
667
668 fill_contig_page_info(zone, order, &info);
669 return __fragmentation_index(order, &info);
670}
d7a5752c
MG
671#endif
672
673#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
8f32f7e5 674#include <linux/proc_fs.h>
f6ac2354
CL
675#include <linux/seq_file.h>
676
467c996c
MG
677static char * const migratetype_names[MIGRATE_TYPES] = {
678 "Unmovable",
679 "Reclaimable",
680 "Movable",
681 "Reserve",
47118af0
MN
682#ifdef CONFIG_CMA
683 "CMA",
684#endif
194159fb 685#ifdef CONFIG_MEMORY_ISOLATION
91446b06 686 "Isolate",
194159fb 687#endif
467c996c
MG
688};
689
f6ac2354
CL
690static void *frag_start(struct seq_file *m, loff_t *pos)
691{
692 pg_data_t *pgdat;
693 loff_t node = *pos;
694 for (pgdat = first_online_pgdat();
695 pgdat && node;
696 pgdat = next_online_pgdat(pgdat))
697 --node;
698
699 return pgdat;
700}
701
702static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
703{
704 pg_data_t *pgdat = (pg_data_t *)arg;
705
706 (*pos)++;
707 return next_online_pgdat(pgdat);
708}
709
710static void frag_stop(struct seq_file *m, void *arg)
711{
712}
713
467c996c
MG
714/* Walk all the zones in a node and print using a callback */
715static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
716 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
f6ac2354 717{
f6ac2354
CL
718 struct zone *zone;
719 struct zone *node_zones = pgdat->node_zones;
720 unsigned long flags;
f6ac2354
CL
721
722 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
723 if (!populated_zone(zone))
724 continue;
725
726 spin_lock_irqsave(&zone->lock, flags);
467c996c 727 print(m, pgdat, zone);
f6ac2354 728 spin_unlock_irqrestore(&zone->lock, flags);
467c996c
MG
729 }
730}
d7a5752c 731#endif
467c996c 732
0d6617c7 733#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
fa25c503
KM
734#ifdef CONFIG_ZONE_DMA
735#define TEXT_FOR_DMA(xx) xx "_dma",
736#else
737#define TEXT_FOR_DMA(xx)
738#endif
739
740#ifdef CONFIG_ZONE_DMA32
741#define TEXT_FOR_DMA32(xx) xx "_dma32",
742#else
743#define TEXT_FOR_DMA32(xx)
744#endif
745
746#ifdef CONFIG_HIGHMEM
747#define TEXT_FOR_HIGHMEM(xx) xx "_high",
748#else
749#define TEXT_FOR_HIGHMEM(xx)
750#endif
751
752#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
753 TEXT_FOR_HIGHMEM(xx) xx "_movable",
754
755const char * const vmstat_text[] = {
09316c09 756 /* enum zone_stat_item counters */
fa25c503 757 "nr_free_pages",
81c0a2bb 758 "nr_alloc_batch",
fa25c503
KM
759 "nr_inactive_anon",
760 "nr_active_anon",
761 "nr_inactive_file",
762 "nr_active_file",
763 "nr_unevictable",
764 "nr_mlock",
765 "nr_anon_pages",
766 "nr_mapped",
767 "nr_file_pages",
768 "nr_dirty",
769 "nr_writeback",
770 "nr_slab_reclaimable",
771 "nr_slab_unreclaimable",
772 "nr_page_table_pages",
773 "nr_kernel_stack",
774 "nr_unstable",
775 "nr_bounce",
776 "nr_vmscan_write",
49ea7eb6 777 "nr_vmscan_immediate_reclaim",
fa25c503
KM
778 "nr_writeback_temp",
779 "nr_isolated_anon",
780 "nr_isolated_file",
781 "nr_shmem",
782 "nr_dirtied",
783 "nr_written",
0d5d823a 784 "nr_pages_scanned",
fa25c503
KM
785
786#ifdef CONFIG_NUMA
787 "numa_hit",
788 "numa_miss",
789 "numa_foreign",
790 "numa_interleave",
791 "numa_local",
792 "numa_other",
793#endif
a528910e
JW
794 "workingset_refault",
795 "workingset_activate",
449dd698 796 "workingset_nodereclaim",
fa25c503 797 "nr_anon_transparent_hugepages",
d1ce749a 798 "nr_free_cma",
09316c09
KK
799
800 /* enum writeback_stat_item counters */
fa25c503
KM
801 "nr_dirty_threshold",
802 "nr_dirty_background_threshold",
803
804#ifdef CONFIG_VM_EVENT_COUNTERS
09316c09 805 /* enum vm_event_item counters */
fa25c503
KM
806 "pgpgin",
807 "pgpgout",
808 "pswpin",
809 "pswpout",
810
811 TEXTS_FOR_ZONES("pgalloc")
812
813 "pgfree",
814 "pgactivate",
815 "pgdeactivate",
816
817 "pgfault",
818 "pgmajfault",
819
820 TEXTS_FOR_ZONES("pgrefill")
904249aa
YH
821 TEXTS_FOR_ZONES("pgsteal_kswapd")
822 TEXTS_FOR_ZONES("pgsteal_direct")
fa25c503
KM
823 TEXTS_FOR_ZONES("pgscan_kswapd")
824 TEXTS_FOR_ZONES("pgscan_direct")
68243e76 825 "pgscan_direct_throttle",
fa25c503
KM
826
827#ifdef CONFIG_NUMA
828 "zone_reclaim_failed",
829#endif
830 "pginodesteal",
831 "slabs_scanned",
fa25c503
KM
832 "kswapd_inodesteal",
833 "kswapd_low_wmark_hit_quickly",
834 "kswapd_high_wmark_hit_quickly",
fa25c503
KM
835 "pageoutrun",
836 "allocstall",
837
838 "pgrotated",
839
5509a5d2
DH
840 "drop_pagecache",
841 "drop_slab",
842
03c5a6e1
MG
843#ifdef CONFIG_NUMA_BALANCING
844 "numa_pte_updates",
72403b4a 845 "numa_huge_pte_updates",
03c5a6e1
MG
846 "numa_hint_faults",
847 "numa_hint_faults_local",
848 "numa_pages_migrated",
849#endif
5647bc29
MG
850#ifdef CONFIG_MIGRATION
851 "pgmigrate_success",
852 "pgmigrate_fail",
853#endif
fa25c503 854#ifdef CONFIG_COMPACTION
397487db
MG
855 "compact_migrate_scanned",
856 "compact_free_scanned",
857 "compact_isolated",
fa25c503
KM
858 "compact_stall",
859 "compact_fail",
860 "compact_success",
861#endif
862
863#ifdef CONFIG_HUGETLB_PAGE
864 "htlb_buddy_alloc_success",
865 "htlb_buddy_alloc_fail",
866#endif
867 "unevictable_pgs_culled",
868 "unevictable_pgs_scanned",
869 "unevictable_pgs_rescued",
870 "unevictable_pgs_mlocked",
871 "unevictable_pgs_munlocked",
872 "unevictable_pgs_cleared",
873 "unevictable_pgs_stranded",
fa25c503
KM
874
875#ifdef CONFIG_TRANSPARENT_HUGEPAGE
876 "thp_fault_alloc",
877 "thp_fault_fallback",
878 "thp_collapse_alloc",
879 "thp_collapse_alloc_failed",
880 "thp_split",
d8a8e1f0
KS
881 "thp_zero_page_alloc",
882 "thp_zero_page_alloc_failed",
fa25c503 883#endif
09316c09
KK
884#ifdef CONFIG_MEMORY_BALLOON
885 "balloon_inflate",
886 "balloon_deflate",
887#ifdef CONFIG_BALLOON_COMPACTION
888 "balloon_migrate",
889#endif
890#endif /* CONFIG_MEMORY_BALLOON */
ec659934 891#ifdef CONFIG_DEBUG_TLBFLUSH
6df46865 892#ifdef CONFIG_SMP
9824cf97
DH
893 "nr_tlb_remote_flush",
894 "nr_tlb_remote_flush_received",
ec659934 895#endif /* CONFIG_SMP */
9824cf97
DH
896 "nr_tlb_local_flush_all",
897 "nr_tlb_local_flush_one",
ec659934 898#endif /* CONFIG_DEBUG_TLBFLUSH */
fa25c503 899
4f115147
DB
900#ifdef CONFIG_DEBUG_VM_VMACACHE
901 "vmacache_find_calls",
902 "vmacache_find_hits",
903#endif
fa25c503
KM
904#endif /* CONFIG_VM_EVENT_COUNTERS */
905};
0d6617c7 906#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
fa25c503
KM
907
908
d7a5752c 909#ifdef CONFIG_PROC_FS
467c996c
MG
910static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
911 struct zone *zone)
912{
913 int order;
914
915 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
916 for (order = 0; order < MAX_ORDER; ++order)
917 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
918 seq_putc(m, '\n');
919}
920
921/*
922 * This walks the free areas for each zone.
923 */
924static int frag_show(struct seq_file *m, void *arg)
925{
926 pg_data_t *pgdat = (pg_data_t *)arg;
927 walk_zones_in_node(m, pgdat, frag_show_print);
928 return 0;
929}
930
931static void pagetypeinfo_showfree_print(struct seq_file *m,
932 pg_data_t *pgdat, struct zone *zone)
933{
934 int order, mtype;
935
936 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
937 seq_printf(m, "Node %4d, zone %8s, type %12s ",
938 pgdat->node_id,
939 zone->name,
940 migratetype_names[mtype]);
941 for (order = 0; order < MAX_ORDER; ++order) {
942 unsigned long freecount = 0;
943 struct free_area *area;
944 struct list_head *curr;
945
946 area = &(zone->free_area[order]);
947
948 list_for_each(curr, &area->free_list[mtype])
949 freecount++;
950 seq_printf(m, "%6lu ", freecount);
951 }
f6ac2354
CL
952 seq_putc(m, '\n');
953 }
467c996c
MG
954}
955
956/* Print out the free pages at each order for each migratetype */
957static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
958{
959 int order;
960 pg_data_t *pgdat = (pg_data_t *)arg;
961
962 /* Print header */
963 seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
964 for (order = 0; order < MAX_ORDER; ++order)
965 seq_printf(m, "%6d ", order);
966 seq_putc(m, '\n');
967
968 walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
969
970 return 0;
971}
972
973static void pagetypeinfo_showblockcount_print(struct seq_file *m,
974 pg_data_t *pgdat, struct zone *zone)
975{
976 int mtype;
977 unsigned long pfn;
978 unsigned long start_pfn = zone->zone_start_pfn;
108bcc96 979 unsigned long end_pfn = zone_end_pfn(zone);
467c996c
MG
980 unsigned long count[MIGRATE_TYPES] = { 0, };
981
982 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
983 struct page *page;
984
985 if (!pfn_valid(pfn))
986 continue;
987
988 page = pfn_to_page(pfn);
eb33575c
MG
989
990 /* Watch for unexpected holes punched in the memmap */
991 if (!memmap_valid_within(pfn, page, zone))
e80d6a24 992 continue;
eb33575c 993
467c996c
MG
994 mtype = get_pageblock_migratetype(page);
995
e80d6a24
MG
996 if (mtype < MIGRATE_TYPES)
997 count[mtype]++;
467c996c
MG
998 }
999
1000 /* Print counts */
1001 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1002 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1003 seq_printf(m, "%12lu ", count[mtype]);
1004 seq_putc(m, '\n');
1005}
1006
1007/* Print out the number of pageblocks of each migratetype */
1008static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1009{
1010 int mtype;
1011 pg_data_t *pgdat = (pg_data_t *)arg;
1012
1013 seq_printf(m, "\n%-23s", "Number of blocks type ");
1014 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1015 seq_printf(m, "%12s ", migratetype_names[mtype]);
1016 seq_putc(m, '\n');
1017 walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1018
1019 return 0;
1020}
1021
48c96a36
JK
1022#ifdef CONFIG_PAGE_OWNER
1023static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
1024 pg_data_t *pgdat,
1025 struct zone *zone)
1026{
1027 struct page *page;
1028 struct page_ext *page_ext;
1029 unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
1030 unsigned long end_pfn = pfn + zone->spanned_pages;
1031 unsigned long count[MIGRATE_TYPES] = { 0, };
1032 int pageblock_mt, page_mt;
1033 int i;
1034
1035 /* Scan block by block. First and last block may be incomplete */
1036 pfn = zone->zone_start_pfn;
1037
1038 /*
1039 * Walk the zone in pageblock_nr_pages steps. If a page block spans
1040 * a zone boundary, it will be double counted between zones. This does
1041 * not matter as the mixed block count will still be correct
1042 */
1043 for (; pfn < end_pfn; ) {
1044 if (!pfn_valid(pfn)) {
1045 pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
1046 continue;
1047 }
1048
1049 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
1050 block_end_pfn = min(block_end_pfn, end_pfn);
1051
1052 page = pfn_to_page(pfn);
1053 pageblock_mt = get_pfnblock_migratetype(page, pfn);
1054
1055 for (; pfn < block_end_pfn; pfn++) {
1056 if (!pfn_valid_within(pfn))
1057 continue;
1058
1059 page = pfn_to_page(pfn);
1060 if (PageBuddy(page)) {
1061 pfn += (1UL << page_order(page)) - 1;
1062 continue;
1063 }
1064
1065 if (PageReserved(page))
1066 continue;
1067
1068 page_ext = lookup_page_ext(page);
1069
1070 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
1071 continue;
1072
1073 page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
1074 if (pageblock_mt != page_mt) {
1075 if (is_migrate_cma(pageblock_mt))
1076 count[MIGRATE_MOVABLE]++;
1077 else
1078 count[pageblock_mt]++;
1079
1080 pfn = block_end_pfn;
1081 break;
1082 }
1083 pfn += (1UL << page_ext->order) - 1;
1084 }
1085 }
1086
1087 /* Print counts */
1088 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1089 for (i = 0; i < MIGRATE_TYPES; i++)
1090 seq_printf(m, "%12lu ", count[i]);
1091 seq_putc(m, '\n');
1092}
1093#endif /* CONFIG_PAGE_OWNER */
1094
1095/*
1096 * Print out the number of pageblocks for each migratetype that contain pages
1097 * of other types. This gives an indication of how well fallbacks are being
1098 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1099 * to determine what is going on
1100 */
1101static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1102{
1103#ifdef CONFIG_PAGE_OWNER
1104 int mtype;
1105
1106 if (!page_owner_inited)
1107 return;
1108
1109 drain_all_pages(NULL);
1110
1111 seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1112 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1113 seq_printf(m, "%12s ", migratetype_names[mtype]);
1114 seq_putc(m, '\n');
1115
1116 walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1117#endif /* CONFIG_PAGE_OWNER */
1118}
1119
467c996c
MG
1120/*
1121 * This prints out statistics in relation to grouping pages by mobility.
1122 * It is expensive to collect so do not constantly read the file.
1123 */
1124static int pagetypeinfo_show(struct seq_file *m, void *arg)
1125{
1126 pg_data_t *pgdat = (pg_data_t *)arg;
1127
41b25a37 1128 /* check memoryless node */
a47b53c5 1129 if (!node_state(pgdat->node_id, N_MEMORY))
41b25a37
KM
1130 return 0;
1131
467c996c
MG
1132 seq_printf(m, "Page block order: %d\n", pageblock_order);
1133 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
1134 seq_putc(m, '\n');
1135 pagetypeinfo_showfree(m, pgdat);
1136 pagetypeinfo_showblockcount(m, pgdat);
48c96a36 1137 pagetypeinfo_showmixedcount(m, pgdat);
467c996c 1138
f6ac2354
CL
1139 return 0;
1140}
1141
8f32f7e5 1142static const struct seq_operations fragmentation_op = {
f6ac2354
CL
1143 .start = frag_start,
1144 .next = frag_next,
1145 .stop = frag_stop,
1146 .show = frag_show,
1147};
1148
8f32f7e5
AD
1149static int fragmentation_open(struct inode *inode, struct file *file)
1150{
1151 return seq_open(file, &fragmentation_op);
1152}
1153
1154static const struct file_operations fragmentation_file_operations = {
1155 .open = fragmentation_open,
1156 .read = seq_read,
1157 .llseek = seq_lseek,
1158 .release = seq_release,
1159};
1160
74e2e8e8 1161static const struct seq_operations pagetypeinfo_op = {
467c996c
MG
1162 .start = frag_start,
1163 .next = frag_next,
1164 .stop = frag_stop,
1165 .show = pagetypeinfo_show,
1166};
1167
74e2e8e8
AD
1168static int pagetypeinfo_open(struct inode *inode, struct file *file)
1169{
1170 return seq_open(file, &pagetypeinfo_op);
1171}
1172
1173static const struct file_operations pagetypeinfo_file_ops = {
1174 .open = pagetypeinfo_open,
1175 .read = seq_read,
1176 .llseek = seq_lseek,
1177 .release = seq_release,
1178};
1179
467c996c
MG
1180static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1181 struct zone *zone)
f6ac2354 1182{
467c996c
MG
1183 int i;
1184 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1185 seq_printf(m,
1186 "\n pages free %lu"
1187 "\n min %lu"
1188 "\n low %lu"
1189 "\n high %lu"
08d9ae7c 1190 "\n scanned %lu"
467c996c 1191 "\n spanned %lu"
9feedc9d
JL
1192 "\n present %lu"
1193 "\n managed %lu",
88f5acf8 1194 zone_page_state(zone, NR_FREE_PAGES),
41858966
MG
1195 min_wmark_pages(zone),
1196 low_wmark_pages(zone),
1197 high_wmark_pages(zone),
0d5d823a 1198 zone_page_state(zone, NR_PAGES_SCANNED),
467c996c 1199 zone->spanned_pages,
9feedc9d
JL
1200 zone->present_pages,
1201 zone->managed_pages);
467c996c
MG
1202
1203 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1204 seq_printf(m, "\n %-12s %lu", vmstat_text[i],
1205 zone_page_state(zone, i));
1206
1207 seq_printf(m,
3484b2de 1208 "\n protection: (%ld",
467c996c
MG
1209 zone->lowmem_reserve[0]);
1210 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
3484b2de 1211 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
467c996c
MG
1212 seq_printf(m,
1213 ")"
1214 "\n pagesets");
1215 for_each_online_cpu(i) {
1216 struct per_cpu_pageset *pageset;
467c996c 1217
99dcc3e5 1218 pageset = per_cpu_ptr(zone->pageset, i);
3dfa5721
CL
1219 seq_printf(m,
1220 "\n cpu: %i"
1221 "\n count: %i"
1222 "\n high: %i"
1223 "\n batch: %i",
1224 i,
1225 pageset->pcp.count,
1226 pageset->pcp.high,
1227 pageset->pcp.batch);
df9ecaba 1228#ifdef CONFIG_SMP
467c996c
MG
1229 seq_printf(m, "\n vm stats threshold: %d",
1230 pageset->stat_threshold);
df9ecaba 1231#endif
f6ac2354 1232 }
467c996c
MG
1233 seq_printf(m,
1234 "\n all_unreclaimable: %u"
556adecb
RR
1235 "\n start_pfn: %lu"
1236 "\n inactive_ratio: %u",
6e543d57 1237 !zone_reclaimable(zone),
556adecb
RR
1238 zone->zone_start_pfn,
1239 zone->inactive_ratio);
467c996c
MG
1240 seq_putc(m, '\n');
1241}
1242
1243/*
1244 * Output information about zones in @pgdat.
1245 */
1246static int zoneinfo_show(struct seq_file *m, void *arg)
1247{
1248 pg_data_t *pgdat = (pg_data_t *)arg;
1249 walk_zones_in_node(m, pgdat, zoneinfo_show_print);
f6ac2354
CL
1250 return 0;
1251}
1252
5c9fe628 1253static const struct seq_operations zoneinfo_op = {
f6ac2354
CL
1254 .start = frag_start, /* iterate over all zones. The same as in
1255 * fragmentation. */
1256 .next = frag_next,
1257 .stop = frag_stop,
1258 .show = zoneinfo_show,
1259};
1260
5c9fe628
AD
1261static int zoneinfo_open(struct inode *inode, struct file *file)
1262{
1263 return seq_open(file, &zoneinfo_op);
1264}
1265
1266static const struct file_operations proc_zoneinfo_file_operations = {
1267 .open = zoneinfo_open,
1268 .read = seq_read,
1269 .llseek = seq_lseek,
1270 .release = seq_release,
1271};
1272
79da826a
MR
1273enum writeback_stat_item {
1274 NR_DIRTY_THRESHOLD,
1275 NR_DIRTY_BG_THRESHOLD,
1276 NR_VM_WRITEBACK_STAT_ITEMS,
1277};
1278
f6ac2354
CL
1279static void *vmstat_start(struct seq_file *m, loff_t *pos)
1280{
2244b95a 1281 unsigned long *v;
79da826a 1282 int i, stat_items_size;
f6ac2354
CL
1283
1284 if (*pos >= ARRAY_SIZE(vmstat_text))
1285 return NULL;
79da826a
MR
1286 stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1287 NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
f6ac2354 1288
f8891e5e 1289#ifdef CONFIG_VM_EVENT_COUNTERS
79da826a 1290 stat_items_size += sizeof(struct vm_event_state);
f8891e5e 1291#endif
79da826a
MR
1292
1293 v = kmalloc(stat_items_size, GFP_KERNEL);
2244b95a
CL
1294 m->private = v;
1295 if (!v)
f6ac2354 1296 return ERR_PTR(-ENOMEM);
2244b95a
CL
1297 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1298 v[i] = global_page_state(i);
79da826a
MR
1299 v += NR_VM_ZONE_STAT_ITEMS;
1300
1301 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1302 v + NR_DIRTY_THRESHOLD);
1303 v += NR_VM_WRITEBACK_STAT_ITEMS;
1304
f8891e5e 1305#ifdef CONFIG_VM_EVENT_COUNTERS
79da826a
MR
1306 all_vm_events(v);
1307 v[PGPGIN] /= 2; /* sectors -> kbytes */
1308 v[PGPGOUT] /= 2;
f8891e5e 1309#endif
ff8b16d7 1310 return (unsigned long *)m->private + *pos;
f6ac2354
CL
1311}
1312
1313static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1314{
1315 (*pos)++;
1316 if (*pos >= ARRAY_SIZE(vmstat_text))
1317 return NULL;
1318 return (unsigned long *)m->private + *pos;
1319}
1320
1321static int vmstat_show(struct seq_file *m, void *arg)
1322{
1323 unsigned long *l = arg;
1324 unsigned long off = l - (unsigned long *)m->private;
1325
1326 seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1327 return 0;
1328}
1329
1330static void vmstat_stop(struct seq_file *m, void *arg)
1331{
1332 kfree(m->private);
1333 m->private = NULL;
1334}
1335
b6aa44ab 1336static const struct seq_operations vmstat_op = {
f6ac2354
CL
1337 .start = vmstat_start,
1338 .next = vmstat_next,
1339 .stop = vmstat_stop,
1340 .show = vmstat_show,
1341};
1342
b6aa44ab
AD
1343static int vmstat_open(struct inode *inode, struct file *file)
1344{
1345 return seq_open(file, &vmstat_op);
1346}
1347
1348static const struct file_operations proc_vmstat_file_operations = {
1349 .open = vmstat_open,
1350 .read = seq_read,
1351 .llseek = seq_lseek,
1352 .release = seq_release,
1353};
f6ac2354
CL
1354#endif /* CONFIG_PROC_FS */
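Everything assembled by vmstat_start() above is emitted as one "name value" pair per line in /proc/vmstat, in vmstat_text[] order. A minimal user-space reader that picks out a single counter, as a usage sketch (any counter name from vmstat_text[] works):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[128];
	unsigned long value;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	/* Each line is "<name> <value>". */
	while (fscanf(f, "%127s %lu", name, &value) == 2) {
		if (strcmp(name, "nr_free_pages") == 0)
			printf("nr_free_pages: %lu\n", value);
	}
	fclose(f);
	return 0;
}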
1355
df9ecaba 1356#ifdef CONFIG_SMP
d1187ed2 1357static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
77461ab3 1358int sysctl_stat_interval __read_mostly = HZ;
7cc36bbd 1359static cpumask_var_t cpu_stat_off;
d1187ed2
CL
1360
1361static void vmstat_update(struct work_struct *w)
1362{
7cc36bbd
CL
1363 if (refresh_cpu_vm_stats())
1364 /*
1365 * Counters were updated so we expect more updates
1366 * to occur in the future. Keep on running the
1367 * update worker thread.
1368 */
1369 schedule_delayed_work(this_cpu_ptr(&vmstat_work),
1370 round_jiffies_relative(sysctl_stat_interval));
1371 else {
1372 /*
1373 * We did not update any counters so the app may be in
1374 * a mode where it does not cause counter updates.
1375 * We may be uselessly running vmstat_update.
1376 * Defer the checking for differentials to the
1377 * shepherd thread on a different processor.
1378 */
1379 int r;
1380 /*
1381 * Shepherd work thread does not race since it never
1382 * changes the bit if it is zero, but the cpu
1383 * online / offline code may race if
1384 * worker threads are still allowed during
1385 * shutdown / startup.
1386 */
1387 r = cpumask_test_and_set_cpu(smp_processor_id(),
1388 cpu_stat_off);
1389 VM_BUG_ON(r);
1390 }
1391}
1392
1393/*
1394 * Check if the diffs for a certain cpu indicate that
1395 * an update is needed.
1396 */
1397static bool need_update(int cpu)
1398{
1399 struct zone *zone;
1400
1401 for_each_populated_zone(zone) {
1402 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1403
1404 BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1405 /*
1406 * The fast way of checking if there are any vmstat diffs.
1407 * This works because the diffs are byte sized items.
1408 */
1409 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1410 return true;
1411
1412 }
1413 return false;
1414}
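need_update() can treat the whole vm_stat_diff[] array as raw bytes because each differential is a single s8; one memchr_inv() call then answers "is anything non-zero?". A user-space sketch of the same idea (memchr_inv() is a kernel helper, so a trivial loop stands in for it here):

#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's memchr_inv(): first byte differing from c,
 * or NULL if all bytes match. */
static const void *memchr_inv_sketch(const void *s, int c, size_t n)
{
	const unsigned char *p = s;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] != (unsigned char)c)
			return p + i;
	return NULL;
}

int main(void)
{
	signed char vm_stat_diff[32] = { 0 };	/* hypothetical per-cpu diffs */

	printf("update needed: %d\n",
	       memchr_inv_sketch(vm_stat_diff, 0, sizeof(vm_stat_diff)) != NULL);

	vm_stat_diff[7] = -3;			/* one pending differential */
	printf("update needed: %d\n",
	       memchr_inv_sketch(vm_stat_diff, 0, sizeof(vm_stat_diff)) != NULL);
	return 0;
}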
1415
1416
1417/*
1418 * Shepherd worker thread that checks the
1419 * differentials of processors that have their worker
1420 * threads for vm statistics updates disabled because of
1421 * inactivity.
1422 */
1423static void vmstat_shepherd(struct work_struct *w);
1424
1425static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);
1426
1427static void vmstat_shepherd(struct work_struct *w)
1428{
1429 int cpu;
1430
1431 get_online_cpus();
1432 /* Check processors whose vmstat worker threads have been disabled */
1433 for_each_cpu(cpu, cpu_stat_off)
1434 if (need_update(cpu) &&
1435 cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
1436
1437 schedule_delayed_work_on(cpu, &per_cpu(vmstat_work, cpu),
1438 __round_jiffies_relative(sysctl_stat_interval, cpu));
1439
1440 put_online_cpus();
1441
1442 schedule_delayed_work(&shepherd,
98f4ebb2 1443 round_jiffies_relative(sysctl_stat_interval));
7cc36bbd 1444
d1187ed2
CL
1445}
1446
7cc36bbd 1447static void __init start_shepherd_timer(void)
d1187ed2 1448{
7cc36bbd
CL
1449 int cpu;
1450
1451 for_each_possible_cpu(cpu)
1452 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1453 vmstat_update);
1454
1455 if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
1456 BUG();
1457 cpumask_copy(cpu_stat_off, cpu_online_mask);
d1187ed2 1458
7cc36bbd
CL
1459 schedule_delayed_work(&shepherd,
1460 round_jiffies_relative(sysctl_stat_interval));
d1187ed2
CL
1461}
1462
807a1bd2
TK
1463static void vmstat_cpu_dead(int node)
1464{
1465 int cpu;
1466
1467 get_online_cpus();
1468 for_each_online_cpu(cpu)
1469 if (cpu_to_node(cpu) == node)
1470 goto end;
1471
1472 node_clear_state(node, N_CPU);
1473end:
1474 put_online_cpus();
1475}
1476
df9ecaba
CL
1477/*
1478 * Use the cpu notifier to ensure that the thresholds are recalculated
1479 * when necessary.
1480 */
0db0628d 1481static int vmstat_cpuup_callback(struct notifier_block *nfb,
df9ecaba
CL
1482 unsigned long action,
1483 void *hcpu)
1484{
d1187ed2
CL
1485 long cpu = (long)hcpu;
1486
df9ecaba 1487 switch (action) {
d1187ed2
CL
1488 case CPU_ONLINE:
1489 case CPU_ONLINE_FROZEN:
5ee28a44 1490 refresh_zone_stat_thresholds();
ad596925 1491 node_set_state(cpu_to_node(cpu), N_CPU);
7cc36bbd 1492 cpumask_set_cpu(cpu, cpu_stat_off);
d1187ed2
CL
1493 break;
1494 case CPU_DOWN_PREPARE:
1495 case CPU_DOWN_PREPARE_FROZEN:
afe2c511 1496 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
7cc36bbd 1497 cpumask_clear_cpu(cpu, cpu_stat_off);
d1187ed2
CL
1498 break;
1499 case CPU_DOWN_FAILED:
1500 case CPU_DOWN_FAILED_FROZEN:
7cc36bbd 1501 cpumask_set_cpu(cpu, cpu_stat_off);
d1187ed2 1502 break;
ce421c79 1503 case CPU_DEAD:
8bb78442 1504 case CPU_DEAD_FROZEN:
ce421c79 1505 refresh_zone_stat_thresholds();
807a1bd2 1506 vmstat_cpu_dead(cpu_to_node(cpu));
ce421c79
AW
1507 break;
1508 default:
1509 break;
df9ecaba
CL
1510 }
1511 return NOTIFY_OK;
1512}
1513
0db0628d 1514static struct notifier_block vmstat_notifier =
df9ecaba 1515 { &vmstat_cpuup_callback, NULL, 0 };
8f32f7e5 1516#endif
df9ecaba 1517
e2fc88d0 1518static int __init setup_vmstat(void)
df9ecaba 1519{
8f32f7e5 1520#ifdef CONFIG_SMP
0be94bad
SB
1521 cpu_notifier_register_begin();
1522 __register_cpu_notifier(&vmstat_notifier);
d1187ed2 1523
7cc36bbd 1524 start_shepherd_timer();
0be94bad 1525 cpu_notifier_register_done();
8f32f7e5
AD
1526#endif
1527#ifdef CONFIG_PROC_FS
1528 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74e2e8e8 1529 proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
b6aa44ab 1530 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
5c9fe628 1531 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
8f32f7e5 1532#endif
df9ecaba
CL
1533 return 0;
1534}
1535module_init(setup_vmstat)
d7a5752c
MG
1536
1537#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1538#include <linux/debugfs.h>
1539
d7a5752c
MG
1540
1541/*
1542 * Return an index indicating how much of the available free memory is
1543 * unusable for an allocation of the requested size.
1544 */
1545static int unusable_free_index(unsigned int order,
1546 struct contig_page_info *info)
1547{
1548 /* No free memory is interpreted as all free memory is unusable */
1549 if (info->free_pages == 0)
1550 return 1000;
1551
1552 /*
1553 * Index should be a value between 0 and 1. Return a value to 3
1554 * decimal places.
1555 *
1556 * 0 => no fragmentation
1557 * 1 => high fragmentation
1558 */
1559 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1560
1561}
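The unusable index is the fraction of free pages sitting in blocks too small to satisfy a request of the given order, scaled to three decimal places. A worked user-space version with hypothetical numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int order = 4;			/* an order-4 request */
	uint64_t free_pages = 1000;		/* hypothetical totals */
	uint64_t free_blocks_suitable = 20;	/* blocks of order >= 4 */
	long index;

	if (free_pages == 0) {
		index = 1000;	/* no free memory: all of it is "unusable" */
	} else {
		/* Pages covered by suitable blocks: 20 << 4 = 320 of 1000. */
		index = (free_pages - (free_blocks_suitable << order)) * 1000
				/ free_pages;
	}

	/* (1000 - 320) / 1000 = 0.680, i.e. 68% of free memory is unusable. */
	printf("unusable index: %ld.%03ld\n", index / 1000, index % 1000);
	return 0;
}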
1562
1563static void unusable_show_print(struct seq_file *m,
1564 pg_data_t *pgdat, struct zone *zone)
1565{
1566 unsigned int order;
1567 int index;
1568 struct contig_page_info info;
1569
1570 seq_printf(m, "Node %d, zone %8s ",
1571 pgdat->node_id,
1572 zone->name);
1573 for (order = 0; order < MAX_ORDER; ++order) {
1574 fill_contig_page_info(zone, order, &info);
1575 index = unusable_free_index(order, &info);
1576 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1577 }
1578
1579 seq_putc(m, '\n');
1580}
1581
1582/*
1583 * Display unusable free space index
1584 *
1585 * The unusable free space index measures how much of the available free
1586 * memory cannot be used to satisfy an allocation of a given size and is a
1587 * value between 0 and 1. The higher the value, the more of free memory is
1588 * unusable and by implication, the worse the external fragmentation is. This
1589 * can be expressed as a percentage by multiplying by 100.
1590 */
1591static int unusable_show(struct seq_file *m, void *arg)
1592{
1593 pg_data_t *pgdat = (pg_data_t *)arg;
1594
1595 /* check memoryless node */
a47b53c5 1596 if (!node_state(pgdat->node_id, N_MEMORY))
d7a5752c
MG
1597 return 0;
1598
1599 walk_zones_in_node(m, pgdat, unusable_show_print);
1600
1601 return 0;
1602}
1603
1604static const struct seq_operations unusable_op = {
1605 .start = frag_start,
1606 .next = frag_next,
1607 .stop = frag_stop,
1608 .show = unusable_show,
1609};
1610
1611static int unusable_open(struct inode *inode, struct file *file)
1612{
1613 return seq_open(file, &unusable_op);
1614}
1615
1616static const struct file_operations unusable_file_ops = {
1617 .open = unusable_open,
1618 .read = seq_read,
1619 .llseek = seq_lseek,
1620 .release = seq_release,
1621};
1622
f1a5ab12
MG
1623static void extfrag_show_print(struct seq_file *m,
1624 pg_data_t *pgdat, struct zone *zone)
1625{
1626 unsigned int order;
1627 int index;
1628
1629 /* Alloc on stack as interrupts are disabled for zone walk */
1630 struct contig_page_info info;
1631
1632 seq_printf(m, "Node %d, zone %8s ",
1633 pgdat->node_id,
1634 zone->name);
1635 for (order = 0; order < MAX_ORDER; ++order) {
1636 fill_contig_page_info(zone, order, &info);
56de7263 1637 index = __fragmentation_index(order, &info);
f1a5ab12
MG
1638 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1639 }
1640
1641 seq_putc(m, '\n');
1642}
1643
1644/*
1645 * Display fragmentation index for orders that allocations would fail for
1646 */
1647static int extfrag_show(struct seq_file *m, void *arg)
1648{
1649 pg_data_t *pgdat = (pg_data_t *)arg;
1650
1651 walk_zones_in_node(m, pgdat, extfrag_show_print);
1652
1653 return 0;
1654}
1655
1656static const struct seq_operations extfrag_op = {
1657 .start = frag_start,
1658 .next = frag_next,
1659 .stop = frag_stop,
1660 .show = extfrag_show,
1661};
1662
1663static int extfrag_open(struct inode *inode, struct file *file)
1664{
1665 return seq_open(file, &extfrag_op);
1666}
1667
1668static const struct file_operations extfrag_file_ops = {
1669 .open = extfrag_open,
1670 .read = seq_read,
1671 .llseek = seq_lseek,
1672 .release = seq_release,
1673};
1674
d7a5752c
MG
1675static int __init extfrag_debug_init(void)
1676{
bde8bd8a
S
1677 struct dentry *extfrag_debug_root;
1678
d7a5752c
MG
1679 extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1680 if (!extfrag_debug_root)
1681 return -ENOMEM;
1682
1683 if (!debugfs_create_file("unusable_index", 0444,
1684 extfrag_debug_root, NULL, &unusable_file_ops))
bde8bd8a 1685 goto fail;
d7a5752c 1686
f1a5ab12
MG
1687 if (!debugfs_create_file("extfrag_index", 0444,
1688 extfrag_debug_root, NULL, &extfrag_file_ops))
bde8bd8a 1689 goto fail;
f1a5ab12 1690
d7a5752c 1691 return 0;
bde8bd8a
S
1692fail:
1693 debugfs_remove_recursive(extfrag_debug_root);
1694 return -ENOMEM;
d7a5752c
MG
1695}
1696
1697module_init(extfrag_debug_init);
1698#endif