mm: move definition for LRU isolation modes to a header
/*
 * linux/mm/vmstat.c
 *
 * Manages VM statistics
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * zoned VM statistics
 * Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_cpu(cpu, cpumask) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret, cpu_online_mask);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
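
/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * snapshot every event counter into a local array and index it by
 * enum vm_event_item.
 */
static void __maybe_unused example_dump_vm_events(void)
{
	unsigned long events[NR_VM_EVENT_ITEMS];

	all_vm_events(events);
	printk(KERN_DEBUG "pgfault %lu pgmajfault %lu\n",
			events[PGFAULT], events[PGMAJFAULT]);
}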

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
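
/*
 * Worked example (illustrative, not from the original source): on a
 * 4-CPU machine with a 1GB zone and 4KB pages, present_pages is
 * 262144, so mem = 262144 >> (27 - 12) = 8 (128MB units) and
 *
 *	threshold = 2 * fls(4) * (1 + fls(8)) = 2 * 3 * (1 + 4) = 30
 *
 * comparable to the sampled rows above.
 */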

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);

	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
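
/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * with interrupts known to be disabled the cheaper __-prefixed form
 * is safe; from an unknown context the irq-safe wrapper must be used.
 */
static void __maybe_unused example_account_mapped(struct page *page)
{
	/* interrupts may be enabled here: use the irq-safe variant */
	mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, 1);
}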

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compiler to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}
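
/*
 * Worked example (illustrative, not from the original source): with
 * stat_threshold == 32, the increment that takes *p to 33 trips the
 * check; 33 + 16 is folded into the zone and global counters and *p
 * restarts at -16, so a steadily increasing counter needs another 49
 * increments before the next expensive global update.
 */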

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and so are only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor
		 *
		 * Check if there are pages remaining in this pageset;
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}
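
/*
 * Illustrative note (not from the original source): with the vmstat
 * work rescheduling itself about once per second (sysctl_stat_interval
 * below), p->expire = 3 gives a remote pageset roughly three quiet
 * refresh rounds before its pages are drained back to the buddy
 * allocator.
 */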

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
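
/*
 * Illustrative note (not from the original source): if a request that
 * preferred node 0 is satisfied from node 1, NUMA_MISS is counted on
 * node 1 and NUMA_FOREIGN on node 0, while NUMA_LOCAL/NUMA_OTHER only
 * record whether the zone used belongs to the allocating CPU's node.
 */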
#endif

#ifdef CONFIG_COMPACTION
struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace.
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}
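
/*
 * Worked example (illustrative, not from the original source): a zone
 * whose only free memory is three order-2 blocks reports, for
 * suitable_order == 1, free_pages = 3 << 2 = 12, free_blocks_total = 3
 * and free_blocks_suitable = 3 << (2 - 1) = 6.
 */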

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used.
 */
int fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 +
		div_u64(info->free_pages * 1000ULL, requested)),
		info->free_blocks_total);
}
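
/*
 * Worked example (illustrative, not from the original source): for an
 * order-4 request (requested = 16) with free_pages = 1000,
 * free_blocks_total = 250 and no suitable block, the index is
 *
 *	1000 - (1000 + 1000 * 1000 / 16) / 250 = 1000 - 254 = 746
 *
 * i.e. 0.746: the failure is due more to external fragmentation than
 * to a plain lack of memory.
 */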
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
	"Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}
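
/*
 * Illustrative note (not from the original source): frag_show() is the
 * backend of /proc/buddyinfo (wired up in setup_vmstat() below); each
 * column is the number of free blocks at that order.
 */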

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_mlock",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_writeback_temp",
	"nr_isolated_anon",
	"nr_isolated_file",
	"nr_shmem",
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"kswapd_skip_congestion_wait",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",
#endif
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->pages_scanned,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   zone->all_unreclaimable,
		   zone->prev_priority,
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}
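
/*
 * Illustrative note (not from the original source): each CPU's
 * vmstat_work reschedules itself, so after the initial kick from
 * start_cpu_timer() every online CPU keeps folding its own diffs
 * roughly once per sysctl_stat_interval.
 */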

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		node_set_state(cpu_to_node(cpu), N_CPU);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif

static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
#include <linux/debugfs.h>

static struct dentry *extfrag_debug_root;

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages -
			(info->free_blocks_suitable << order)) * 1000ULL,
			info->free_pages);
}
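
/*
 * Worked example (illustrative, not from the original source): with
 * free_pages = 1000 and free_blocks_suitable = 100 at order 3, the
 * usable part is 100 << 3 = 800 pages, so the index is
 * (1000 - 800) * 1000 / 1000 = 200, i.e. 0.200.
 */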

static void unusable_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of free memory is
 * unusable and by implication, the worse the external fragmentation is. This
 * can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= unusable_show,
};

static int unusable_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &unusable_op);
}

static const struct file_operations unusable_file_ops = {
	.open		= unusable_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= extfrag_show,
};

static int extfrag_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &extfrag_op);
}

static const struct file_operations extfrag_file_ops = {
	.open		= extfrag_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init extfrag_debug_init(void)
{
	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
	if (!extfrag_debug_root)
		return -ENOMEM;

	if (!debugfs_create_file("unusable_index", 0444,
			extfrag_debug_root, NULL, &unusable_file_ops))
		return -ENOMEM;

	if (!debugfs_create_file("extfrag_index", 0444,
			extfrag_debug_root, NULL, &extfrag_file_ops))
		return -ENOMEM;

	return 0;
}

module_init(extfrag_debug_init);
#endif