/*
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>

#include "internal.h"
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}
/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
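
/*
 * A minimal usage sketch (illustrative, not part of this file): a reader
 * such as the /proc/vmstat code snapshots all events into a local array
 * and then indexes it by vm_event_item, e.g.
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	faults = events[PGFAULT];
 *
 * The snapshot is only approximate; the counters keep ticking while (and
 * after) it is taken.
 */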
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_stat);
int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark.
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
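
/*
 * Worked example (illustrative): with a low-to-min watermark gap of 1024
 * pages and 16 online CPUs the pressure threshold becomes
 * max(1, 1024 / 16) = 64, capped at 125. Even if every CPU then holds a
 * full +/-64 page differential, the aggregate drift (16 * 64 = 1024) cannot
 * silently push the real free count below the min watermark.
 */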
int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->managed_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
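
/*
 * Worked example (illustrative): a 2 CPU machine with a 3 GB zone has
 * mem = 24 (in 128 MB units), so
 * threshold = 2 * fls(2) * (1 + fls(24)) = 2 * 2 * 6 = 24.
 */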
/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
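
/*
 * Example of the drift arithmetic (illustrative): with 64 online CPUs and a
 * per-cpu threshold of 125, up to 64 * 125 = 8000 free pages can be "hidden"
 * in per-cpu diffs. percpu_drift_mark is then raised above the high watermark
 * by that amount so that watermark checks can switch to the exact, per-cpu
 * summed value once the estimated free count drops below the mark.
 */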
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu, threshold, i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}
/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   int delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
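
/*
 * Usage sketch (illustrative): callers must already have interrupts (or
 * preemption, for counters never touched from interrupt context) disabled,
 * e.g. a page-cache accounting site holding an irq-disabling lock might do
 *
 *	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr_pages);
 *
 * where nr_pages is whatever the caller is removing.
 */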
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);
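
/*
 * Overstep example (illustrative): with a stat_threshold of 32, the 33rd
 * consecutive __inc_zone_state() sees v = 33 > t, folds 33 + 16 = 49 pages
 * into the zone/global counters and restarts the per-cpu diff at -16. For a
 * monotonically moving counter this pushes the next fold further out than a
 * plain reset to zero would.
 */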
#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *        0       No overstepping
 *        1       Overstepping half of threshold
 *        -1      Overstepping minus half of threshold
 */
static inline void mod_state(struct zone *zone,
       enum zone_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 int delta)
{
	mod_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	mod_state(zone, item, 1, 1);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
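
/*
 * Unlike the __-prefixed variants above, these wrappers are safe to call
 * with interrupts enabled: the cmpxchg loop in mod_state() provides the
 * required atomicity without local_irq_save()/restore() (compare the
 * fallback implementations below).
 */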
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
#endif
/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (diff[i]) {
			atomic_long_add(diff[i], &vm_stat[i]);
			changes++;
		}
	return changes;
}
/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(void)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
				/* 3 seconds idle till flush */
				__this_cpu_write(p->expire, 3);
			}
		}
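
		/*
		 * Note (illustrative timing): vmstat_update() calls this
		 * roughly once per sysctl_stat_interval (HZ), and the expire
		 * countdown below is re-armed to 3 on every fold, so a remote
		 * pageset is only drained after about three quiet intervals.
		 */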
		/*
		 * Deal with draining the remote pageset of this
		 * processor
		 *
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!__this_cpu_read(p->expire) ||
		    !__this_cpu_read(p->pcp.count))
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			__this_cpu_write(p->expire, 0);
			continue;
		}

		if (__this_cpu_dec_return(p->expire))
			continue;

		if (__this_cpu_read(p->pcp.count)) {
			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
			changes++;
		}
	}
	changes += fold_diff(global_diff);

	return changes;
}
/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				int v;

				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
			}
	}

	fold_diff(global_diff);
}
/*
 * This is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (pset->vm_stat_diff[i]) {
			int v = pset->vm_stat_diff[i];
			pset->vm_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_stat[i]);
			atomic_long_add(v, &vm_stat[i]);
		}
}
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 *
 * When __GFP_OTHER_NODE is set assume the node of the preferred
 * zone is the local node. This is useful for daemons who allocate
 * memory on behalf of other processes.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == ((flags & __GFP_OTHER_NODE) ?
			preferred_zone->node : numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
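
/*
 * Reading the counters (illustrative): for an allocation that asked for
 * node 0 but was satisfied from node 1, NUMA_MISS is bumped on node 1 and
 * NUMA_FOREIGN on node 0. NUMA_LOCAL/NUMA_OTHER additionally record whether
 * the page ended up on the node the requesting context (or, with
 * __GFP_OTHER_NODE, the preferred zone) runs on.
 */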
#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};
/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace.
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}
/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used.
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + (div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
}
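
/*
 * Worked example (illustrative): order = 4 (requested = 16 pages),
 * free_pages = 1000, free_blocks_total = 250 and no suitable block:
 * index = 1000 - (1000 + 1000 * 1000 / 16) / 250 = 1000 - 254 = 746,
 * i.e. 0.746, suggesting fragmentation rather than lack of memory is why
 * the request would fail.
 */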
/* Same as __fragmentation_index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif
#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}
/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_vmscan_immediate_reclaim",
	"workingset_refault",
	"workingset_activate",
	"workingset_nodereclaim",
	"nr_anon_transparent_hugepages",

	/* enum writeback_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",
#ifdef CONFIG_VM_EVENT_COUNTERS
	/* enum vm_event_item counters */
	TEXTS_FOR_ZONES("pgalloc")

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal_kswapd")
	TEXTS_FOR_ZONES("pgsteal_direct")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")
	"pgscan_direct_throttle",

	"zone_reclaim_failed",

	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
#ifdef CONFIG_NUMA_BALANCING
	"numa_pte_updates",
	"numa_huge_pte_updates",
	"numa_hint_faults",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
	"pgmigrate_fail",
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
#endif
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
#endif
#ifdef CONFIG_MEMORY_BALLOON
	"balloon_inflate",
	"balloon_deflate",
#ifdef CONFIG_BALLOON_COMPACTION
	"balloon_migrate",
#endif /* CONFIG_BALLOON_COMPACTION */
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
#endif /* CONFIG_SMP */
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_DEBUG_VM_VMACACHE
	"vmacache_find_calls",
	"vmacache_find_hits",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}
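
/*
 * The resulting /proc/buddyinfo lines look like (values illustrative):
 *
 *	Node 0, zone   Normal    145     62     33     17      9      5      3      2      1      1      0
 *
 * one column per order, 0 .. MAX_ORDER - 1.
 */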
static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}
/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}
static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}
/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}
#ifdef CONFIG_PAGE_OWNER
static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
							pg_data_t *pgdat,
							struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);
		pageblock_mt = get_pfnblock_migratetype(page, pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);

			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_ext->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}
#endif /* CONFIG_PAGE_OWNER */
/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on.
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!page_owner_inited)
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}
/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}
static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone_page_state(zone, NR_PAGES_SCANNED),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone->managed_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   !zone_reclaimable(zone),
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}
/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i, stat_items_size;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

#ifdef CONFIG_VM_EVENT_COUNTERS
	stat_items_size += sizeof(struct vm_event_state);
#endif

	v = kmalloc(stat_items_size, GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
static cpumask_var_t cpu_stat_off;
static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats())
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		schedule_delayed_work(this_cpu_ptr(&vmstat_work),
			round_jiffies_relative(sysctl_stat_interval));
	else {
		/*
		 * We did not update any counters so the app may be in
		 * a mode where it does not cause counter updates.
		 * We may be uselessly running vmstat_update.
		 * Defer the checking for differentials to the
		 * shepherd thread on a different processor.
		 */
		int r;
		/*
		 * Shepherd work thread does not race since it never
		 * changes the bit if it is zero but the cpu
		 * online / offline code may race if
		 * worker threads are still allowed during
		 * shutdown / startup.
		 */
		r = cpumask_test_and_set_cpu(smp_processor_id(),
			cpu_stat_off);
		VM_BUG_ON(r);
	}
}
/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);

		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
		/*
		 * The fast way of checking if there are any vmstat diffs.
		 * This works because the diffs are byte sized items.
		 */
		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
			return true;
	}
	return false;
}
/*
 * Shepherd worker thread that checks the
 * differentials of processors that have their worker
 * threads for vm statistics updates disabled because of
 * inactivity.
 */
static void vmstat_shepherd(struct work_struct *w);

static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	get_online_cpus();
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_cpu(cpu, cpu_stat_off)
		if (need_update(cpu) &&
			cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
			schedule_delayed_work_on(cpu, &per_cpu(vmstat_work, cpu),
				__round_jiffies_relative(sysctl_stat_interval, cpu));

	put_online_cpus();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}
static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

	if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
		BUG();
	cpumask_copy(cpu_stat_off, cpu_online_mask);

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}
static void vmstat_cpu_dead(int node)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (cpu_to_node(cpu) == node)
			goto end;

	node_clear_state(node, N_CPU);
end:
	put_online_cpus();
}
/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		refresh_zone_stat_thresholds();
		node_set_state(cpu_to_node(cpu), N_CPU);
		cpumask_set_cpu(cpu, cpu_stat_off);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
		cpumask_clear_cpu(cpu, cpu_stat_off);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		cpumask_set_cpu(cpu, cpu_stat_off);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		vmstat_cpu_dead(cpu_to_node(cpu));
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	cpu_notifier_register_begin();
	__register_cpu_notifier(&vmstat_notifier);

	start_shepherd_timer();
	cpu_notifier_register_done();
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
#include <linux/debugfs.h>

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
}
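
/*
 * Worked example (illustrative): free_pages = 1000, order = 3 and
 * free_blocks_suitable = 10 gives 10 << 3 = 80 usable pages, so the index is
 * (1000 - 80) * 1000 / 1000 = 920, i.e. 0.920 of the free memory cannot help
 * an order-3 request.
 */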
static void unusable_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}
/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of free memory is
 * unusable and by implication, the worse the external fragmentation is. This
 * can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, unusable_show_print);

	return 0;
}
static const struct seq_operations unusable_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= unusable_show,
};

static int unusable_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &unusable_op);
}

static const struct file_operations unusable_file_ops = {
	.open		= unusable_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}
/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= extfrag_show,
};

static int extfrag_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &extfrag_op);
}

static const struct file_operations extfrag_file_ops = {
	.open		= extfrag_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
	if (!extfrag_debug_root)
		return -ENOMEM;

	if (!debugfs_create_file("unusable_index", 0444,
			extfrag_debug_root, NULL, &unusable_file_ops))
		goto fail;

	if (!debugfs_create_file("extfrag_index", 0444,
			extfrag_debug_root, NULL, &extfrag_file_ops))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(extfrag_debug_root);
	return -ENOMEM;
}

module_init(extfrag_debug_init);
#endif