457c8996 1// SPDX-License-Identifier: GPL-2.0-only
f6ac2354
CL
2/*
3 * linux/mm/vmstat.c
4 *
5 * Manages VM statistics
6 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
2244b95a
CL
7 *
8 * zoned VM statistics
9 * Copyright (C) 2006 Silicon Graphics, Inc.,
786d5cc2 10 * Christoph Lameter <cl@gentwo.org>
7cc36bbd 11 * Copyright (C) 2008-2014 Christoph Lameter
f6ac2354 12 */
8f32f7e5 13#include <linux/fs.h>
f6ac2354 14#include <linux/mm.h>
4e950f6f 15#include <linux/err.h>
2244b95a 16#include <linux/module.h>
5a0e3ad6 17#include <linux/slab.h>
df9ecaba 18#include <linux/cpu.h>
7cc36bbd 19#include <linux/cpumask.h>
c748e134 20#include <linux/vmstat.h>
3c486871
AM
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/debugfs.h>
e8edc6e0 24#include <linux/sched.h>
f1a5ab12 25#include <linux/math64.h>
79da826a 26#include <linux/writeback.h>
36deb0be 27#include <linux/compaction.h>
6e543d57 28#include <linux/mm_inline.h>
48c96a36 29#include <linux/page_owner.h>
be5e015d 30#include <linux/sched/isolation.h>
6e543d57
LD
31
32#include "internal.h"
f6ac2354 33
b8974b89 34#ifdef CONFIG_PROC_FS
4518085e 35#ifdef CONFIG_NUMA
b8974b89
KY
36#define ENABLE_NUMA_STAT 1
37static int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
4518085e
KW
38
39/* zero numa counters within a zone */
40static void zero_zone_numa_counters(struct zone *zone)
41{
42 int item, cpu;
43
f19298b9
MG
44 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
45 atomic_long_set(&zone->vm_numa_event[item], 0);
46 for_each_online_cpu(cpu) {
47 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
4518085e 48 = 0;
f19298b9 49 }
4518085e
KW
50 }
51}
52
53/* zero numa counters of all the populated zones */
54static void zero_zones_numa_counters(void)
55{
56 struct zone *zone;
57
58 for_each_populated_zone(zone)
59 zero_zone_numa_counters(zone);
60}
61
62/* zero global numa counters */
63static void zero_global_numa_counters(void)
64{
65 int item;
66
f19298b9
MG
67 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
68 atomic_long_set(&vm_numa_event[item], 0);
4518085e
KW
69}
70
71static void invalid_numa_statistics(void)
72{
73 zero_zones_numa_counters();
74 zero_global_numa_counters();
75}
76
77static DEFINE_MUTEX(vm_numa_stat_lock);
78
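/*
 * Handler behind the vm.numa_stat sysctl (/proc/sys/vm/numa_stat).
 * Illustrative usage from userspace (assuming a NUMA-enabled kernel):
 *
 *	echo 0 > /proc/sys/vm/numa_stat		disable collection and zero counters
 *	echo 1 > /proc/sys/vm/numa_stat		re-enable collection
 */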
b8974b89 79static int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
32927393 80 void *buffer, size_t *length, loff_t *ppos)
4518085e
KW
81{
82 int ret, oldval;
83
84 mutex_lock(&vm_numa_stat_lock);
85 if (write)
86 oldval = sysctl_vm_numa_stat;
87 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
88 if (ret || !write)
89 goto out;
90
91 if (oldval == sysctl_vm_numa_stat)
92 goto out;
93 else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
94 static_branch_enable(&vm_numa_stat_key);
95 pr_info("enable numa statistics\n");
96 } else {
97 static_branch_disable(&vm_numa_stat_key);
98 invalid_numa_statistics();
99 pr_info("disable numa statistics, and clear numa counters\n");
100 }
101
102out:
103 mutex_unlock(&vm_numa_stat_lock);
104 return ret;
105}
106#endif
b8974b89 107#endif /* CONFIG_PROC_FS */
4518085e 108
f8891e5e
CL
109#ifdef CONFIG_VM_EVENT_COUNTERS
110DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
111EXPORT_PER_CPU_SYMBOL(vm_event_states);
112
31f961a8 113static void sum_vm_events(unsigned long *ret)
f8891e5e 114{
9eccf2a8 115 int cpu;
f8891e5e
CL
116 int i;
117
118 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
119
31f961a8 120 for_each_online_cpu(cpu) {
f8891e5e
CL
121 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
122
f8891e5e
CL
123 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
124 ret[i] += this->event[i];
125 }
126}
127
128/*
129 * Accumulate the vm event counters across all CPUs.
130 * The result is unavoidably approximate - it can change
131 * during and after execution of this function.
132*/
133void all_vm_events(unsigned long *ret)
134{
7625eccd 135 cpus_read_lock();
31f961a8 136 sum_vm_events(ret);
7625eccd 137 cpus_read_unlock();
f8891e5e 138}
32dd66fc 139EXPORT_SYMBOL_GPL(all_vm_events);
f8891e5e 140
f8891e5e
CL
141/*
142 * Fold the foreign cpu events into our own.
143 *
144 * This is adding to the events on one processor
145 * but keeps the global counts constant.
146 */
147void vm_events_fold_cpu(int cpu)
148{
149 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
150 int i;
151
152 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
153 count_vm_events(i, fold_state->event[i]);
154 fold_state->event[i] = 0;
155 }
156}
f8891e5e
CL
157
158#endif /* CONFIG_VM_EVENT_COUNTERS */
159
2244b95a
CL
160/*
161 * Manage combined zone based / global counters
162 *
 163 * vm_zone_stat, vm_node_stat and vm_numa_event contain the global counters
164 */
75ef7184
MG
165atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
166atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
f19298b9 167atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
75ef7184
MG
168EXPORT_SYMBOL(vm_zone_stat);
169EXPORT_SYMBOL(vm_node_stat);
2244b95a 170
ebeac3ea
GU
171#ifdef CONFIG_NUMA
172static void fold_vm_zone_numa_events(struct zone *zone)
173{
174 unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
175 int cpu;
176 enum numa_stat_item item;
177
178 for_each_online_cpu(cpu) {
179 struct per_cpu_zonestat *pzstats;
180
181 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
182 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
183 zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
184 }
185
186 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
187 zone_numa_event_add(zone_numa_events[item], zone, item);
188}
189
190void fold_vm_numa_events(void)
191{
192 struct zone *zone;
193
194 for_each_populated_zone(zone)
195 fold_vm_zone_numa_events(zone);
196}
197#endif
198
2244b95a
CL
199#ifdef CONFIG_SMP
200
b44129b3 201int calculate_pressure_threshold(struct zone *zone)
88f5acf8
MG
202{
203 int threshold;
204 int watermark_distance;
205
206 /*
207 * As vmstats are not up to date, there is drift between the estimated
208 * and real values. For high thresholds and a high number of CPUs, it
209 * is possible for the min watermark to be breached while the estimated
210 * value looks fine. The pressure threshold is a reduced value such
211 * that even the maximum amount of drift will not accidentally breach
212 * the min watermark
213 */
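/*
 * Worked example (illustrative numbers): with 1024 pages between the low
 * and min watermarks and 16 online CPUs, the threshold becomes
 * max(1, 1024 / 16) = 64, so even if every CPU drifts by the full
 * threshold the combined error stays within the watermark gap.
 */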
214 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
215 threshold = max(1, (int)(watermark_distance / num_online_cpus()));
216
217 /*
218 * Maximum threshold is 125
219 */
220 threshold = min(125, threshold);
221
222 return threshold;
223}
224
b44129b3 225int calculate_normal_threshold(struct zone *zone)
df9ecaba
CL
226{
227 int threshold;
228 int mem; /* memory in 128 MB units */
229
230 /*
231 * The threshold scales with the number of processors and the amount
232 * of memory per zone. More memory means that we can defer updates for
233 * longer, more processors could lead to more contention.
234 * fls() is used to have a cheap way of logarithmic scaling.
235 *
236 * Some sample thresholds:
237 *
ea15ba17 238 * Threshold  Processors  (fls)  Zonesize    fls(mem)+1
df9ecaba
CL
 239 * ------------------------------------------------------------------
 240 *     8           1     1      0.9-1 GB        4
 241 *    16           2     2      0.9-1 GB        4
 242 *    20           2     2      1-2 GB          5
 243 *    24           2     2      2-4 GB          6
 244 *    28           2     2      4-8 GB          7
 245 *    32           2     2      8-16 GB         8
 246 *     4           2     2      <128M           1
 247 *    30           4     3      2-4 GB          5
 248 *    48           4     3      8-16 GB         8
 249 *    32           8     4      1-2 GB          4
 250 *    32           8     4      0.9-1 GB        4
 251 *    10          16     5      <128M           1
 252 *    40          16     5      900M            4
 253 *    70          64     7      2-4 GB          5
 254 *    84          64     7      4-8 GB          6
 255 *   108         512     9      4-8 GB          6
 256 *   125        1024    10      8-16 GB         8
 257 *   125        1024    10      16-32 GB        9
258 */
259
9705bea5 260 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
df9ecaba
CL
261
262 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
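/*
 * Worked example (illustrative): a zone managing 4 GB gives mem = 32
 * (128 MB units) and fls(mem) = 6; with 2 online CPUs, fls(2) = 2, so
 * threshold = 2 * 2 * (1 + 6) = 28, matching the "4-8 GB" row in the
 * table above.
 */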
263
264 /*
265 * Maximum threshold is 125
266 */
267 threshold = min(125, threshold);
268
269 return threshold;
270}
2244b95a
CL
271
272/*
df9ecaba 273 * Refresh the thresholds for each zone.
2244b95a 274 */
a6cccdc3 275void refresh_zone_stat_thresholds(void)
2244b95a 276{
75ef7184 277 struct pglist_data *pgdat;
df9ecaba
CL
278 struct zone *zone;
279 int cpu;
280 int threshold;
281
75ef7184
MG
282 /* Zero current pgdat thresholds */
283 for_each_online_pgdat(pgdat) {
284 for_each_online_cpu(cpu) {
285 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
286 }
287 }
288
ee99c71c 289 for_each_populated_zone(zone) {
75ef7184 290 struct pglist_data *pgdat = zone->zone_pgdat;
aa454840
CL
291 unsigned long max_drift, tolerate_drift;
292
b44129b3 293 threshold = calculate_normal_threshold(zone);
df9ecaba 294
75ef7184
MG
295 for_each_online_cpu(cpu) {
296 int pgdat_threshold;
297
28f836b6 298 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
99dcc3e5 299 = threshold;
1d90ca89 300
75ef7184
MG
301 /* Base nodestat threshold on the largest populated zone. */
302 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
303 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
304 = max(threshold, pgdat_threshold);
305 }
306
aa454840
CL
307 /*
308 * Only set percpu_drift_mark if there is a danger that
309 * NR_FREE_PAGES reports the low watermark is ok when in fact
310 * the min watermark could be breached by an allocation
311 */
312 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
313 max_drift = num_online_cpus() * threshold;
314 if (max_drift > tolerate_drift)
315 zone->percpu_drift_mark = high_wmark_pages(zone) +
316 max_drift;
df9ecaba 317 }
2244b95a
CL
318}
319
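/*
 * Apply the threshold computed by @calculate_pressure to every zone of the
 * node that has a percpu_drift_mark. Callers such as kswapd pass
 * calculate_pressure_threshold while reclaim is active, so that
 * NR_FREE_PAGES stays accurate enough for watermark checks, and pass
 * calculate_normal_threshold again once they go back to sleep.
 */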
b44129b3
MG
320void set_pgdat_percpu_threshold(pg_data_t *pgdat,
321 int (*calculate_pressure)(struct zone *))
88f5acf8
MG
322{
323 struct zone *zone;
324 int cpu;
325 int threshold;
326 int i;
327
88f5acf8
MG
328 for (i = 0; i < pgdat->nr_zones; i++) {
329 zone = &pgdat->node_zones[i];
330 if (!zone->percpu_drift_mark)
331 continue;
332
b44129b3 333 threshold = (*calculate_pressure)(zone);
1d90ca89 334 for_each_online_cpu(cpu)
28f836b6 335 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
88f5acf8
MG
336 = threshold;
337 }
88f5acf8
MG
338}
339
2244b95a 340/*
bea04b07
JZ
341 * For use when we know that interrupts are disabled,
342 * or when we know that preemption is disabled and that
343 * particular counter cannot be updated from interrupt context.
2244b95a
CL
344 */
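/*
 * Illustrative call site (not from this file): the page allocator adjusts
 * the free page count under the zone lock with something like
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 */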
345void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
6cdb18ad 346 long delta)
2244b95a 347{
28f836b6 348 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
12938a92 349 s8 __percpu *p = pcp->vm_stat_diff + item;
2244b95a 350 long x;
12938a92
CL
351 long t;
352
c68ed794
IM
353 /*
354 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
355 * atomicity is provided by IRQs being disabled -- either explicitly
356 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
357 * CPU migrations and preemption potentially corrupts a counter so
358 * disable preemption.
359 */
7a025e91 360 preempt_disable_nested();
c68ed794 361
12938a92 362 x = delta + __this_cpu_read(*p);
2244b95a 363
12938a92 364 t = __this_cpu_read(pcp->stat_threshold);
2244b95a 365
40610076 366 if (unlikely(abs(x) > t)) {
2244b95a
CL
367 zone_page_state_add(x, zone, item);
368 x = 0;
369 }
12938a92 370 __this_cpu_write(*p, x);
c68ed794 371
7a025e91 372 preempt_enable_nested();
2244b95a
CL
373}
374EXPORT_SYMBOL(__mod_zone_page_state);
375
75ef7184
MG
376void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
377 long delta)
378{
379 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
380 s8 __percpu *p = pcp->vm_node_stat_diff + item;
381 long x;
382 long t;
383
ea426c2a 384 if (vmstat_item_in_bytes(item)) {
629484ae
JW
385 /*
386 * Only cgroups use subpage accounting right now; at
387 * the global level, these items still change in
388 * multiples of whole pages. Store them as pages
389 * internally to keep the per-cpu counters compact.
390 */
ea426c2a
RG
391 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
392 delta >>= PAGE_SHIFT;
393 }
394
c68ed794 395 /* See __mod_zone_page_state */
7a025e91 396 preempt_disable_nested();
c68ed794 397
75ef7184
MG
398 x = delta + __this_cpu_read(*p);
399
400 t = __this_cpu_read(pcp->stat_threshold);
401
40610076 402 if (unlikely(abs(x) > t)) {
75ef7184
MG
403 node_page_state_add(x, pgdat, item);
404 x = 0;
405 }
406 __this_cpu_write(*p, x);
c68ed794 407
7a025e91 408 preempt_enable_nested();
75ef7184
MG
409}
410EXPORT_SYMBOL(__mod_node_page_state);
411
2244b95a
CL
412/*
413 * Optimized increment and decrement functions.
414 *
415 * These are only for a single page and therefore can take a struct page *
416 * argument instead of struct zone *. This allows the inclusion of the code
417 * generated for page_zone(page) into the optimized functions.
418 *
419 * No overflow check is necessary and therefore the differential can be
420 * incremented or decremented in place which may allow the compilers to
421 * generate better code.
2244b95a
CL
422 * The increment or decrement is known and therefore one boundary check can
423 * be omitted.
424 *
df9ecaba
CL
425 * NOTE: These functions are very performance sensitive. Change only
426 * with care.
427 *
2244b95a
CL
428 * Some processors have inc/dec instructions that are atomic vs an interrupt.
429 * However, the code must first determine the differential location in a zone
430 * based on the processor number and then inc/dec the counter. There is no
431 * guarantee without disabling preemption that the processor will not change
432 * in between and therefore the atomicity vs. interrupt cannot be exploited
433 * in a useful way here.
434 */
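/*
 * Example of the overstep behaviour below (illustrative numbers): with a
 * threshold of t = 64, the increment that takes the per-cpu diff to 65
 * folds 65 + 32 into the zone counter and leaves the diff at -32, so the
 * very next increment does not immediately cross the threshold again.
 */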
c8785385 435void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
2244b95a 436{
28f836b6 437 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
12938a92
CL
438 s8 __percpu *p = pcp->vm_stat_diff + item;
439 s8 v, t;
2244b95a 440
c68ed794 441 /* See __mod_node_page_state */
7a025e91 442 preempt_disable_nested();
c68ed794 443
908ee0f1 444 v = __this_cpu_inc_return(*p);
12938a92
CL
445 t = __this_cpu_read(pcp->stat_threshold);
446 if (unlikely(v > t)) {
447 s8 overstep = t >> 1;
df9ecaba 448
12938a92
CL
449 zone_page_state_add(v + overstep, zone, item);
450 __this_cpu_write(*p, -overstep);
2244b95a 451 }
c68ed794 452
7a025e91 453 preempt_enable_nested();
2244b95a 454}
ca889e6c 455
75ef7184
MG
456void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
457{
458 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
459 s8 __percpu *p = pcp->vm_node_stat_diff + item;
460 s8 v, t;
461
ea426c2a
RG
462 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
463
c68ed794 464 /* See __mod_node_page_state */
7a025e91 465 preempt_disable_nested();
c68ed794 466
75ef7184
MG
467 v = __this_cpu_inc_return(*p);
468 t = __this_cpu_read(pcp->stat_threshold);
469 if (unlikely(v > t)) {
470 s8 overstep = t >> 1;
471
472 node_page_state_add(v + overstep, pgdat, item);
473 __this_cpu_write(*p, -overstep);
474 }
c68ed794 475
7a025e91 476 preempt_enable_nested();
75ef7184
MG
477}
478
ca889e6c
CL
479void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
480{
481 __inc_zone_state(page_zone(page), item);
482}
2244b95a
CL
483EXPORT_SYMBOL(__inc_zone_page_state);
484
75ef7184
MG
485void __inc_node_page_state(struct page *page, enum node_stat_item item)
486{
487 __inc_node_state(page_pgdat(page), item);
488}
489EXPORT_SYMBOL(__inc_node_page_state);
490
c8785385 491void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
2244b95a 492{
28f836b6 493 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
12938a92
CL
494 s8 __percpu *p = pcp->vm_stat_diff + item;
495 s8 v, t;
2244b95a 496
c68ed794 497 /* See __mod_node_page_state */
7a025e91 498 preempt_disable_nested();
c68ed794 499
908ee0f1 500 v = __this_cpu_dec_return(*p);
12938a92
CL
501 t = __this_cpu_read(pcp->stat_threshold);
502 if (unlikely(v < - t)) {
503 s8 overstep = t >> 1;
2244b95a 504
12938a92
CL
505 zone_page_state_add(v - overstep, zone, item);
506 __this_cpu_write(*p, overstep);
2244b95a 507 }
c68ed794 508
7a025e91 509 preempt_enable_nested();
2244b95a 510}
c8785385 511
75ef7184
MG
512void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
513{
514 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
515 s8 __percpu *p = pcp->vm_node_stat_diff + item;
516 s8 v, t;
517
ea426c2a
RG
518 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
519
c68ed794 520 /* See __mod_node_page_state */
7a025e91 521 preempt_disable_nested();
c68ed794 522
75ef7184
MG
523 v = __this_cpu_dec_return(*p);
524 t = __this_cpu_read(pcp->stat_threshold);
525 if (unlikely(v < - t)) {
526 s8 overstep = t >> 1;
527
528 node_page_state_add(v - overstep, pgdat, item);
529 __this_cpu_write(*p, overstep);
530 }
c68ed794 531
7a025e91 532 preempt_enable_nested();
75ef7184
MG
533}
534
c8785385
CL
535void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
536{
537 __dec_zone_state(page_zone(page), item);
538}
2244b95a
CL
539EXPORT_SYMBOL(__dec_zone_page_state);
540
75ef7184
MG
541void __dec_node_page_state(struct page *page, enum node_stat_item item)
542{
543 __dec_node_state(page_pgdat(page), item);
544}
545EXPORT_SYMBOL(__dec_node_page_state);
546
4156153c 547#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
7c839120
CL
548/*
549 * If we have cmpxchg_local support then we do not need to incur the overhead
550 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
551 *
552 * mod_state() modifies the zone counter state through atomic per cpu
553 * operations.
554 *
 555 * Overstep mode specifies how overstep should be handled:
556 * 0 No overstepping
557 * 1 Overstepping half of threshold
558 * -1 Overstepping minus half of threshold
559*/
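/*
 * For example, inc_zone_page_state() below passes delta = 1 with overstep
 * mode 1, dec_zone_page_state() passes -1 with mode -1, and
 * mod_zone_page_state() passes the caller's delta with mode 0.
 */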
75ef7184
MG
560static inline void mod_zone_state(struct zone *zone,
561 enum zone_stat_item item, long delta, int overstep_mode)
7c839120 562{
28f836b6 563 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
7c839120 564 s8 __percpu *p = pcp->vm_stat_diff + item;
77cd8148
UB
565 long n, t, z;
566 s8 o;
7c839120 567
77cd8148 568 o = this_cpu_read(*p);
7c839120
CL
569 do {
570 z = 0; /* overflow to zone counters */
571
572 /*
573 * The fetching of the stat_threshold is racy. We may apply
 574 * a counter threshold to the wrong cpu if we get
d3bc2367
CL
575 * rescheduled while executing here. However, the next
576 * counter update will apply the threshold again and
577 * therefore bring the counter under the threshold again.
578 *
 579 * Most of the time the thresholds are the same anyway
580 * for all cpus in a zone.
7c839120
CL
581 */
582 t = this_cpu_read(pcp->stat_threshold);
583
77cd8148 584 n = delta + (long)o;
7c839120 585
40610076 586 if (abs(n) > t) {
7c839120
CL
587 int os = overstep_mode * (t >> 1) ;
588
589 /* Overflow must be added to zone counters */
590 z = n + os;
591 n = -os;
592 }
77cd8148 593 } while (!this_cpu_try_cmpxchg(*p, &o, n));
7c839120
CL
594
595 if (z)
596 zone_page_state_add(z, zone, item);
597}
598
599void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
6cdb18ad 600 long delta)
7c839120 601{
75ef7184 602 mod_zone_state(zone, item, delta, 0);
7c839120
CL
603}
604EXPORT_SYMBOL(mod_zone_page_state);
605
7c839120
CL
606void inc_zone_page_state(struct page *page, enum zone_stat_item item)
607{
75ef7184 608 mod_zone_state(page_zone(page), item, 1, 1);
7c839120
CL
609}
610EXPORT_SYMBOL(inc_zone_page_state);
611
612void dec_zone_page_state(struct page *page, enum zone_stat_item item)
613{
75ef7184 614 mod_zone_state(page_zone(page), item, -1, -1);
7c839120
CL
615}
616EXPORT_SYMBOL(dec_zone_page_state);
75ef7184
MG
617
618static inline void mod_node_state(struct pglist_data *pgdat,
619 enum node_stat_item item, int delta, int overstep_mode)
620{
621 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
622 s8 __percpu *p = pcp->vm_node_stat_diff + item;
77cd8148
UB
623 long n, t, z;
624 s8 o;
75ef7184 625
ea426c2a 626 if (vmstat_item_in_bytes(item)) {
629484ae
JW
627 /*
628 * Only cgroups use subpage accounting right now; at
629 * the global level, these items still change in
630 * multiples of whole pages. Store them as pages
631 * internally to keep the per-cpu counters compact.
632 */
ea426c2a
RG
633 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
634 delta >>= PAGE_SHIFT;
635 }
636
77cd8148 637 o = this_cpu_read(*p);
75ef7184
MG
638 do {
639 z = 0; /* overflow to node counters */
640
641 /*
642 * The fetching of the stat_threshold is racy. We may apply
 643 * a counter threshold to the wrong cpu if we get
644 * rescheduled while executing here. However, the next
645 * counter update will apply the threshold again and
646 * therefore bring the counter under the threshold again.
647 *
 648 * Most of the time the thresholds are the same anyway
649 * for all cpus in a node.
650 */
651 t = this_cpu_read(pcp->stat_threshold);
652
77cd8148 653 n = delta + (long)o;
75ef7184 654
40610076 655 if (abs(n) > t) {
75ef7184
MG
656 int os = overstep_mode * (t >> 1) ;
657
658 /* Overflow must be added to node counters */
659 z = n + os;
660 n = -os;
661 }
77cd8148 662 } while (!this_cpu_try_cmpxchg(*p, &o, n));
75ef7184
MG
663
664 if (z)
665 node_page_state_add(z, pgdat, item);
666}
667
668void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
669 long delta)
670{
671 mod_node_state(pgdat, item, delta, 0);
672}
673EXPORT_SYMBOL(mod_node_page_state);
674
675void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
676{
677 mod_node_state(pgdat, item, 1, 1);
678}
679
680void inc_node_page_state(struct page *page, enum node_stat_item item)
681{
682 mod_node_state(page_pgdat(page), item, 1, 1);
683}
684EXPORT_SYMBOL(inc_node_page_state);
685
686void dec_node_page_state(struct page *page, enum node_stat_item item)
687{
688 mod_node_state(page_pgdat(page), item, -1, -1);
689}
690EXPORT_SYMBOL(dec_node_page_state);
7c839120
CL
691#else
692/*
693 * Use interrupt disable to serialize counter updates
694 */
695void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
6cdb18ad 696 long delta)
7c839120
CL
697{
698 unsigned long flags;
699
700 local_irq_save(flags);
701 __mod_zone_page_state(zone, item, delta);
702 local_irq_restore(flags);
703}
704EXPORT_SYMBOL(mod_zone_page_state);
705
2244b95a
CL
706void inc_zone_page_state(struct page *page, enum zone_stat_item item)
707{
708 unsigned long flags;
709 struct zone *zone;
2244b95a
CL
710
711 zone = page_zone(page);
712 local_irq_save(flags);
ca889e6c 713 __inc_zone_state(zone, item);
2244b95a
CL
714 local_irq_restore(flags);
715}
716EXPORT_SYMBOL(inc_zone_page_state);
717
718void dec_zone_page_state(struct page *page, enum zone_stat_item item)
719{
720 unsigned long flags;
2244b95a 721
2244b95a 722 local_irq_save(flags);
a302eb4e 723 __dec_zone_page_state(page, item);
2244b95a
CL
724 local_irq_restore(flags);
725}
726EXPORT_SYMBOL(dec_zone_page_state);
727
75ef7184
MG
728void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
729{
730 unsigned long flags;
731
732 local_irq_save(flags);
733 __inc_node_state(pgdat, item);
734 local_irq_restore(flags);
735}
736EXPORT_SYMBOL(inc_node_state);
737
738void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
739 long delta)
740{
741 unsigned long flags;
742
743 local_irq_save(flags);
744 __mod_node_page_state(pgdat, item, delta);
745 local_irq_restore(flags);
746}
747EXPORT_SYMBOL(mod_node_page_state);
748
749void inc_node_page_state(struct page *page, enum node_stat_item item)
750{
751 unsigned long flags;
752 struct pglist_data *pgdat;
753
754 pgdat = page_pgdat(page);
755 local_irq_save(flags);
756 __inc_node_state(pgdat, item);
757 local_irq_restore(flags);
758}
759EXPORT_SYMBOL(inc_node_page_state);
760
761void dec_node_page_state(struct page *page, enum node_stat_item item)
762{
763 unsigned long flags;
764
765 local_irq_save(flags);
766 __dec_node_page_state(page, item);
767 local_irq_restore(flags);
768}
769EXPORT_SYMBOL(dec_node_page_state);
770#endif
7cc36bbd
CL
771
772/*
773 * Fold a differential into the global counters.
774 * Returns the number of counters updated.
775 */
f19298b9 776static int fold_diff(int *zone_diff, int *node_diff)
3a321d2a
KW
777{
778 int i;
779 int changes = 0;
780
781 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
782 if (zone_diff[i]) {
783 atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
784 changes++;
785 }
786
3a321d2a
KW
787 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
788 if (node_diff[i]) {
789 atomic_long_add(node_diff[i], &vm_node_stat[i]);
790 changes++;
791 }
792 return changes;
793}
f19298b9 794
2244b95a 795/*
2bb921e5 796 * Update the zone and node counters for the current cpu.
a7f75e25 797 *
4037d452
CL
798 * Note that refresh_cpu_vm_stats strives to only access
799 * node local memory. The per cpu pagesets on remote zones are placed
800 * in the memory local to the processor using that pageset. So the
801 * loop over all zones will access a series of cachelines local to
802 * the processor.
803 *
804 * The call to zone_page_state_add updates the cachelines with the
805 * statistics in the remote zone struct as well as the global cachelines
806 * with the global counters. These could cause remote node cache line
807 * bouncing and will have to be only done when necessary.
7cc36bbd
CL
808 *
809 * The function returns the number of global counters updated.
2244b95a 810 */
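/*
 * @do_pagesets additionally decays the per-cpu page lists and drains remote
 * pagesets that have been idle for a few intervals; the periodic vmstat
 * worker is expected to pass true here, while lighter-weight callers pass
 * false and only fold the counters.
 */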
0eb77e98 811static int refresh_cpu_vm_stats(bool do_pagesets)
2244b95a 812{
75ef7184 813 struct pglist_data *pgdat;
2244b95a
CL
814 struct zone *zone;
815 int i;
75ef7184
MG
816 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
817 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
7cc36bbd 818 int changes = 0;
2244b95a 819
ee99c71c 820 for_each_populated_zone(zone) {
28f836b6 821 struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
28f836b6 822 struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
2244b95a 823
fbc2edb0
CL
824 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
825 int v;
2244b95a 826
28f836b6 827 v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
fbc2edb0 828 if (v) {
a7f75e25 829
a7f75e25 830 atomic_long_add(v, &zone->vm_stat[i]);
75ef7184 831 global_zone_diff[i] += v;
4037d452
CL
832#ifdef CONFIG_NUMA
833 /* 3 seconds idle till flush */
28f836b6 834 __this_cpu_write(pcp->expire, 3);
4037d452 835#endif
2244b95a 836 }
fbc2edb0 837 }
3a321d2a 838
0eb77e98
CL
839 if (do_pagesets) {
840 cond_resched();
51a755c5
HY
841
842 changes += decay_pcp_high(zone, this_cpu_ptr(pcp));
843#ifdef CONFIG_NUMA
0eb77e98
CL
844 /*
845 * Deal with draining the remote pageset of this
846 * processor
847 *
848 * Check if there are pages remaining in this pageset
849 * if not then there is nothing to expire.
850 */
28f836b6
MG
851 if (!__this_cpu_read(pcp->expire) ||
852 !__this_cpu_read(pcp->count))
0eb77e98 853 continue;
4037d452 854
0eb77e98
CL
855 /*
856 * We never drain zones local to this processor.
857 */
858 if (zone_to_nid(zone) == numa_node_id()) {
28f836b6 859 __this_cpu_write(pcp->expire, 0);
0eb77e98
CL
860 continue;
861 }
4037d452 862
fa8c4f9a
HY
863 if (__this_cpu_dec_return(pcp->expire)) {
864 changes++;
0eb77e98 865 continue;
fa8c4f9a 866 }
4037d452 867
28f836b6
MG
868 if (__this_cpu_read(pcp->count)) {
869 drain_zone_pages(zone, this_cpu_ptr(pcp));
0eb77e98
CL
870 changes++;
871 }
4037d452 872#endif
51a755c5 873 }
2244b95a 874 }
75ef7184
MG
875
876 for_each_online_pgdat(pgdat) {
877 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
878
879 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
880 int v;
881
882 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
883 if (v) {
884 atomic_long_add(v, &pgdat->vm_stat[i]);
885 global_node_diff[i] += v;
886 }
887 }
888 }
889
890 changes += fold_diff(global_zone_diff, global_node_diff);
7cc36bbd 891 return changes;
2244b95a
CL
892}
893
2bb921e5
CL
894/*
895 * Fold the data for an offline cpu into the global array.
896 * There cannot be any access by the offline cpu and therefore
897 * synchronization is simplified.
898 */
899void cpu_vm_stats_fold(int cpu)
900{
75ef7184 901 struct pglist_data *pgdat;
2bb921e5
CL
902 struct zone *zone;
903 int i;
75ef7184
MG
904 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
905 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
2bb921e5
CL
906
907 for_each_populated_zone(zone) {
28f836b6 908 struct per_cpu_zonestat *pzstats;
2bb921e5 909
28f836b6 910 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
2bb921e5 911
f19298b9 912 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
28f836b6 913 if (pzstats->vm_stat_diff[i]) {
2bb921e5
CL
914 int v;
915
28f836b6
MG
916 v = pzstats->vm_stat_diff[i];
917 pzstats->vm_stat_diff[i] = 0;
2bb921e5 918 atomic_long_add(v, &zone->vm_stat[i]);
75ef7184 919 global_zone_diff[i] += v;
2bb921e5 920 }
f19298b9 921 }
3a321d2a 922#ifdef CONFIG_NUMA
f19298b9
MG
923 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
924 if (pzstats->vm_numa_event[i]) {
925 unsigned long v;
3a321d2a 926
f19298b9
MG
927 v = pzstats->vm_numa_event[i];
928 pzstats->vm_numa_event[i] = 0;
929 zone_numa_event_add(v, zone, i);
3a321d2a 930 }
f19298b9 931 }
3a321d2a 932#endif
2bb921e5
CL
933 }
934
75ef7184
MG
935 for_each_online_pgdat(pgdat) {
936 struct per_cpu_nodestat *p;
937
938 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
939
940 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
941 if (p->vm_node_stat_diff[i]) {
942 int v;
943
944 v = p->vm_node_stat_diff[i];
945 p->vm_node_stat_diff[i] = 0;
946 atomic_long_add(v, &pgdat->vm_stat[i]);
947 global_node_diff[i] += v;
948 }
949 }
950
951 fold_diff(global_zone_diff, global_node_diff);
2bb921e5
CL
952}
953
40f4b1ea
CS
954/*
955 * this is only called if !populated_zone(zone), which implies no other users of
f0953a1b 956 * pzstats->vm_stat_diff[] exist.
40f4b1ea 957 */
28f836b6 958void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
5a883813 959{
f19298b9 960 unsigned long v;
5a883813
MK
961 int i;
962
f19298b9 963 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
28f836b6 964 if (pzstats->vm_stat_diff[i]) {
f19298b9 965 v = pzstats->vm_stat_diff[i];
28f836b6 966 pzstats->vm_stat_diff[i] = 0;
f19298b9 967 zone_page_state_add(v, zone, i);
5a883813 968 }
f19298b9 969 }
3a321d2a
KW
970
971#ifdef CONFIG_NUMA
f19298b9
MG
972 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
973 if (pzstats->vm_numa_event[i]) {
974 v = pzstats->vm_numa_event[i];
975 pzstats->vm_numa_event[i] = 0;
976 zone_numa_event_add(v, zone, i);
3a321d2a 977 }
f19298b9 978 }
3a321d2a 979#endif
5a883813 980}
2244b95a
CL
981#endif
982
ca889e6c 983#ifdef CONFIG_NUMA
c2d42c16 984/*
75ef7184
MG
985 * Determine the per node value of a stat item. This function
986 * is called frequently in a NUMA machine, so try to be as
987 * frugal as possible.
c2d42c16 988 */
75ef7184
MG
989unsigned long sum_zone_node_page_state(int node,
990 enum zone_stat_item item)
c2d42c16
AM
991{
992 struct zone *zones = NODE_DATA(node)->node_zones;
e87d59f7
JK
993 int i;
994 unsigned long count = 0;
c2d42c16 995
e87d59f7
JK
996 for (i = 0; i < MAX_NR_ZONES; i++)
997 count += zone_page_state(zones + i, item);
998
999 return count;
c2d42c16
AM
1000}
1001
f19298b9
MG
1002/* Determine the per node value of a numa stat item. */
1003unsigned long sum_zone_numa_event_state(int node,
3a321d2a
KW
1004 enum numa_stat_item item)
1005{
1006 struct zone *zones = NODE_DATA(node)->node_zones;
3a321d2a 1007 unsigned long count = 0;
f19298b9 1008 int i;
3a321d2a
KW
1009
1010 for (i = 0; i < MAX_NR_ZONES; i++)
f19298b9 1011 count += zone_numa_event_state(zones + i, item);
3a321d2a
KW
1012
1013 return count;
1014}
1015
75ef7184
MG
1016/*
1017 * Determine the per node value of a stat item.
1018 */
ea426c2a
RG
1019unsigned long node_page_state_pages(struct pglist_data *pgdat,
1020 enum node_stat_item item)
75ef7184
MG
1021{
1022 long x = atomic_long_read(&pgdat->vm_stat[item]);
1023#ifdef CONFIG_SMP
1024 if (x < 0)
1025 x = 0;
1026#endif
1027 return x;
1028}
ea426c2a
RG
1029
1030unsigned long node_page_state(struct pglist_data *pgdat,
1031 enum node_stat_item item)
1032{
1033 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
1034
1035 return node_page_state_pages(pgdat, item);
1036}
ca889e6c
CL
1037#endif
1038
9d857311
PT
1039/*
1040 * Count number of pages "struct page" and "struct page_ext" consume.
1041 * nr_memmap_boot_pages: # of pages allocated by boot allocator
1042 * nr_memmap_pages: # of pages that were allocated by buddy allocator
1043 */
1044static atomic_long_t nr_memmap_boot_pages = ATOMIC_LONG_INIT(0);
1045static atomic_long_t nr_memmap_pages = ATOMIC_LONG_INIT(0);
1046
1047void memmap_boot_pages_add(long delta)
1048{
1049 atomic_long_add(delta, &nr_memmap_boot_pages);
1050}
1051
1052void memmap_pages_add(long delta)
1053{
1054 atomic_long_add(delta, &nr_memmap_pages);
1055}
1056
d7a5752c 1057#ifdef CONFIG_COMPACTION
36deb0be 1058
d7a5752c
MG
1059struct contig_page_info {
1060 unsigned long free_pages;
1061 unsigned long free_blocks_total;
1062 unsigned long free_blocks_suitable;
1063};
1064
1065/*
1066 * Calculate the number of free pages in a zone, how many contiguous
1067 * pages are free and how many are large enough to satisfy an allocation of
1068 * the target size. Note that this function makes no attempt to estimate
1069 * how many suitable free blocks there *might* be if MOVABLE pages were
1070 * migrated. Calculating that is possible, but expensive and can be
1071 * figured out from userspace
1072 */
1073static void fill_contig_page_info(struct zone *zone,
1074 unsigned int suitable_order,
1075 struct contig_page_info *info)
1076{
1077 unsigned int order;
1078
1079 info->free_pages = 0;
1080 info->free_blocks_total = 0;
1081 info->free_blocks_suitable = 0;
1082
fd377218 1083 for (order = 0; order < NR_PAGE_ORDERS; order++) {
d7a5752c
MG
1084 unsigned long blocks;
1085
af1c31ac
LS
1086 /*
1087 * Count number of free blocks.
1088 *
1089 * Access to nr_free is lockless as nr_free is used only for
1090 * diagnostic purposes. Use data_race to avoid KCSAN warning.
1091 */
1092 blocks = data_race(zone->free_area[order].nr_free);
d7a5752c
MG
1093 info->free_blocks_total += blocks;
1094
1095 /* Count free base pages */
1096 info->free_pages += blocks << order;
1097
1098 /* Count the suitable free blocks */
1099 if (order >= suitable_order)
1100 info->free_blocks_suitable += blocks <<
1101 (order - suitable_order);
1102 }
1103}
f1a5ab12
MG
1104
1105/*
1106 * A fragmentation index only makes sense if an allocation of a requested
1107 * size would fail. If that is true, the fragmentation index indicates
1108 * whether external fragmentation or a lack of memory was the problem.
1109 * The value can be used to determine if page reclaim or compaction
1110 * should be used
1111 */
56de7263 1112static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
f1a5ab12
MG
1113{
1114 unsigned long requested = 1UL << order;
1115
5e0a760b 1116 if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
88d6ac40
WY
1117 return 0;
1118
f1a5ab12
MG
1119 if (!info->free_blocks_total)
1120 return 0;
1121
1122 /* Fragmentation index only makes sense when a request would fail */
1123 if (info->free_blocks_suitable)
1124 return -1000;
1125
1126 /*
1127 * Index is between 0 and 1 so return within 3 decimal places
1128 *
1129 * 0 => allocation would fail due to lack of memory
1130 * 1 => allocation would fail due to fragmentation
1131 */
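/*
 * Worked example (illustrative): 10000 free pages split entirely into
 * order-0 blocks with a request of order 3 (8 pages) yields
 * 1000 - (1000 + 10000 * 1000 / 8) / 10000 = 875, i.e. an index of
 * 0.875: the failure is dominated by fragmentation rather than by a
 * lack of memory.
 */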
1132 return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
1133}
56de7263 1134
facdaa91
NG
1135/*
1136 * Calculates external fragmentation within a zone wrt the given order.
1137 * It is defined as the percentage of pages found in blocks of size
1138 * less than 1 << order. It returns values in range [0, 100].
1139 */
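/*
 * Illustrative example: a zone with 1000 free pages of which four order-5
 * blocks (4 * 32 = 128 pages) can satisfy an order-5 request reports
 * (1000 - 128) * 100 / 1000 = 87, i.e. 87% of the free pages sit in blocks
 * smaller than the requested size.
 */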
d34c0a75 1140unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
facdaa91
NG
1141{
1142 struct contig_page_info info;
1143
1144 fill_contig_page_info(zone, order, &info);
1145 if (info.free_pages == 0)
1146 return 0;
1147
1148 return div_u64((info.free_pages -
1149 (info.free_blocks_suitable << order)) * 100,
1150 info.free_pages);
1151}
1152
56de7263
MG
 1153/* Same as __fragmentation_index but allocs contig_page_info on stack */
1154int fragmentation_index(struct zone *zone, unsigned int order)
1155{
1156 struct contig_page_info info;
1157
1158 fill_contig_page_info(zone, order, &info);
1159 return __fragmentation_index(order, &info);
1160}
d7a5752c
MG
1161#endif
1162
ebc5d83d
KK
1163#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1164 defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
fa25c503 1165#ifdef CONFIG_ZONE_DMA
ed6a9068 1166#define TEXT_FOR_DMA(xx, yy) [xx##_DMA] = yy "_dma",
fa25c503 1167#else
ed6a9068 1168#define TEXT_FOR_DMA(xx, yy)
fa25c503
KM
1169#endif
1170
1171#ifdef CONFIG_ZONE_DMA32
ed6a9068 1172#define TEXT_FOR_DMA32(xx, yy) [xx##_DMA32] = yy "_dma32",
fa25c503 1173#else
ed6a9068 1174#define TEXT_FOR_DMA32(xx, yy)
fa25c503
KM
1175#endif
1176
1177#ifdef CONFIG_HIGHMEM
ed6a9068 1178#define TEXT_FOR_HIGHMEM(xx, yy) [xx##_HIGH] = yy "_high",
fa25c503 1179#else
ed6a9068 1180#define TEXT_FOR_HIGHMEM(xx, yy)
fa25c503
KM
1181#endif
1182
a39c5d3c 1183#ifdef CONFIG_ZONE_DEVICE
ed6a9068 1184#define TEXT_FOR_DEVICE(xx, yy) [xx##_DEVICE] = yy "_device",
a39c5d3c 1185#else
ed6a9068 1186#define TEXT_FOR_DEVICE(xx, yy)
a39c5d3c
HL
1187#endif
1188
ed6a9068
KS
1189#define TEXTS_FOR_ZONES(xx, yy) \
1190 TEXT_FOR_DMA(xx, yy) \
1191 TEXT_FOR_DMA32(xx, yy) \
1192 [xx##_NORMAL] = yy "_normal", \
1193 TEXT_FOR_HIGHMEM(xx, yy) \
1194 [xx##_MOVABLE] = yy "_movable", \
1195 TEXT_FOR_DEVICE(xx, yy)
fa25c503
KM
1196
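/*
 * The names below are emitted in the same order in which the counters are
 * concatenated: zone stat items first, then (with CONFIG_NUMA) the numa
 * event items, then node stat items, the system-wide vm stat items and
 * finally the vm event counters. The I() helper redefined before each block
 * offsets the designated initializers into that combined index space.
 */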
1197const char * const vmstat_text[] = {
8d92890b 1198 /* enum zone_stat_item counters */
ed6a9068
KS
1199#define I(x) (x)
1200 [I(NR_FREE_PAGES)] = "nr_free_pages",
1201 [I(NR_FREE_PAGES_BLOCKS)] = "nr_free_pages_blocks",
1202 [I(NR_ZONE_INACTIVE_ANON)] = "nr_zone_inactive_anon",
1203 [I(NR_ZONE_ACTIVE_ANON)] = "nr_zone_active_anon",
1204 [I(NR_ZONE_INACTIVE_FILE)] = "nr_zone_inactive_file",
1205 [I(NR_ZONE_ACTIVE_FILE)] = "nr_zone_active_file",
1206 [I(NR_ZONE_UNEVICTABLE)] = "nr_zone_unevictable",
1207 [I(NR_ZONE_WRITE_PENDING)] = "nr_zone_write_pending",
1208 [I(NR_MLOCK)] = "nr_mlock",
91537fee 1209#if IS_ENABLED(CONFIG_ZSMALLOC)
ed6a9068 1210 [I(NR_ZSPAGES)] = "nr_zspages",
91537fee 1211#endif
ed6a9068 1212 [I(NR_FREE_CMA_PAGES)] = "nr_free_cma",
dcdfdd40 1213#ifdef CONFIG_UNACCEPTED_MEMORY
ed6a9068 1214 [I(NR_UNACCEPTED)] = "nr_unaccepted",
dcdfdd40 1215#endif
ed6a9068 1216#undef I
3a321d2a
KW
1217
1218 /* enum numa_stat_item counters */
ed6a9068 1219#define I(x) (NR_VM_ZONE_STAT_ITEMS + x)
fa25c503 1220#ifdef CONFIG_NUMA
ed6a9068
KS
1221 [I(NUMA_HIT)] = "numa_hit",
1222 [I(NUMA_MISS)] = "numa_miss",
1223 [I(NUMA_FOREIGN)] = "numa_foreign",
1224 [I(NUMA_INTERLEAVE_HIT)] = "numa_interleave",
1225 [I(NUMA_LOCAL)] = "numa_local",
1226 [I(NUMA_OTHER)] = "numa_other",
fa25c503 1227#endif
ed6a9068 1228#undef I
09316c09 1229
9d7ea9a2 1230 /* enum node_stat_item counters */
ed6a9068
KS
1231#define I(x) (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + x)
1232 [I(NR_INACTIVE_ANON)] = "nr_inactive_anon",
1233 [I(NR_ACTIVE_ANON)] = "nr_active_anon",
1234 [I(NR_INACTIVE_FILE)] = "nr_inactive_file",
1235 [I(NR_ACTIVE_FILE)] = "nr_active_file",
1236 [I(NR_UNEVICTABLE)] = "nr_unevictable",
1237 [I(NR_SLAB_RECLAIMABLE_B)] = "nr_slab_reclaimable",
1238 [I(NR_SLAB_UNRECLAIMABLE_B)] = "nr_slab_unreclaimable",
1239 [I(NR_ISOLATED_ANON)] = "nr_isolated_anon",
1240 [I(NR_ISOLATED_FILE)] = "nr_isolated_file",
1241 [I(WORKINGSET_NODES)] = "workingset_nodes",
1242 [I(WORKINGSET_REFAULT_ANON)] = "workingset_refault_anon",
1243 [I(WORKINGSET_REFAULT_FILE)] = "workingset_refault_file",
1244 [I(WORKINGSET_ACTIVATE_ANON)] = "workingset_activate_anon",
1245 [I(WORKINGSET_ACTIVATE_FILE)] = "workingset_activate_file",
1246 [I(WORKINGSET_RESTORE_ANON)] = "workingset_restore_anon",
1247 [I(WORKINGSET_RESTORE_FILE)] = "workingset_restore_file",
1248 [I(WORKINGSET_NODERECLAIM)] = "workingset_nodereclaim",
1249 [I(NR_ANON_MAPPED)] = "nr_anon_pages",
1250 [I(NR_FILE_MAPPED)] = "nr_mapped",
1251 [I(NR_FILE_PAGES)] = "nr_file_pages",
1252 [I(NR_FILE_DIRTY)] = "nr_dirty",
1253 [I(NR_WRITEBACK)] = "nr_writeback",
ed6a9068
KS
1254 [I(NR_SHMEM)] = "nr_shmem",
1255 [I(NR_SHMEM_THPS)] = "nr_shmem_hugepages",
1256 [I(NR_SHMEM_PMDMAPPED)] = "nr_shmem_pmdmapped",
1257 [I(NR_FILE_THPS)] = "nr_file_hugepages",
1258 [I(NR_FILE_PMDMAPPED)] = "nr_file_pmdmapped",
1259 [I(NR_ANON_THPS)] = "nr_anon_transparent_hugepages",
1260 [I(NR_VMSCAN_WRITE)] = "nr_vmscan_write",
1261 [I(NR_VMSCAN_IMMEDIATE)] = "nr_vmscan_immediate_reclaim",
1262 [I(NR_DIRTIED)] = "nr_dirtied",
1263 [I(NR_WRITTEN)] = "nr_written",
1264 [I(NR_THROTTLED_WRITTEN)] = "nr_throttled_written",
1265 [I(NR_KERNEL_MISC_RECLAIMABLE)] = "nr_kernel_misc_reclaimable",
1266 [I(NR_FOLL_PIN_ACQUIRED)] = "nr_foll_pin_acquired",
1267 [I(NR_FOLL_PIN_RELEASED)] = "nr_foll_pin_released",
1268 [I(NR_KERNEL_STACK_KB)] = "nr_kernel_stack",
991e7673 1269#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
ed6a9068 1270 [I(NR_KERNEL_SCS_KB)] = "nr_shadow_call_stack",
991e7673 1271#endif
ed6a9068
KS
1272 [I(NR_PAGETABLE)] = "nr_page_table_pages",
1273 [I(NR_SECONDARY_PAGETABLE)] = "nr_sec_page_table_pages",
bd3520a9 1274#ifdef CONFIG_IOMMU_SUPPORT
ed6a9068 1275 [I(NR_IOMMU_PAGES)] = "nr_iommu_pages",
bd3520a9 1276#endif
b6038942 1277#ifdef CONFIG_SWAP
ed6a9068 1278 [I(NR_SWAPCACHE)] = "nr_swapcached",
b6038942 1279#endif
e39bb6be 1280#ifdef CONFIG_NUMA_BALANCING
ed6a9068
KS
1281 [I(PGPROMOTE_SUCCESS)] = "pgpromote_success",
1282 [I(PGPROMOTE_CANDIDATE)] = "pgpromote_candidate",
b805ab3c 1283#endif
ed6a9068
KS
1284 [I(PGDEMOTE_KSWAPD)] = "pgdemote_kswapd",
1285 [I(PGDEMOTE_DIRECT)] = "pgdemote_direct",
1286 [I(PGDEMOTE_KHUGEPAGED)] = "pgdemote_khugepaged",
1287 [I(PGDEMOTE_PROACTIVE)] = "pgdemote_proactive",
05d4532b 1288#ifdef CONFIG_HUGETLB_PAGE
ed6a9068 1289 [I(NR_HUGETLB)] = "nr_hugetlb",
05d4532b 1290#endif
ed6a9068
KS
1291 [I(NR_BALLOON_PAGES)] = "nr_balloon_pages",
1292#undef I
1293
f4cb78af 1294 /* system-wide enum vm_stat_item counters */
ed6a9068
KS
1295#define I(x) (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + \
1296 NR_VM_NODE_STAT_ITEMS + x)
1297 [I(NR_DIRTY_THRESHOLD)] = "nr_dirty_threshold",
1298 [I(NR_DIRTY_BG_THRESHOLD)] = "nr_dirty_background_threshold",
1299 [I(NR_MEMMAP_PAGES)] = "nr_memmap_pages",
1300 [I(NR_MEMMAP_BOOT_PAGES)] = "nr_memmap_boot_pages",
1301#undef I
fa25c503 1302
fdc5001b 1303#if defined(CONFIG_VM_EVENT_COUNTERS)
09316c09 1304 /* enum vm_event_item counters */
ed6a9068
KS
1305#define I(x) (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + \
1306 NR_VM_NODE_STAT_ITEMS + NR_VM_STAT_ITEMS + x)
1307
1308 [I(PGPGIN)] = "pgpgin",
1309 [I(PGPGOUT)] = "pgpgout",
1310 [I(PSWPIN)] = "pswpin",
1311 [I(PSWPOUT)] = "pswpout",
1312
1313#define OFF (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + \
1314 NR_VM_NODE_STAT_ITEMS + NR_VM_STAT_ITEMS)
1315 TEXTS_FOR_ZONES(OFF+PGALLOC, "pgalloc")
1316 TEXTS_FOR_ZONES(OFF+ALLOCSTALL, "allocstall")
1317 TEXTS_FOR_ZONES(OFF+PGSCAN_SKIP, "pgskip")
1318#undef OFF
1319
1320 [I(PGFREE)] = "pgfree",
1321 [I(PGACTIVATE)] = "pgactivate",
1322 [I(PGDEACTIVATE)] = "pgdeactivate",
1323 [I(PGLAZYFREE)] = "pglazyfree",
1324
1325 [I(PGFAULT)] = "pgfault",
1326 [I(PGMAJFAULT)] = "pgmajfault",
1327 [I(PGLAZYFREED)] = "pglazyfreed",
1328
1329 [I(PGREFILL)] = "pgrefill",
1330 [I(PGREUSE)] = "pgreuse",
1331 [I(PGSTEAL_KSWAPD)] = "pgsteal_kswapd",
1332 [I(PGSTEAL_DIRECT)] = "pgsteal_direct",
1333 [I(PGSTEAL_KHUGEPAGED)] = "pgsteal_khugepaged",
1334 [I(PGSTEAL_PROACTIVE)] = "pgsteal_proactive",
1335 [I(PGSCAN_KSWAPD)] = "pgscan_kswapd",
1336 [I(PGSCAN_DIRECT)] = "pgscan_direct",
1337 [I(PGSCAN_KHUGEPAGED)] = "pgscan_khugepaged",
1338 [I(PGSCAN_PROACTIVE)] = "pgscan_proactive",
1339 [I(PGSCAN_DIRECT_THROTTLE)] = "pgscan_direct_throttle",
1340 [I(PGSCAN_ANON)] = "pgscan_anon",
1341 [I(PGSCAN_FILE)] = "pgscan_file",
1342 [I(PGSTEAL_ANON)] = "pgsteal_anon",
1343 [I(PGSTEAL_FILE)] = "pgsteal_file",
fa25c503
KM
1344
1345#ifdef CONFIG_NUMA
ed6a9068
KS
1346 [I(PGSCAN_ZONE_RECLAIM_SUCCESS)] = "zone_reclaim_success",
1347 [I(PGSCAN_ZONE_RECLAIM_FAILED)] = "zone_reclaim_failed",
fa25c503 1348#endif
ed6a9068
KS
1349 [I(PGINODESTEAL)] = "pginodesteal",
1350 [I(SLABS_SCANNED)] = "slabs_scanned",
1351 [I(KSWAPD_INODESTEAL)] = "kswapd_inodesteal",
1352 [I(KSWAPD_LOW_WMARK_HIT_QUICKLY)] = "kswapd_low_wmark_hit_quickly",
1353 [I(KSWAPD_HIGH_WMARK_HIT_QUICKLY)] = "kswapd_high_wmark_hit_quickly",
1354 [I(PAGEOUTRUN)] = "pageoutrun",
fa25c503 1355
ed6a9068 1356 [I(PGROTATED)] = "pgrotated",
fa25c503 1357
ed6a9068
KS
1358 [I(DROP_PAGECACHE)] = "drop_pagecache",
1359 [I(DROP_SLAB)] = "drop_slab",
1360 [I(OOM_KILL)] = "oom_kill",
5509a5d2 1361
03c5a6e1 1362#ifdef CONFIG_NUMA_BALANCING
ed6a9068
KS
1363 [I(NUMA_PTE_UPDATES)] = "numa_pte_updates",
1364 [I(NUMA_HUGE_PTE_UPDATES)] = "numa_huge_pte_updates",
1365 [I(NUMA_HINT_FAULTS)] = "numa_hint_faults",
1366 [I(NUMA_HINT_FAULTS_LOCAL)] = "numa_hint_faults_local",
1367 [I(NUMA_PAGE_MIGRATE)] = "numa_pages_migrated",
03c5a6e1 1368#endif
5647bc29 1369#ifdef CONFIG_MIGRATION
ed6a9068
KS
1370 [I(PGMIGRATE_SUCCESS)] = "pgmigrate_success",
1371 [I(PGMIGRATE_FAIL)] = "pgmigrate_fail",
1372 [I(THP_MIGRATION_SUCCESS)] = "thp_migration_success",
1373 [I(THP_MIGRATION_FAIL)] = "thp_migration_fail",
1374 [I(THP_MIGRATION_SPLIT)] = "thp_migration_split",
5647bc29 1375#endif
fa25c503 1376#ifdef CONFIG_COMPACTION
ed6a9068
KS
1377 [I(COMPACTMIGRATE_SCANNED)] = "compact_migrate_scanned",
1378 [I(COMPACTFREE_SCANNED)] = "compact_free_scanned",
1379 [I(COMPACTISOLATED)] = "compact_isolated",
1380 [I(COMPACTSTALL)] = "compact_stall",
1381 [I(COMPACTFAIL)] = "compact_fail",
1382 [I(COMPACTSUCCESS)] = "compact_success",
1383 [I(KCOMPACTD_WAKE)] = "compact_daemon_wake",
1384 [I(KCOMPACTD_MIGRATE_SCANNED)] = "compact_daemon_migrate_scanned",
1385 [I(KCOMPACTD_FREE_SCANNED)] = "compact_daemon_free_scanned",
fa25c503
KM
1386#endif
1387
1388#ifdef CONFIG_HUGETLB_PAGE
ed6a9068
KS
1389 [I(HTLB_BUDDY_PGALLOC)] = "htlb_buddy_alloc_success",
1390 [I(HTLB_BUDDY_PGALLOC_FAIL)] = "htlb_buddy_alloc_fail",
bbb26920
MK
1391#endif
1392#ifdef CONFIG_CMA
ed6a9068
KS
1393 [I(CMA_ALLOC_SUCCESS)] = "cma_alloc_success",
1394 [I(CMA_ALLOC_FAIL)] = "cma_alloc_fail",
fa25c503 1395#endif
ed6a9068
KS
1396 [I(UNEVICTABLE_PGCULLED)] = "unevictable_pgs_culled",
1397 [I(UNEVICTABLE_PGSCANNED)] = "unevictable_pgs_scanned",
1398 [I(UNEVICTABLE_PGRESCUED)] = "unevictable_pgs_rescued",
1399 [I(UNEVICTABLE_PGMLOCKED)] = "unevictable_pgs_mlocked",
1400 [I(UNEVICTABLE_PGMUNLOCKED)] = "unevictable_pgs_munlocked",
1401 [I(UNEVICTABLE_PGCLEARED)] = "unevictable_pgs_cleared",
1402 [I(UNEVICTABLE_PGSTRANDED)] = "unevictable_pgs_stranded",
fa25c503
KM
1403
1404#ifdef CONFIG_TRANSPARENT_HUGEPAGE
ed6a9068
KS
1405 [I(THP_FAULT_ALLOC)] = "thp_fault_alloc",
1406 [I(THP_FAULT_FALLBACK)] = "thp_fault_fallback",
1407 [I(THP_FAULT_FALLBACK_CHARGE)] = "thp_fault_fallback_charge",
1408 [I(THP_COLLAPSE_ALLOC)] = "thp_collapse_alloc",
1409 [I(THP_COLLAPSE_ALLOC_FAILED)] = "thp_collapse_alloc_failed",
1410 [I(THP_FILE_ALLOC)] = "thp_file_alloc",
1411 [I(THP_FILE_FALLBACK)] = "thp_file_fallback",
1412 [I(THP_FILE_FALLBACK_CHARGE)] = "thp_file_fallback_charge",
1413 [I(THP_FILE_MAPPED)] = "thp_file_mapped",
1414 [I(THP_SPLIT_PAGE)] = "thp_split_page",
1415 [I(THP_SPLIT_PAGE_FAILED)] = "thp_split_page_failed",
1416 [I(THP_DEFERRED_SPLIT_PAGE)] = "thp_deferred_split_page",
1417 [I(THP_UNDERUSED_SPLIT_PAGE)] = "thp_underused_split_page",
1418 [I(THP_SPLIT_PMD)] = "thp_split_pmd",
1419 [I(THP_SCAN_EXCEED_NONE_PTE)] = "thp_scan_exceed_none_pte",
1420 [I(THP_SCAN_EXCEED_SWAP_PTE)] = "thp_scan_exceed_swap_pte",
1421 [I(THP_SCAN_EXCEED_SHARED_PTE)] = "thp_scan_exceed_share_pte",
ce9311cf 1422#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
ed6a9068 1423 [I(THP_SPLIT_PUD)] = "thp_split_pud",
ce9311cf 1424#endif
ed6a9068
KS
1425 [I(THP_ZERO_PAGE_ALLOC)] = "thp_zero_page_alloc",
1426 [I(THP_ZERO_PAGE_ALLOC_FAILED)] = "thp_zero_page_alloc_failed",
1427 [I(THP_SWPOUT)] = "thp_swpout",
1428 [I(THP_SWPOUT_FALLBACK)] = "thp_swpout_fallback",
fa25c503 1429#endif
09316c09 1430#ifdef CONFIG_MEMORY_BALLOON
ed6a9068
KS
1431 [I(BALLOON_INFLATE)] = "balloon_inflate",
1432 [I(BALLOON_DEFLATE)] = "balloon_deflate",
09316c09 1433#ifdef CONFIG_BALLOON_COMPACTION
ed6a9068 1434 [I(BALLOON_MIGRATE)] = "balloon_migrate",
09316c09
KK
1435#endif
1436#endif /* CONFIG_MEMORY_BALLOON */
ec659934 1437#ifdef CONFIG_DEBUG_TLBFLUSH
ed6a9068
KS
1438 [I(NR_TLB_REMOTE_FLUSH)] = "nr_tlb_remote_flush",
1439 [I(NR_TLB_REMOTE_FLUSH_RECEIVED)] = "nr_tlb_remote_flush_received",
1440 [I(NR_TLB_LOCAL_FLUSH_ALL)] = "nr_tlb_local_flush_all",
1441 [I(NR_TLB_LOCAL_FLUSH_ONE)] = "nr_tlb_local_flush_one",
ec659934 1442#endif /* CONFIG_DEBUG_TLBFLUSH */
fa25c503 1443
cbc65df2 1444#ifdef CONFIG_SWAP
ed6a9068
KS
1445 [I(SWAP_RA)] = "swap_ra",
1446 [I(SWAP_RA_HIT)] = "swap_ra_hit",
1447 [I(SWPIN_ZERO)] = "swpin_zero",
1448 [I(SWPOUT_ZERO)] = "swpout_zero",
4d45c3af 1449#ifdef CONFIG_KSM
ed6a9068 1450 [I(KSM_SWPIN_COPY)] = "ksm_swpin_copy",
4d45c3af 1451#endif
cbc65df2 1452#endif
94bfe85b 1453#ifdef CONFIG_KSM
ed6a9068 1454 [I(COW_KSM)] = "cow_ksm",
94bfe85b 1455#endif
f6498b77 1456#ifdef CONFIG_ZSWAP
ed6a9068
KS
1457 [I(ZSWPIN)] = "zswpin",
1458 [I(ZSWPOUT)] = "zswpout",
1459 [I(ZSWPWB)] = "zswpwb",
f6498b77 1460#endif
575299ea 1461#ifdef CONFIG_X86
ed6a9068
KS
1462 [I(DIRECT_MAP_LEVEL2_SPLIT)] = "direct_map_level2_splits",
1463 [I(DIRECT_MAP_LEVEL3_SPLIT)] = "direct_map_level3_splits",
1464 [I(DIRECT_MAP_LEVEL2_COLLAPSE)] = "direct_map_level2_collapses",
1465 [I(DIRECT_MAP_LEVEL3_COLLAPSE)] = "direct_map_level3_collapses",
575299ea 1466#endif
52f23865 1467#ifdef CONFIG_PER_VMA_LOCK_STATS
ed6a9068
KS
1468 [I(VMA_LOCK_SUCCESS)] = "vma_lock_success",
1469 [I(VMA_LOCK_ABORT)] = "vma_lock_abort",
1470 [I(VMA_LOCK_RETRY)] = "vma_lock_retry",
1471 [I(VMA_LOCK_MISS)] = "vma_lock_miss",
52f23865 1472#endif
c4a6fce8 1473#ifdef CONFIG_DEBUG_STACK_USAGE
ed6a9068 1474 [I(KSTACK_1K)] = "kstack_1k",
c4a6fce8 1475#if THREAD_SIZE > 1024
ed6a9068 1476 [I(KSTACK_2K)] = "kstack_2k",
c4a6fce8
PT
1477#endif
1478#if THREAD_SIZE > 2048
ed6a9068 1479 [I(KSTACK_4K)] = "kstack_4k",
c4a6fce8
PT
1480#endif
1481#if THREAD_SIZE > 4096
ed6a9068 1482 [I(KSTACK_8K)] = "kstack_8k",
c4a6fce8
PT
1483#endif
1484#if THREAD_SIZE > 8192
ed6a9068 1485 [I(KSTACK_16K)] = "kstack_16k",
c4a6fce8
PT
1486#endif
1487#if THREAD_SIZE > 16384
ed6a9068 1488 [I(KSTACK_32K)] = "kstack_32k",
c4a6fce8
PT
1489#endif
1490#if THREAD_SIZE > 32768
ed6a9068 1491 [I(KSTACK_64K)] = "kstack_64k",
c4a6fce8
PT
1492#endif
1493#if THREAD_SIZE > 65536
ed6a9068 1494 [I(KSTACK_REST)] = "kstack_rest",
c4a6fce8
PT
1495#endif
1496#endif
ed6a9068 1497#undef I
fdc5001b 1498#endif /* CONFIG_VM_EVENT_COUNTERS */
fa25c503 1499};
ebc5d83d 1500#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
fa25c503 1501
3c486871
AM
1502#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1503 defined(CONFIG_PROC_FS)
1504static void *frag_start(struct seq_file *m, loff_t *pos)
1505{
1506 pg_data_t *pgdat;
1507 loff_t node = *pos;
1508
1509 for (pgdat = first_online_pgdat();
1510 pgdat && node;
1511 pgdat = next_online_pgdat(pgdat))
1512 --node;
1513
1514 return pgdat;
1515}
1516
1517static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1518{
1519 pg_data_t *pgdat = (pg_data_t *)arg;
1520
1521 (*pos)++;
1522 return next_online_pgdat(pgdat);
1523}
1524
1525static void frag_stop(struct seq_file *m, void *arg)
1526{
1527}
1528
b2bd8598
DR
1529/*
1530 * Walk zones in a node and print using a callback.
1531 * If @assert_populated is true, only use callback for zones that are populated.
1532 */
3c486871 1533static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
727c080f 1534 bool assert_populated, bool nolock,
3c486871
AM
1535 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1536{
1537 struct zone *zone;
1538 struct zone *node_zones = pgdat->node_zones;
1539 unsigned long flags;
1540
1541 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
b2bd8598 1542 if (assert_populated && !populated_zone(zone))
3c486871
AM
1543 continue;
1544
727c080f
VM
1545 if (!nolock)
1546 spin_lock_irqsave(&zone->lock, flags);
3c486871 1547 print(m, pgdat, zone);
727c080f
VM
1548 if (!nolock)
1549 spin_unlock_irqrestore(&zone->lock, flags);
3c486871
AM
1550 }
1551}
1552#endif
1553
d7a5752c 1554#ifdef CONFIG_PROC_FS
467c996c
MG
1555static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1556 struct zone *zone)
1557{
1558 int order;
1559
1560 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
fd377218 1561 for (order = 0; order < NR_PAGE_ORDERS; ++order)
af1c31ac
LS
1562 /*
1563 * Access to nr_free is lockless as nr_free is used only for
1564 * printing purposes. Use data_race to avoid KCSAN warning.
1565 */
1566 seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
467c996c
MG
1567 seq_putc(m, '\n');
1568}
1569
1570/*
1571 * This walks the free areas for each zone.
1572 */
1573static int frag_show(struct seq_file *m, void *arg)
1574{
1575 pg_data_t *pgdat = (pg_data_t *)arg;
727c080f 1576 walk_zones_in_node(m, pgdat, true, false, frag_show_print);
467c996c
MG
1577 return 0;
1578}
1579
1580static void pagetypeinfo_showfree_print(struct seq_file *m,
1581 pg_data_t *pgdat, struct zone *zone)
1582{
1583 int order, mtype;
1584
1585 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1586 seq_printf(m, "Node %4d, zone %8s, type %12s ",
1587 pgdat->node_id,
1588 zone->name,
1589 migratetype_names[mtype]);
fd377218 1590 for (order = 0; order < NR_PAGE_ORDERS; ++order) {
467c996c
MG
1591 unsigned long freecount = 0;
1592 struct free_area *area;
1593 struct list_head *curr;
93b3a674 1594 bool overflow = false;
467c996c
MG
1595
1596 area = &(zone->free_area[order]);
1597
93b3a674
MH
1598 list_for_each(curr, &area->free_list[mtype]) {
1599 /*
1600 * Cap the free_list iteration because it might
1601 * be really large and we are under a spinlock
1602 * so a long time spent here could trigger a
1603 * hard lockup detector. Anyway this is a
1604 * debugging tool so knowing there is a handful
1605 * of pages of this order should be more than
1606 * sufficient.
1607 */
1608 if (++freecount >= 100000) {
1609 overflow = true;
1610 break;
1611 }
1612 }
1613 seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1614 spin_unlock_irq(&zone->lock);
1615 cond_resched();
1616 spin_lock_irq(&zone->lock);
467c996c 1617 }
f6ac2354
CL
1618 seq_putc(m, '\n');
1619 }
467c996c
MG
1620}
1621
 1622/* Print out the free pages at each order for each migratetype */
33090af9 1623static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
467c996c
MG
1624{
1625 int order;
1626 pg_data_t *pgdat = (pg_data_t *)arg;
1627
1628 /* Print header */
1629 seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
fd377218 1630 for (order = 0; order < NR_PAGE_ORDERS; ++order)
467c996c
MG
1631 seq_printf(m, "%6d ", order);
1632 seq_putc(m, '\n');
1633
727c080f 1634 walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
467c996c
MG
1635}
1636
1637static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1638 pg_data_t *pgdat, struct zone *zone)
1639{
1640 int mtype;
1641 unsigned long pfn;
1642 unsigned long start_pfn = zone->zone_start_pfn;
108bcc96 1643 unsigned long end_pfn = zone_end_pfn(zone);
467c996c
MG
1644 unsigned long count[MIGRATE_TYPES] = { 0, };
1645
1646 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1647 struct page *page;
1648
d336e94e
MH
1649 page = pfn_to_online_page(pfn);
1650 if (!page)
467c996c
MG
1651 continue;
1652
a91c43c7
JK
1653 if (page_zone(page) != zone)
1654 continue;
1655
467c996c
MG
1656 mtype = get_pageblock_migratetype(page);
1657
e80d6a24
MG
1658 if (mtype < MIGRATE_TYPES)
1659 count[mtype]++;
467c996c
MG
1660 }
1661
1662 /* Print counts */
1663 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1664 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1665 seq_printf(m, "%12lu ", count[mtype]);
1666 seq_putc(m, '\n');
1667}

/* Print out the number of pageblocks for each migratetype */
static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, true, false,
		pagetypeinfo_showblockcount_print);
}

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on.
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, true,
		pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = frag_show,
};

static const struct seq_operations pagetypeinfo_op = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = pagetypeinfo_show,
};

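/*
 * Return true when @zone is the lowest populated zone of its node, so
 * the per-node stats in /proc/zoneinfo are emitted only once per node.
 */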
static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *compare = &pgdat->node_zones[zid];

		if (populated_zone(compare))
			return zone == compare;
	}

	return false;
}

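/*
 * One stanza per zone, roughly (values hypothetical):
 *   Node 0, zone   Normal
 *    per-node stats
 *        nr_inactive_anon 1234
 *        ...
 *    pages free 5678
 *    ...
 * The per-node block appears only under the first populated zone.
 */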
static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	if (is_zone_first_populated(pgdat, zone)) {
		seq_printf(m, "\n per-node stats");
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			unsigned long pages = node_page_state_pages(pgdat, i);

			if (vmstat_item_print_in_thp(i))
				pages /= HPAGE_PMD_NR;
			seq_printf(m, "\n %-12s %lu", node_stat_name(i),
				   pages);
		}
	}
	seq_printf(m,
		   "\n pages free %lu"
		   "\n boost %lu"
		   "\n min %lu"
		   "\n low %lu"
		   "\n high %lu"
		   "\n promo %lu"
		   "\n spanned %lu"
		   "\n present %lu"
		   "\n managed %lu"
		   "\n cma %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   zone->watermark_boost,
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   promo_wmark_pages(zone),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone_managed_pages(zone),
		   zone_cma_pages(zone));

	seq_printf(m,
		   "\n protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_putc(m, ')');

	/* If unpopulated, no other information is useful */
	if (!populated_zone(zone)) {
		seq_putc(m, '\n');
		return;
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n %-12s %lu", zone_stat_name(i),
			   zone_page_state(zone, i));

#ifdef CONFIG_NUMA
	fold_vm_zone_numa_events(zone);
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
			   zone_numa_event_state(zone, i));
#endif

	seq_printf(m, "\n pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pages *pcp;
		struct per_cpu_zonestat __maybe_unused *pzstats;

		pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
		seq_printf(m,
			   "\n cpu: %i"
			   "\n count: %i"
			   "\n high: %i"
			   "\n batch: %i"
			   "\n high_min: %i"
			   "\n high_max: %i",
			   i,
			   pcp->count,
			   pcp->high,
			   pcp->batch,
			   pcp->high_min,
			   pcp->high_max);
#ifdef CONFIG_SMP
		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
		seq_printf(m, "\n vm stats threshold: %d",
			   pzstats->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n node_unreclaimable: %u"
		   "\n start_pfn: %lu",
		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
		   zone->zone_start_pfn);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat. All zones are printed regardless
 * of whether they are populated or not: lowmem_reserve_ratio operates on the
 * set of all zones and userspace would not be aware of such zones if they are
 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start = frag_start, /* iterate over all zones, same as for fragmentation */
	.next = frag_next,
	.stop = frag_stop,
	.show = zoneinfo_show,
};

#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
			 NR_VM_NUMA_EVENT_ITEMS + \
			 NR_VM_NODE_STAT_ITEMS + \
			 NR_VM_STAT_ITEMS + \
			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
			  NR_VM_EVENT_ITEMS : 0))
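/*
 * The components above must stay in the order in which vmstat_start()
 * fills its snapshot buffer; vmstat_text is indexed the same way, as
 * the BUILD_BUG_ON() in vmstat_start() checks.
 */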

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i;

	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;

	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) != NR_VMSTAT_ITEMS);
	fold_vm_numa_events();
	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_zone_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		v[i] = global_numa_event_state(i);
	v += NR_VM_NUMA_EVENT_ITEMS;
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		v[i] = global_node_page_state_pages(i);
		if (vmstat_item_print_in_thp(i))
			v[i] /= HPAGE_PMD_NR;
	}
	v += NR_VM_NODE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v[NR_MEMMAP_PAGES] = atomic_long_read(&nr_memmap_pages);
	v[NR_MEMMAP_BOOT_PAGES] = atomic_long_read(&nr_memmap_boot_pages);
	v += NR_VM_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_puts(m, vmstat_text[off]);
	seq_put_decimal_ull(m, " ", *l);
	seq_putc(m, '\n');

	if (off == NR_VMSTAT_ITEMS - 1) {
		/*
		 * We've come to the end - add any deprecated counters to avoid
		 * breaking userspace which might depend on them being present.
		 */
		seq_puts(m, "nr_unstable 0\n");
	}
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start = vmstat_start,
	.next = vmstat_next,
	.stop = vmstat_stop,
	.show = vmstat_show,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
static int sysctl_stat_interval __read_mostly = HZ;
static int vmstat_late_init_done;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
	refresh_cpu_vm_stats(true);
}

static int vmstat_refresh(const struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	long val;
	int err;
	int i;

	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected: leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_zone_page_state() etc. are so careful to hide
	 * transiently negative values, report an error here if any of
	 * the stats is negative, so we know to go looking for imbalance.
	 */
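	/*
	 * Typical use (as root): "echo 1 >/proc/sys/vm/stat_refresh";
	 * reading the file performs the same flush.
	 */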
	err = schedule_on_each_cpu(refresh_vm_stats);
	if (err)
		return err;
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_ZONE_WRITE_PENDING:
		case NR_FREE_CMA_PAGES:
			continue;
		}
		val = atomic_long_read(&vm_zone_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, zone_stat_name(i), val);
		}
	}
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_WRITEBACK:
			continue;
		}
		val = atomic_long_read(&vm_node_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, node_stat_name(i), val);
		}
	}
	if (write)
		*ppos += *lenp;
	else
		*lenp = 0;
	return 0;
}
#endif /* CONFIG_PROC_FS */

static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));
	}
}

/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	pg_data_t *last_pgdat = NULL;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		struct per_cpu_nodestat *n;

		/*
		 * The fast way of checking if there are any vmstat diffs.
		 */
		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
			return true;

		if (last_pgdat == zone->zone_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
			return true;
	}
	return false;
}

/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 */
void quiet_vmstat(void)
{
	if (system_state != SYSTEM_RUNNING)
		return;

	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
		return;

	if (!need_update(smp_processor_id()))
		return;

	/*
	 * Just refresh counters and do not care about the pending delayed
	 * vmstat_update. It doesn't fire that often to matter and canceling
	 * it would be too expensive from this path.
	 * vmstat_shepherd will take care of that for us.
	 */
	refresh_cpu_vm_stats(false);
}

/*
 * Shepherd worker thread that checks the differentials of processors
 * that have their worker threads for vm statistics updates disabled
 * because of inactivity.
 */
static void vmstat_shepherd(struct work_struct *w);

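/* Deferrable: an idle CPU is not woken up merely to run the shepherd. */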
static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	cpus_read_lock();
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		/*
		 * In-kernel users of vmstat counters either require the
		 * precise value, in which case they use the
		 * zone_page_state_snapshot() interface, or they can live
		 * with an imprecision, as the regular flushing can happen
		 * at arbitrary time and cumulative error can grow
		 * (see calculate_normal_threshold()).
		 *
		 * From that POV the regular flushing can be postponed for
		 * CPUs that have been isolated from kernel interference
		 * without critical infrastructure ever noticing. Skip
		 * regular flushing from vmstat_shepherd for all isolated
		 * CPUs to avoid interference with the isolated workload.
		 */
		if (cpu_is_isolated(cpu))
			continue;

		if (!delayed_work_pending(dw) && need_update(cpu))
			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);

		cond_resched();
	}
	cpus_read_unlock();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

		/*
		 * For secondary CPUs during CPU hotplug scenarios,
		 * vmstat_cpu_online() will enable the work.
		 * mm/vmstat:online enables and disables vmstat_work
		 * symmetrically during CPU hotplug events.
		 */
		if (!cpu_online(cpu))
			disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	}

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init init_cpu_node_state(void)
{
	int node;

	for_each_online_node(node) {
		if (!cpumask_empty(cpumask_of_node(node)))
			node_set_state(node, N_CPU);
	}
}

static int vmstat_cpu_online(unsigned int cpu)
{
	if (vmstat_late_init_done)
		refresh_zone_stat_thresholds();

	if (!node_state(cpu_to_node(cpu), N_CPU))
		node_set_state(cpu_to_node(cpu), N_CPU);
	enable_delayed_work(&per_cpu(vmstat_work, cpu));

	return 0;
}

static int vmstat_cpu_down_prep(unsigned int cpu)
{
	disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	return 0;
}

static int vmstat_cpu_dead(unsigned int cpu)
{
	const struct cpumask *node_cpus;
	int node;

	node = cpu_to_node(cpu);

	refresh_zone_stat_thresholds();
	node_cpus = cpumask_of_node(node);
	if (!cpumask_empty(node_cpus))
		return 0;

	node_clear_state(node, N_CPU);

	return 0;
}

static int __init vmstat_late_init(void)
{
	refresh_zone_stat_thresholds();
	vmstat_late_init_done = 1;

	return 0;
}
late_initcall(vmstat_late_init);
#endif

#ifdef CONFIG_PROC_FS
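/*
 * Knobs registered under /proc/sys/vm/: stat_interval (shown in
 * seconds, stored as jiffies), stat_refresh (any access flushes the
 * per-cpu diffs) and, on NUMA builds, numa_stat.
 */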
static const struct ctl_table vmstat_table[] = {
#ifdef CONFIG_SMP
	{
		.procname = "stat_interval",
		.data = &sysctl_stat_interval,
		.maxlen = sizeof(sysctl_stat_interval),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "stat_refresh",
		.data = NULL,
		.maxlen = 0,
		.mode = 0600,
		.proc_handler = vmstat_refresh,
	},
#endif
#ifdef CONFIG_NUMA
	{
		.procname = "numa_stat",
		.data = &sysctl_vm_numa_stat,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = sysctl_vm_numa_stat_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
#endif
};
#endif

struct workqueue_struct *mm_percpu_wq;

void __init init_mm_internals(void)
{
	int ret __maybe_unused;

	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);

#ifdef CONFIG_SMP
	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
					NULL, vmstat_cpu_dead);
	if (ret < 0)
		pr_err("vmstat: failed to register 'dead' hotplug state\n");

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
					vmstat_cpu_online,
					vmstat_cpu_down_prep);
	if (ret < 0)
		pr_err("vmstat: failed to register 'online' hotplug state\n");

	cpus_read_lock();
	init_cpu_node_state();
	cpus_read_unlock();

	start_shepherd_timer();
#endif
#ifdef CONFIG_PROC_FS
	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
	register_sysctl_init("vm", vmstat_table);
#endif
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
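/*
 * For example (hypothetical numbers): with 1000 free pages of which 200
 * order-1 blocks (400 pages) are large enough, the index is
 * (1000 - (200 << 1)) * 1000 / 1000 = 600, reported as 0.600.
 */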
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages -
			(info->free_blocks_suitable << order)) * 1000ULL,
		       info->free_pages);
}

static void unusable_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of the free memory
 * is unusable and, by implication, the worse the external fragmentation is.
 * This can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_sops = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = unusable_show,
};

DEFINE_SEQ_ATTRIBUTE(unusable);

static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_sops = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = extfrag_show,
};

DEFINE_SEQ_ATTRIBUTE(extfrag);

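/*
 * With debugfs mounted in the usual place, this creates
 * /sys/kernel/debug/extfrag/unusable_index and
 * /sys/kernel/debug/extfrag/extfrag_index.
 */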
static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);

	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
			    &unusable_fops);

	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
			    &extfrag_fops);

	return 0;
}

module_init(extfrag_debug_init);

#endif