/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT   0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
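
/*
 * Example (illustrative, not part of the original header): the __ prefixed
 * variants use raw_cpu ops and may lose a rare update when the caller can
 * migrate between CPUs; the plain variants are safe from any context.
 * PGFAULT and PGMAJFAULT are existing vm_event_item values; the
 * surrounding code is hypothetical:
 *
 *	count_vm_event(PGFAULT);		// any context
 *
 *	preempt_disable();
 *	__count_vm_event(PGMAJFAULT);		// caller is non-preemptible
 *	preempt_enable();
 */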

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
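
/*
 * For instance (illustrative expansion, not part of the original header),
 * with the existing PGALLOC_* event items:
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 *
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zid, ...),
 * i.e. it selects the PGALLOC_* counter that matches the page's zone index.
 */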

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
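
/*
 * Example (illustrative, not part of the original header): a rough global
 * free-page count can be read through an existing zone_stat_item:
 *
 *	unsigned long free = global_zone_page_state(NR_FREE_PAGES);
 *
 * The value may lag reality by whatever per-cpu deltas have not been
 * folded back into the atomic counters yet.
 */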

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}
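
/*
 * Example (illustrative, not part of the original header): items counted
 * in bytes, such as the existing NR_SLAB_RECLAIMABLE_B, must be read via
 * the _pages variant; passing them to global_node_page_state() would trip
 * the VM_WARN_ON_ONCE() above:
 *
 *	unsigned long slab_pages =
 *		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B);
 */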

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result is still not
 * guaranteed to be exact.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
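
/*
 * Example (illustrative, not part of the original header): a caller that
 * must not act on a stale count pays for the per-cpu walk; "threshold"
 * below is a hypothetical stand-in for whatever limit the caller applies:
 *
 *	if (zone_page_state_snapshot(zone, NR_FREE_PAGES) < threshold)
 *		// react to genuinely low free memory
 */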

#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}
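
/*
 * Example (illustrative, not part of the original header): the allocator
 * records whether an allocation was satisfied on the requesting node;
 * NUMA_HIT and NUMA_MISS are existing numa_stat_item values:
 *
 *	if (zone_to_nid(zone) == numa_node_id())
 *		__count_numa_event(zone, NUMA_HIT);
 *	else
 *		__count_numa_event(zone, NUMA_MISS);
 */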

extern unsigned long sum_zone_node_page_state(int node,
				enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
				enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
				enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
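
/*
 * Example (illustrative, not part of the original header): when 1 << order
 * pages go back onto the free lists, NR_FREE_PAGES and, for CMA
 * pageblocks, NR_FREE_CMA_PAGES move together:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *
 * with a negative nr_pages on the allocation side.
 */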

extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
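
/*
 * Example (illustrative, not part of the original header): vmstat_text is
 * one flat array ordered as zone items, NUMA event items, node items,
 * writeback items and finally VM event items, which is why the helpers
 * above add up the sizes of the preceding groups. For existing items:
 *
 *	zone_stat_name(NR_FREE_PAGES);		// "nr_free_pages"
 *	lru_list_name(LRU_ACTIVE_ANON);		// "active_anon" ("nr_" skipped)
 */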

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			     enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}
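
/*
 * Example (illustrative, not part of the original header): the __ prefixed
 * lruvec helpers assume the caller already disabled interrupts, e.g. while
 * holding an irq-safe lock; the plain ones disable them internally.
 * NR_FILE_DIRTY is an existing node_stat_item:
 *
 *	__mod_lruvec_page_state(page, NR_FILE_DIRTY, 1);	// irqs off
 *	mod_lruvec_page_state(page, NR_FILE_DIRTY, -1);		// any context
 */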

#endif /* _LINUX_VMSTAT_H */