/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_activate;
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
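
/*
 * Usage sketch (not part of this header; the surrounding function is
 * hypothetical, the event names are real vm_event_item values).
 * count_vm_event() is safe from any context; the __ variant skips the
 * preemption protection of this_cpu_inc() and is used where a rare
 * lost increment is tolerable or preemption is already disabled:
 *
 *	static void example_fault_path(void)
 *	{
 *		count_vm_event(PGFAULT);	// safe in any context
 *
 *		preempt_disable();
 *		__count_vm_event(PGMAJFAULT);	// cheaper: already exclusive
 *		preempt_enable();
 *	}
 */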

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
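
/*
 * Note on the stub variants above: "(void)(y)" keeps the delta
 * expression formally used, so a kernel built without
 * CONFIG_NUMA_BALANCING or CONFIG_DEBUG_TLBFLUSH does not warn about
 * variables whose only consumer is the counter. Hypothetical caller:
 *
 *	unsigned long updated = do_pte_scan();	// made-up helper
 *	count_vm_numa_events(NUMA_PTE_UPDATES, updated);
 *
 * Without the cast, "updated" would trigger -Wunused-but-set-variable
 * whenever the macro expands to an empty body.
 */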

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
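
/*
 * How the macro resolves (illustrative): per-zone event items are
 * declared contiguously in zone order in vm_event_item.h, so offsetting
 * from the _NORMAL entry by (zid - ZONE_NORMAL) lands on the item for
 * the requested zone. For example:
 *
 *	__count_zid_vm_events(PGALLOC, ZONE_DMA, 1 << order)
 * expands to
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1 << order)
 * i.e. PGALLOC_DMA is incremented.
 */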

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */
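
/*
 * Reader's note (illustrative): global_numa_state() only reads the
 * global atomic and may lag by whatever still sits in the per-cpu
 * vm_numa_stat_diff deltas; zone_numa_state_snapshot() folds those in
 * at the cost of walking every online CPU. A caller wanting the more
 * accurate figure, e.g. for a stats dump, would use:
 *
 *	unsigned long hits = zone_numa_state_snapshot(zone, NUMA_HIT);
 */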

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
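
/*
 * Why the "x < 0" clamp (illustrative): decrements already folded into
 * the zone atomic can race with increments still parked in another
 * CPU's vm_stat_diff, so a read may transiently see a negative total
 * for a counter that is logically non-negative. Clamping avoids
 * returning a huge bogus value through the unsigned return type. A
 * hypothetical accuracy-sensitive check would prefer the snapshot:
 *
 *	if (zone_page_state_snapshot(zone, NR_FREE_PAGES) < watermark)
 *		// treat the zone as under memory pressure
 */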

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
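
/*
 * Convention sketch for the declarations above: the __ prefixed
 * modifiers assume the caller already runs with interrupts disabled
 * (or is otherwise safe against re-entry) and skip the irq
 * save/restore that the plain variants perform. Hypothetical callers:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); // irqs off
 *	mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);   // any context
 */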

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
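
/*
 * Usage sketch (hypothetical caller in the buddy allocator): nr_pages
 * is a signed page count, so freeing a 2^order block adds and
 * allocating one subtracts:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);	// free
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);	// alloc
 *
 * CMA pages are mirrored into NR_FREE_CMA_PAGES so watermark logic can
 * discount free CMA memory for allocations that cannot use it.
 */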

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */