/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT   0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

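/*
 * Illustrative sketch (assumed caller, not part of this header): code in
 * the page allocator gates NUMA statistics behind vm_numa_stat_key so the
 * counters cost a single patched branch when the sysctl disables them:
 *
 *	if (!static_branch_likely(&vm_numa_stat_key))
 *		return;
 *	__count_numa_event(zone, NUMA_HIT);
 */
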
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

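/*
 * Illustrative sketch (assumed caller, modelled on mm/vmscan.c internals):
 * reclaim zero-initializes a reclaim_stat, hands it to the shrink path,
 * and reads the per-outcome tallies back afterwards:
 *
 *	struct reclaim_stat stat = { 0 };
 *
 *	nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false);
 *	if (stat.nr_unqueued_dirty)
 *		wakeup_flusher_threads(WB_REASON_VMSCAN);
 */
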
enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

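/*
 * Illustrative sketch (callers live in mm/, not here): the fault path
 * uses the this_cpu variant because it runs preemptible, while the page
 * allocator uses the raw variant with IRQs already disabled:
 *
 *	count_vm_event(PGFAULT);
 *	__count_vm_events(PGFREE, 1 << order);
 */
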
extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)		count_vm_event(x)
#define count_vm_numa_events(x, y)	count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)		count_vm_event(x)
#define count_vm_tlb_events(x, y)	count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_PER_VMA_LOCK_STATS
#define count_vm_vma_lock_event(x) count_vm_event(x)
#else
#define count_vm_vma_lock_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

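/*
 * Illustrative sketch (assumed caller): the item##_NORMAL token pasting
 * above relies on per-zone event names being laid out contiguously in
 * zone order, so the allocator can count a zone-qualified allocation as:
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 */
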
/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

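/*
 * Illustrative sketch (assumed caller; high_wmark is a stand-in value):
 * the cheap zone_page_state() read is fine for hot-path checks, while
 * zone_page_state_snapshot() is reserved for slow paths that must see
 * the pending per-cpu deltas before making a decision:
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) > high_wmark)
 *		return true;
 *	nr_free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */
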
#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}

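/*
 * Illustrative sketch (modelled on the allocator's NUMA accounting): a
 * page allocated on the preferred zone's node counts as a hit, anything
 * else as a miss on the zone it actually came from:
 *
 *	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
 *		__count_numa_events(z, NUMA_HIT, nr);
 *	else
 *		__count_numa_events(z, NUMA_MISS, nr);
 */
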
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
					enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}

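/*
 * Illustrative sketch (assumed caller): items whose name ends in _B are
 * passed in bytes and must stay page-aligned at this level, e.g. slab
 * charging a whole page worth of reclaimable objects:
 *
 *	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
 */
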
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */

static inline void __zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void __node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

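/*
 * Illustrative sketch (assumed caller): the folio wrappers above keep
 * callers from repeating the zone/node lookup and the folio_nr_pages()
 * arithmetic, e.g. mlock accounting a possibly-large folio:
 *
 *	zone_stat_mod_folio(folio, NR_MLOCK, folio_nr_pages(folio));
 */
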
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

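/*
 * Illustrative sketch (assumed caller): the buddy allocator adjusts the
 * free counters in both directions through this helper so that CMA pages
 * stay accounted separately, e.g. when pulling a page off a free list:
 *
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 */
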
extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

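/*
 * Illustrative sketch (assumed caller; i is a stand-in loop variable):
 * vmstat_text[] is one flat array laid out zone, numa, node, writeback,
 * then vm events, which is why each accessor above adds the sizes of all
 * preceding groups. Dumping the zone counters, for example:
 *
 *	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 *		pr_info("%s %lu\n", zone_stat_name(i),
 *			global_zone_page_state(i));
 */
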
#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			     enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

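/*
 * Illustrative sketch (assumed behaviour): with CONFIG_MEMCG the lruvec
 * helpers charge both the node and the owning memory cgroup; without it
 * they collapse to plain node accounting, so callers keep a single call
 * site either way:
 *
 *	mod_lruvec_page_state(page, NR_FILE_PAGES, 1);
 */
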
static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
					   enum node_stat_item idx, int val)
{
	__mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void __lruvec_stat_add_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
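
/*
 * Illustrative sketch (assumed caller): the page cache adds and removes
 * folios with the add/sub pair so a large folio is counted by its full
 * page span:
 *
 *	__lruvec_stat_add_folio(folio, NR_FILE_PAGES);
 *	...
 *	__lruvec_stat_sub_folio(folio, NR_FILE_PAGES);
 */
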
#endif /* _LINUX_VMSTAT_H */