Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
f6ac2354 CL |
2 | #ifndef _LINUX_VMSTAT_H |
3 | #define _LINUX_VMSTAT_H | |
4 | ||
5 | #include <linux/types.h> | |
6 | #include <linux/percpu.h> | |
2244b95a | 7 | #include <linux/mmzone.h> |
f042e707 | 8 | #include <linux/vm_event_item.h> |
60063497 | 9 | #include <linux/atomic.h> |
4518085e | 10 | #include <linux/static_key.h> |
ea426c2a | 11 | #include <linux/mmdebug.h> |
f6ac2354 | 12 | |
c748e134 AB |
13 | extern int sysctl_stat_interval; |
14 | ||
#ifdef CONFIG_NUMA
/* Runtime toggle for the NUMA hit/miss counters (sysctl vm.numa_stat). */
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT  0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif
d51d1e64 SR |
24 | struct reclaim_stat { |
25 | unsigned nr_dirty; | |
26 | unsigned nr_unqueued_dirty; | |
27 | unsigned nr_congested; | |
28 | unsigned nr_writeback; | |
29 | unsigned nr_immediate; | |
96f8bf4f | 30 | unsigned nr_pageout; |
ed017373 | 31 | unsigned nr_activate[ANON_AND_FILE]; |
d51d1e64 SR |
32 | unsigned nr_ref_keep; |
33 | unsigned nr_unmap_fail; | |
1f318a9b | 34 | unsigned nr_lazyfree_fail; |
d51d1e64 SR |
35 | }; |
36 | ||
/* Dirty-throttling thresholds exported via /proc/vmstat. */
enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,	/* must be last */
};
42 | ||
780a0656 AM |
43 | #ifdef CONFIG_VM_EVENT_COUNTERS |
44 | /* | |
45 | * Light weight per cpu counter implementation. | |
46 | * | |
47 | * Counters should only be incremented and no critical kernel component | |
48 | * should rely on the counter values. | |
49 | * | |
50 | * Counters are handled completely inline. On many platforms the code | |
51 | * generated will simply be the increment of a global address. | |
52 | */ | |
53 | ||
f8891e5e CL |
54 | struct vm_event_state { |
55 | unsigned long event[NR_VM_EVENT_ITEMS]; | |
f6ac2354 CL |
56 | }; |
57 | ||
f8891e5e CL |
58 | DECLARE_PER_CPU(struct vm_event_state, vm_event_states); |
59 | ||
293b6a4c CL |
60 | /* |
61 | * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the | |
62 | * local_irq_disable overhead. | |
63 | */ | |
f8891e5e CL |
64 | static inline void __count_vm_event(enum vm_event_item item) |
65 | { | |
293b6a4c | 66 | raw_cpu_inc(vm_event_states.event[item]); |
f8891e5e CL |
67 | } |
68 | ||
69 | static inline void count_vm_event(enum vm_event_item item) | |
70 | { | |
dd17c8f7 | 71 | this_cpu_inc(vm_event_states.event[item]); |
f8891e5e CL |
72 | } |
73 | ||
74 | static inline void __count_vm_events(enum vm_event_item item, long delta) | |
75 | { | |
293b6a4c | 76 | raw_cpu_add(vm_event_states.event[item], delta); |
f8891e5e CL |
77 | } |
78 | ||
79 | static inline void count_vm_events(enum vm_event_item item, long delta) | |
80 | { | |
dd17c8f7 | 81 | this_cpu_add(vm_event_states.event[item], delta); |
f8891e5e CL |
82 | } |
83 | ||
84 | extern void all_vm_events(unsigned long *); | |
f1cb0879 | 85 | |
f8891e5e CL |
86 | extern void vm_events_fold_cpu(int cpu); |
87 | ||
88 | #else | |
89 | ||
90 | /* Disable counters */ | |
780a0656 AM |
91 | static inline void count_vm_event(enum vm_event_item item) |
92 | { | |
93 | } | |
94 | static inline void count_vm_events(enum vm_event_item item, long delta) | |
95 | { | |
96 | } | |
97 | static inline void __count_vm_event(enum vm_event_item item) | |
98 | { | |
99 | } | |
100 | static inline void __count_vm_events(enum vm_event_item item, long delta) | |
101 | { | |
102 | } | |
103 | static inline void all_vm_events(unsigned long *ret) | |
104 | { | |
105 | } | |
106 | static inline void vm_events_fold_cpu(int cpu) | |
107 | { | |
108 | } | |
f8891e5e CL |
109 | |
110 | #endif /* CONFIG_VM_EVENT_COUNTERS */ | |
111 | ||
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)		count_vm_event(x)
#define count_vm_numa_events(x, y)	count_vm_events(x, y)
#else
/* (void)(y) keeps the argument evaluated-looking without side effects. */
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)		count_vm_event(x)
#define count_vm_tlb_events(x, y)	count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
127 | ||
16709d1d MG |
128 | #define __count_zid_vm_events(item, zid, delta) \ |
129 | __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) | |
f6ac2354 | 130 | |
2244b95a | 131 | /* |
75ef7184 | 132 | * Zone and node-based page accounting with per cpu differentials. |
2244b95a | 133 | */ |
75ef7184 MG |
134 | extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS]; |
135 | extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS]; | |
f19298b9 | 136 | extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; |
2244b95a | 137 | |
#ifdef CONFIG_NUMA
/* Fold @x into both the per-zone and the global NUMA event counter. */
static inline void zone_numa_event_add(long x, struct zone *zone,
				       enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
						  enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */
158 | ||
2244b95a CL |
159 | static inline void zone_page_state_add(long x, struct zone *zone, |
160 | enum zone_stat_item item) | |
161 | { | |
162 | atomic_long_add(x, &zone->vm_stat[item]); | |
75ef7184 MG |
163 | atomic_long_add(x, &vm_zone_stat[item]); |
164 | } | |
165 | ||
166 | static inline void node_page_state_add(long x, struct pglist_data *pgdat, | |
167 | enum node_stat_item item) | |
168 | { | |
169 | atomic_long_add(x, &pgdat->vm_stat[item]); | |
170 | atomic_long_add(x, &vm_node_stat[item]); | |
2244b95a CL |
171 | } |
172 | ||
c41f012a | 173 | static inline unsigned long global_zone_page_state(enum zone_stat_item item) |
2244b95a | 174 | { |
75ef7184 MG |
175 | long x = atomic_long_read(&vm_zone_stat[item]); |
176 | #ifdef CONFIG_SMP | |
177 | if (x < 0) | |
178 | x = 0; | |
179 | #endif | |
180 | return x; | |
181 | } | |
182 | ||
ea426c2a RG |
183 | static inline |
184 | unsigned long global_node_page_state_pages(enum node_stat_item item) | |
75ef7184 MG |
185 | { |
186 | long x = atomic_long_read(&vm_node_stat[item]); | |
2244b95a CL |
187 | #ifdef CONFIG_SMP |
188 | if (x < 0) | |
189 | x = 0; | |
190 | #endif | |
191 | return x; | |
192 | } | |
193 | ||
ea426c2a RG |
194 | static inline unsigned long global_node_page_state(enum node_stat_item item) |
195 | { | |
196 | VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); | |
197 | ||
198 | return global_node_page_state_pages(item); | |
199 | } | |
200 | ||
2244b95a CL |
201 | static inline unsigned long zone_page_state(struct zone *zone, |
202 | enum zone_stat_item item) | |
203 | { | |
204 | long x = atomic_long_read(&zone->vm_stat[item]); | |
205 | #ifdef CONFIG_SMP | |
206 | if (x < 0) | |
207 | x = 0; | |
208 | #endif | |
209 | return x; | |
210 | } | |
211 | ||
aa454840 CL |
212 | /* |
213 | * More accurate version that also considers the currently pending | |
214 | * deltas. For that we need to loop over all cpus to find the current | |
215 | * deltas. There is no synchronization so the result cannot be | |
216 | * exactly accurate either. | |
217 | */ | |
218 | static inline unsigned long zone_page_state_snapshot(struct zone *zone, | |
219 | enum zone_stat_item item) | |
220 | { | |
221 | long x = atomic_long_read(&zone->vm_stat[item]); | |
222 | ||
223 | #ifdef CONFIG_SMP | |
224 | int cpu; | |
225 | for_each_online_cpu(cpu) | |
28f836b6 | 226 | x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item]; |
aa454840 CL |
227 | |
228 | if (x < 0) | |
229 | x = 0; | |
230 | #endif | |
231 | return x; | |
232 | } | |
233 | ||
#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}

extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
				     enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
/* !NUMA: a single node, so node-local reads collapse to the globals. */
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */
2244b95a | 268 | |
2244b95a | 269 | #ifdef CONFIG_SMP |
6cdb18ad | 270 | void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); |
2244b95a CL |
271 | void __inc_zone_page_state(struct page *, enum zone_stat_item); |
272 | void __dec_zone_page_state(struct page *, enum zone_stat_item); | |
f6ac2354 | 273 | |
75ef7184 MG |
274 | void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long); |
275 | void __inc_node_page_state(struct page *, enum node_stat_item); | |
276 | void __dec_node_page_state(struct page *, enum node_stat_item); | |
277 | ||
6cdb18ad | 278 | void mod_zone_page_state(struct zone *, enum zone_stat_item, long); |
2244b95a CL |
279 | void inc_zone_page_state(struct page *, enum zone_stat_item); |
280 | void dec_zone_page_state(struct page *, enum zone_stat_item); | |
281 | ||
75ef7184 MG |
282 | void mod_node_page_state(struct pglist_data *, enum node_stat_item, long); |
283 | void inc_node_page_state(struct page *, enum node_stat_item); | |
284 | void dec_node_page_state(struct page *, enum node_stat_item); | |
285 | ||
75ef7184 | 286 | extern void inc_node_state(struct pglist_data *, enum node_stat_item); |
c8785385 | 287 | extern void __inc_zone_state(struct zone *, enum zone_stat_item); |
75ef7184 | 288 | extern void __inc_node_state(struct pglist_data *, enum node_stat_item); |
c8785385 CL |
289 | extern void dec_zone_state(struct zone *, enum zone_stat_item); |
290 | extern void __dec_zone_state(struct zone *, enum zone_stat_item); | |
75ef7184 | 291 | extern void __dec_node_state(struct pglist_data *, enum node_stat_item); |
2244b95a | 292 | |
0eb77e98 | 293 | void quiet_vmstat(void); |
2bb921e5 | 294 | void cpu_vm_stats_fold(int cpu); |
a6cccdc3 | 295 | void refresh_zone_stat_thresholds(void); |
b44129b3 | 296 | |
52b6f46b | 297 | struct ctl_table; |
32927393 CH |
298 | int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp, |
299 | loff_t *ppos); | |
52b6f46b | 300 | |
28f836b6 | 301 | void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *); |
5a883813 | 302 | |
b44129b3 MG |
303 | int calculate_pressure_threshold(struct zone *zone); |
304 | int calculate_normal_threshold(struct zone *zone); | |
305 | void set_pgdat_percpu_threshold(pg_data_t *pgdat, | |
306 | int (*calculate_pressure)(struct zone *)); | |
2244b95a CL |
307 | #else /* CONFIG_SMP */ |
308 | ||
309 | /* | |
310 | * We do not maintain differentials in a single processor configuration. | |
311 | * The functions directly modify the zone and global counters. | |
312 | */ | |
313 | static inline void __mod_zone_page_state(struct zone *zone, | |
6cdb18ad | 314 | enum zone_stat_item item, long delta) |
2244b95a CL |
315 | { |
316 | zone_page_state_add(delta, zone, item); | |
317 | } | |
318 | ||
75ef7184 MG |
319 | static inline void __mod_node_page_state(struct pglist_data *pgdat, |
320 | enum node_stat_item item, int delta) | |
321 | { | |
be458311 | 322 | if (vmstat_item_in_bytes(item)) { |
629484ae JW |
323 | /* |
324 | * Only cgroups use subpage accounting right now; at | |
325 | * the global level, these items still change in | |
326 | * multiples of whole pages. Store them as pages | |
327 | * internally to keep the per-cpu counters compact. | |
328 | */ | |
be458311 RG |
329 | VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); |
330 | delta >>= PAGE_SHIFT; | |
331 | } | |
332 | ||
75ef7184 MG |
333 | node_page_state_add(delta, pgdat, item); |
334 | } | |
335 | ||
7f4599e9 CL |
336 | static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) |
337 | { | |
338 | atomic_long_inc(&zone->vm_stat[item]); | |
75ef7184 MG |
339 | atomic_long_inc(&vm_zone_stat[item]); |
340 | } | |
341 | ||
342 | static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) | |
343 | { | |
344 | atomic_long_inc(&pgdat->vm_stat[item]); | |
345 | atomic_long_inc(&vm_node_stat[item]); | |
7f4599e9 CL |
346 | } |
347 | ||
c8785385 CL |
348 | static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) |
349 | { | |
350 | atomic_long_dec(&zone->vm_stat[item]); | |
75ef7184 MG |
351 | atomic_long_dec(&vm_zone_stat[item]); |
352 | } | |
353 | ||
354 | static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) | |
355 | { | |
356 | atomic_long_dec(&pgdat->vm_stat[item]); | |
357 | atomic_long_dec(&vm_node_stat[item]); | |
c8785385 CL |
358 | } |
359 | ||
6a3ed212 JW |
360 | static inline void __inc_zone_page_state(struct page *page, |
361 | enum zone_stat_item item) | |
362 | { | |
363 | __inc_zone_state(page_zone(page), item); | |
364 | } | |
365 | ||
75ef7184 MG |
366 | static inline void __inc_node_page_state(struct page *page, |
367 | enum node_stat_item item) | |
368 | { | |
369 | __inc_node_state(page_pgdat(page), item); | |
370 | } | |
371 | ||
372 | ||
2244b95a CL |
373 | static inline void __dec_zone_page_state(struct page *page, |
374 | enum zone_stat_item item) | |
375 | { | |
57ce36fe | 376 | __dec_zone_state(page_zone(page), item); |
2244b95a CL |
377 | } |
378 | ||
75ef7184 MG |
379 | static inline void __dec_node_page_state(struct page *page, |
380 | enum node_stat_item item) | |
381 | { | |
382 | __dec_node_state(page_pgdat(page), item); | |
383 | } | |
384 | ||
385 | ||
2244b95a CL |
386 | /* |
387 | * We only use atomic operations to update counters. So there is no need to | |
388 | * disable interrupts. | |
389 | */ | |
390 | #define inc_zone_page_state __inc_zone_page_state | |
391 | #define dec_zone_page_state __dec_zone_page_state | |
392 | #define mod_zone_page_state __mod_zone_page_state | |
393 | ||
75ef7184 MG |
394 | #define inc_node_page_state __inc_node_page_state |
395 | #define dec_node_page_state __dec_node_page_state | |
396 | #define mod_node_page_state __mod_node_page_state | |
397 | ||
6a3ed212 | 398 | #define inc_zone_state __inc_zone_state |
75ef7184 | 399 | #define inc_node_state __inc_node_state |
6a3ed212 JW |
400 | #define dec_zone_state __dec_zone_state |
401 | ||
b44129b3 | 402 | #define set_pgdat_percpu_threshold(pgdat, callback) { } |
88f5acf8 | 403 | |
a6cccdc3 | 404 | static inline void refresh_zone_stat_thresholds(void) { } |
2bb921e5 | 405 | static inline void cpu_vm_stats_fold(int cpu) { } |
0eb77e98 | 406 | static inline void quiet_vmstat(void) { } |
a6cccdc3 | 407 | |
5a883813 | 408 | static inline void drain_zonestat(struct zone *zone, |
28f836b6 | 409 | struct per_cpu_zonestat *pzstats) { } |
fa25c503 KM |
410 | #endif /* CONFIG_SMP */ |
411 | ||
a53e17e4 MWO |
412 | static inline void __zone_stat_mod_folio(struct folio *folio, |
413 | enum zone_stat_item item, long nr) | |
414 | { | |
415 | __mod_zone_page_state(folio_zone(folio), item, nr); | |
416 | } | |
417 | ||
418 | static inline void __zone_stat_add_folio(struct folio *folio, | |
419 | enum zone_stat_item item) | |
420 | { | |
421 | __mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio)); | |
422 | } | |
423 | ||
424 | static inline void __zone_stat_sub_folio(struct folio *folio, | |
425 | enum zone_stat_item item) | |
426 | { | |
427 | __mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio)); | |
428 | } | |
429 | ||
430 | static inline void zone_stat_mod_folio(struct folio *folio, | |
431 | enum zone_stat_item item, long nr) | |
432 | { | |
433 | mod_zone_page_state(folio_zone(folio), item, nr); | |
434 | } | |
435 | ||
436 | static inline void zone_stat_add_folio(struct folio *folio, | |
437 | enum zone_stat_item item) | |
438 | { | |
439 | mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio)); | |
440 | } | |
441 | ||
442 | static inline void zone_stat_sub_folio(struct folio *folio, | |
443 | enum zone_stat_item item) | |
444 | { | |
445 | mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio)); | |
446 | } | |
447 | ||
448 | static inline void __node_stat_mod_folio(struct folio *folio, | |
449 | enum node_stat_item item, long nr) | |
450 | { | |
451 | __mod_node_page_state(folio_pgdat(folio), item, nr); | |
452 | } | |
453 | ||
454 | static inline void __node_stat_add_folio(struct folio *folio, | |
455 | enum node_stat_item item) | |
456 | { | |
457 | __mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio)); | |
458 | } | |
459 | ||
460 | static inline void __node_stat_sub_folio(struct folio *folio, | |
461 | enum node_stat_item item) | |
462 | { | |
463 | __mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio)); | |
464 | } | |
465 | ||
466 | static inline void node_stat_mod_folio(struct folio *folio, | |
467 | enum node_stat_item item, long nr) | |
468 | { | |
469 | mod_node_page_state(folio_pgdat(folio), item, nr); | |
470 | } | |
471 | ||
472 | static inline void node_stat_add_folio(struct folio *folio, | |
473 | enum node_stat_item item) | |
474 | { | |
475 | mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio)); | |
476 | } | |
477 | ||
478 | static inline void node_stat_sub_folio(struct folio *folio, | |
479 | enum node_stat_item item) | |
480 | { | |
481 | mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio)); | |
482 | } | |
483 | ||
d1ce749a BZ |
484 | static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, |
485 | int migratetype) | |
486 | { | |
487 | __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); | |
488 | if (is_migrate_cma(migratetype)) | |
489 | __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); | |
490 | } | |
491 | ||
fa25c503 | 492 | extern const char * const vmstat_text[]; |
2244b95a | 493 | |
9d7ea9a2 KK |
494 | static inline const char *zone_stat_name(enum zone_stat_item item) |
495 | { | |
496 | return vmstat_text[item]; | |
497 | } | |
498 | ||
499 | #ifdef CONFIG_NUMA | |
500 | static inline const char *numa_stat_name(enum numa_stat_item item) | |
501 | { | |
502 | return vmstat_text[NR_VM_ZONE_STAT_ITEMS + | |
503 | item]; | |
504 | } | |
505 | #endif /* CONFIG_NUMA */ | |
506 | ||
507 | static inline const char *node_stat_name(enum node_stat_item item) | |
508 | { | |
509 | return vmstat_text[NR_VM_ZONE_STAT_ITEMS + | |
f19298b9 | 510 | NR_VM_NUMA_EVENT_ITEMS + |
9d7ea9a2 KK |
511 | item]; |
512 | } | |
513 | ||
514 | static inline const char *lru_list_name(enum lru_list lru) | |
515 | { | |
516 | return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_" | |
517 | } | |
518 | ||
519 | static inline const char *writeback_stat_name(enum writeback_stat_item item) | |
520 | { | |
521 | return vmstat_text[NR_VM_ZONE_STAT_ITEMS + | |
f19298b9 | 522 | NR_VM_NUMA_EVENT_ITEMS + |
9d7ea9a2 KK |
523 | NR_VM_NODE_STAT_ITEMS + |
524 | item]; | |
525 | } | |
526 | ||
#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
/* vm events sit after all counter sections in vmstat_text[]. */
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
9d7ea9a2 | 537 | |
c47d5032 SB |
538 | #ifdef CONFIG_MEMCG |
539 | ||
540 | void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, | |
541 | int val); | |
542 | ||
543 | static inline void mod_lruvec_state(struct lruvec *lruvec, | |
544 | enum node_stat_item idx, int val) | |
545 | { | |
546 | unsigned long flags; | |
547 | ||
548 | local_irq_save(flags); | |
549 | __mod_lruvec_state(lruvec, idx, val); | |
550 | local_irq_restore(flags); | |
551 | } | |
552 | ||
553 | void __mod_lruvec_page_state(struct page *page, | |
554 | enum node_stat_item idx, int val); | |
555 | ||
556 | static inline void mod_lruvec_page_state(struct page *page, | |
557 | enum node_stat_item idx, int val) | |
558 | { | |
559 | unsigned long flags; | |
560 | ||
561 | local_irq_save(flags); | |
562 | __mod_lruvec_page_state(page, idx, val); | |
563 | local_irq_restore(flags); | |
564 | } | |
565 | ||
566 | #else | |
567 | ||
568 | static inline void __mod_lruvec_state(struct lruvec *lruvec, | |
569 | enum node_stat_item idx, int val) | |
570 | { | |
571 | __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); | |
572 | } | |
573 | ||
574 | static inline void mod_lruvec_state(struct lruvec *lruvec, | |
575 | enum node_stat_item idx, int val) | |
576 | { | |
577 | mod_node_page_state(lruvec_pgdat(lruvec), idx, val); | |
578 | } | |
579 | ||
580 | static inline void __mod_lruvec_page_state(struct page *page, | |
581 | enum node_stat_item idx, int val) | |
582 | { | |
583 | __mod_node_page_state(page_pgdat(page), idx, val); | |
584 | } | |
585 | ||
586 | static inline void mod_lruvec_page_state(struct page *page, | |
587 | enum node_stat_item idx, int val) | |
588 | { | |
589 | mod_node_page_state(page_pgdat(page), idx, val); | |
590 | } | |
591 | ||
592 | #endif /* CONFIG_MEMCG */ | |
593 | ||
c47d5032 SB |
594 | static inline void __inc_lruvec_page_state(struct page *page, |
595 | enum node_stat_item idx) | |
596 | { | |
597 | __mod_lruvec_page_state(page, idx, 1); | |
598 | } | |
599 | ||
600 | static inline void __dec_lruvec_page_state(struct page *page, | |
601 | enum node_stat_item idx) | |
602 | { | |
603 | __mod_lruvec_page_state(page, idx, -1); | |
604 | } | |
605 | ||
a53e17e4 MWO |
606 | static inline void __lruvec_stat_mod_folio(struct folio *folio, |
607 | enum node_stat_item idx, int val) | |
608 | { | |
609 | __mod_lruvec_page_state(&folio->page, idx, val); | |
610 | } | |
611 | ||
612 | static inline void __lruvec_stat_add_folio(struct folio *folio, | |
613 | enum node_stat_item idx) | |
614 | { | |
615 | __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio)); | |
616 | } | |
617 | ||
618 | static inline void __lruvec_stat_sub_folio(struct folio *folio, | |
619 | enum node_stat_item idx) | |
620 | { | |
621 | __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio)); | |
622 | } | |
623 | ||
c47d5032 SB |
624 | static inline void inc_lruvec_page_state(struct page *page, |
625 | enum node_stat_item idx) | |
626 | { | |
627 | mod_lruvec_page_state(page, idx, 1); | |
628 | } | |
629 | ||
630 | static inline void dec_lruvec_page_state(struct page *page, | |
631 | enum node_stat_item idx) | |
632 | { | |
633 | mod_lruvec_page_state(page, idx, -1); | |
634 | } | |
635 | ||
a53e17e4 MWO |
636 | static inline void lruvec_stat_mod_folio(struct folio *folio, |
637 | enum node_stat_item idx, int val) | |
638 | { | |
639 | mod_lruvec_page_state(&folio->page, idx, val); | |
640 | } | |
641 | ||
642 | static inline void lruvec_stat_add_folio(struct folio *folio, | |
643 | enum node_stat_item idx) | |
644 | { | |
645 | lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio)); | |
646 | } | |
647 | ||
648 | static inline void lruvec_stat_sub_folio(struct folio *folio, | |
649 | enum node_stat_item idx) | |
650 | { | |
651 | lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio)); | |
652 | } | |
2244b95a | 653 | #endif /* _LINUX_VMSTAT_H */ |