#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
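/*
 * Usage sketch (illustrative, not part of the API contract): callers bump
 * an event with count_vm_event()/count_vm_events().  The double-underscore
 * variants assume the caller already keeps preemption (or interrupts)
 * disabled around the per-cpu update, e.g.:
 *
 *	count_vm_event(PGFAULT);		safe from any context
 *
 *	preempt_disable();
 *	__count_vm_events(PGFREE, 1 << order);	caller serializes this cpu
 *	preempt_enable();
 *
 * PGFAULT and PGFREE are just example vm_event_item values.
 */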
extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
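/*
 * Illustrative expansion (assuming a per-zone event family named with the
 * FOO_DMA/FOO_NORMAL/... convention that the item## paste above relies on):
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * counts into PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone), i.e. the event
 * item is offset by the zone index so every zone gets its own counter slot.
 */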
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
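/*
 * Reader-side sketch: the helpers above return the folded global/per-zone
 * totals, e.g. (hypothetical caller):
 *
 *	unsigned long nr_free      = global_page_state(NR_FREE_PAGES);
 *	unsigned long nr_zone_free = zone_page_state(zone, NR_FREE_PAGES);
 *
 * Per-cpu differentials may not have been folded back yet, so the stored
 * total can transiently go negative; on SMP it is clamped to 0 here.
 */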
/*
 * A more accurate version that also adds in the per-cpu deltas that have
 * not yet been folded into the zone counter. This requires looping over
 * all cpus; since there is no synchronization, the result is still only
 * approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
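/*
 * The snapshot variant is meant for decisions where a slightly stale value
 * could flip the outcome, e.g. a watermark check near its boundary
 * (hypothetical caller):
 *
 *	if (zone_page_state_snapshot(zone, NR_FREE_PAGES) <
 *			min_wmark_pages(zone))
 *		return false;
 *
 * It iterates over every online cpu, so it is much more expensive than
 * zone_page_state() and should be used sparingly.
 */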
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
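/*
 * Example (hypothetical caller): the number of file-backed pages on a node,
 * summed over all of that node's configured zones:
 *
 *	unsigned long nr_file = node_page_state(nid, NR_FILE_PAGES);
 */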
extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */
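/*
 * Updater-side sketch: mod/inc/dec_zone_page_state() may be called from any
 * context, while the double-underscore variants assume the caller already
 * runs with interrupts disabled (or is otherwise protected), e.g.:
 *
 *	inc_zone_page_state(page, NR_FILE_PAGES);	handles irq safety itself
 *
 *	local_irq_save(flags);
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *	local_irq_restore(flags);
 *
 * On !SMP both spellings collapse to the same direct atomic update.
 */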
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
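/*
 * Example (hypothetical caller): a free path returning 2^order pages would
 * keep NR_FREE_CMA_PAGES in step with NR_FREE_PAGES via:
 *
 *	__mod_zone_freepage_state(zone, 1 << order,
 *				  get_pageblock_migratetype(page));
 */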
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */