/* include/linux/vmstat.h */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
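/*
 * Illustrative expansion (assuming a configuration with CONFIG_ZONE_DMA
 * and CONFIG_HIGHMEM set and CONFIG_ZONE_DMA32 unset):
 *
 *	FOR_ALL_ZONES(PGALLOC)
 *		=> PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * so every FOR_ALL_ZONES() user below gets one item per compiled-in zone,
 * in the same order as the zone indexes.
 */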

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
#ifdef CONFIG_UNEVICTABLE_LRU
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
#endif
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/* Caller must ensure the CPU cannot change underneath it (e.g. preemption
 * or interrupts disabled). */
static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

/* Preemption-safe variant: pins the CPU around the increment. */
static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
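
/*
 * Usage sketch (hypothetical call site, not part of this header): a
 * context that may be preempted uses the pinned variant,
 *
 *	count_vm_event(PGFAULT);
 *
 * while code already running with preemption off can take the cheaper
 * path with __count_vm_event(PGFAULT).
 */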

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

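/*
 * Illustrative expansion (not taken from this file): with item == PGREFILL
 * and a highmem zone,
 *
 *	__count_zone_vm_events(PGREFILL, zone, 1)
 *
 * becomes __count_vm_events(PGREFILL_NORMAL - ZONE_NORMAL + zone_idx(zone), 1),
 * which selects PGREFILL_HIGH because the FOR_ALL_ZONES() items are laid
 * out in zone-index order.
 */
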
/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

/* Apply a delta to both the per-zone and the global counter. */
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

/*
 * On SMP, reads can race with per-CPU differentials that have not yet
 * been folded back, so a counter may transiently appear negative; such
 * reads are clamped to zero.
 */
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
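
/*
 * Usage sketch (hypothetical caller): counters can be read locklessly,
 * accepting the small unfolded per-CPU drift, e.g.
 *
 *	unsigned long free = global_page_state(NR_FREE_PAGES);
 */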

extern unsigned long global_lru_pages(void);

/* Pages on this zone's anon and file LRU lists, active plus inactive. */
static inline unsigned long zone_lru_pages(struct zone *zone)
{
	return (zone_page_state(zone, NR_ACTIVE_ANON)
		+ zone_page_state(zone, NR_ACTIVE_FILE)
		+ zone_page_state(zone, NR_INACTIVE_ANON)
		+ zone_page_state(zone, NR_INACTIVE_FILE));
}

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function is called
 * frequently on NUMA machines, so be as frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */
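
/*
 * Usage sketch (hypothetical caller): NUMA-aware code can read a node-wide
 * count without iterating the zones by hand, e.g.
 *
 *	unsigned long file_pages = node_page_state(nid, NR_FILE_PAGES);
 */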

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We use only atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }

#endif /* CONFIG_SMP */
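
/*
 * Usage sketch (hypothetical call site): crediting freed pages to a zone
 * from a context that already has interrupts disabled might look like
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *
 * whereas the non-__ variants should be used when no such guarantee holds.
 */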

#endif /* _LINUX_VMSTAT_H */