[linux-2.6-block.git] include/linux/percpu.h
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H
#include <linux/spinlock.h> /* For preempt_disable() */
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/string.h> /* For memset() */
#include <asm/percpu.h>

/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM 32768
#endif

/* Must be an lvalue. */
#define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
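
/*
 * Usage sketch (illustrative only; "my_counter" and "count_event" are
 * hypothetical names, not defined by this header).  get_cpu_var()
 * disables preemption and yields the current CPU's copy as an lvalue;
 * put_cpu_var() re-enables preemption.
 *
 *	DEFINE_PER_CPU(unsigned long, my_counter);
 *
 *	static void count_event(void)
 *	{
 *		get_cpu_var(my_counter)++;
 *		put_cpu_var(my_counter);
 *	}
 */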

#ifdef CONFIG_SMP

struct percpu_data {
	void *ptrs[NR_CPUS];
};

/*
 * Use this to get to a cpu's version of the per-cpu object allocated using
 * alloc_percpu. Non-atomic access to the current CPU's version should
 * probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu) \
({ \
	struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \
	(__typeof__(ptr))__p->ptrs[(cpu)]; \
})
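
/*
 * Note: on SMP, __alloc_percpu() appears to hand back the percpu_data
 * cookie with its bits complemented (hence the ~(unsigned long) above),
 * presumably so that direct dereferences of the returned pointer fault
 * instead of silently touching one CPU's copy.
 *
 * Usage sketch (illustrative only; "struct my_stats" and "stats" are
 * hypothetical names): pin the current CPU with get_cpu()/put_cpu()
 * while touching its copy, as the comment above suggests.
 *
 *	struct my_stats { unsigned long hits; };
 *	struct my_stats *stats = alloc_percpu(struct my_stats);
 *
 *	if (stats) {
 *		int cpu = get_cpu();
 *		per_cpu_ptr(stats, cpu)->hits++;
 *		put_cpu();
 *	}
 */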
extern void *__alloc_percpu(size_t size);
extern void free_percpu(const void *);

#else /* CONFIG_SMP */

/* UP: there is only one copy of each object; 'cpu' is evaluated but
 * otherwise ignored. */
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size)
{
	void *ret = kmalloc(size, GFP_KERNEL);
	if (ret)
		memset(ret, 0, size);
	return ret;
}

static inline void free_percpu(const void *ptr)
{
	kfree(ptr);
}

#endif /* CONFIG_SMP */

/* Simple wrapper for the common case: zeros memory. */
#define alloc_percpu(type) ((type *)(__alloc_percpu(sizeof(type))))
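
/*
 * Lifecycle sketch (illustrative only; "counters" is a hypothetical
 * name): alloc_percpu() returns zeroed storage, or NULL on failure;
 * free_percpu() releases every CPU's copy in one call.
 *
 *	long *counters = alloc_percpu(long);
 *
 *	if (!counters)
 *		return -ENOMEM;
 *	...
 *	free_percpu(counters);
 */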

#endif /* __LINUX_PERCPU_H */