/*
 * include/linux/percpu.h
 *
 * percpu, module: implement reserved allocation and use it for module
 * percpu variables.
 */
1#ifndef __LINUX_PERCPU_H
2#define __LINUX_PERCPU_H
7ff6f082 3
0a3021f4 4#include <linux/preempt.h>
1da177e4
LT
5#include <linux/slab.h> /* For kmalloc() */
6#include <linux/smp.h>
7ff6f082 7#include <linux/cpumask.h>
6a242909 8#include <linux/pfn.h>
7ff6f082 9
1da177e4
LT
10#include <asm/percpu.h>

/*
 * Linker-section names used by the DEFINE_PER_CPU* macros below.
 * An arch may pre-define PER_CPU_BASE_SECTION; otherwise static percpu
 * data lives in .data.percpu on SMP and plain .data on UP.
 */
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

/*
 * Modules can't use the .shared_aligned subsection (their percpu area
 * is laid out by the module loader), so the suffix is empty there.
 */
#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

/*
 * Define a static per-cpu variable @name of @type, placed in the percpu
 * base section with the given @section suffix appended.
 */
#define DEFINE_PER_CPU_SECTION(type, name, section)			\
	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

/* cacheline-aligned variant to avoid false sharing between CPUs */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

/*
 * Total first-chunk room: the static percpu image (cacheline aligned)
 * plus the reserve for module percpu variables.  An arch may override.
 */
#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 *
 * Disables preemption so the current CPU's instance stays valid;
 * pair every use with put_cpu_var().
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()

79#ifdef CONFIG_SMP
80
fbf59bc9 81#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
1da177e4 82
8d408b4b 83/* minimum unit size, also is the maximum supported allocation size */
6a242909 84#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
8d408b4b
TH
85
86/*
87 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
88 * back on the first chunk if arch is manually allocating and mapping
89 * it for faster access (as a part of large page mapping for example).
90 * Note that dynamic percpu allocator covers both static and dynamic
91 * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
92 *
93 * On typical configuration with modules, the following values leave
94 * about 8k of free space on the first chunk after boot on both x86_32
95 * and 64 when module support is enabled. When module support is
96 * disabled, it's much tighter.
97 */
98#ifndef PERCPU_DYNAMIC_RESERVE
99# if BITS_PER_LONG > 32
100# ifdef CONFIG_MODULES
6a242909 101# define PERCPU_DYNAMIC_RESERVE (24 << 10)
8d408b4b 102# else
6a242909 103# define PERCPU_DYNAMIC_RESERVE (16 << 10)
8d408b4b
TH
104# endif
105# else
106# ifdef CONFIG_MODULES
6a242909 107# define PERCPU_DYNAMIC_RESERVE (16 << 10)
8d408b4b 108# else
6a242909 109# define PERCPU_DYNAMIC_RESERVE (8 << 10)
8d408b4b
TH
110# endif
111# endif
112#endif /* PERCPU_DYNAMIC_RESERVE */
113
fbf59bc9 114extern void *pcpu_base_addr;
1da177e4 115
8d408b4b 116typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
fbf59bc9
TH
117typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
118
8d408b4b 119extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
edcb4639
TH
120 size_t static_size, size_t reserved_size,
121 ssize_t unit_size, ssize_t dyn_size,
122 void *base_addr,
123 pcpu_populate_pte_fn_t populate_pte_fn);
8d408b4b 124
f2a8205c
TH
125/*
126 * Use this to get to a cpu's version of the per-cpu object
127 * dynamically allocated. Non-atomic access to the current CPU's
128 * version should probably be combined with get_cpu()/put_cpu().
129 */
fbf59bc9
TH
130#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
131
edcb4639
TH
132extern void *__alloc_reserved_percpu(size_t size, size_t align);
133
fbf59bc9
TH
134#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
135
136struct percpu_data {
137 void *ptrs[1];
138};
139
140#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
141
f2a8205c
TH
142#define per_cpu_ptr(ptr, cpu) \
143({ \
144 struct percpu_data *__p = __percpu_disguise(ptr); \
145 (__typeof__(ptr))__p->ptrs[(cpu)]; \
146})
147
fbf59bc9
TH
148#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
149
f2a8205c
TH
150extern void *__alloc_percpu(size_t size, size_t align);
151extern void free_percpu(void *__pdata);
1da177e4
LT
152
153#else /* CONFIG_SMP */
154
b36128c8 155#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
7ff6f082 156
f2a8205c 157static inline void *__alloc_percpu(size_t size, size_t align)
7ff6f082 158{
f2a8205c
TH
159 /*
160 * Can't easily make larger alignment work with kmalloc. WARN
161 * on it. Larger alignment should only be used for module
162 * percpu sections on SMP for which this path isn't used.
163 */
e3176036 164 WARN_ON_ONCE(align > SMP_CACHE_BYTES);
d2b02615 165 return kzalloc(size, GFP_KERNEL);
7ff6f082
MP
166}
167
f2a8205c 168static inline void free_percpu(void *p)
7ff6f082 169{
f2a8205c 170 kfree(p);
1da177e4
LT
171}
172
173#endif /* CONFIG_SMP */
174
313e458f
RR
175#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
176 __alignof__(type))
1da177e4
LT
177
178#endif /* __LINUX_PERCPU_H */