percpu: build first chunk allocators selectively
[linux-2.6-block.git] include/linux/percpu.h

#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif
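
/*
 * Worked example (illustrative, not part of the original header): with
 * a 4 KB static percpu section and CONFIG_MODULES=y, the static size
 * is aligned up to SMP_CACHE_BYTES and the 8 KB (8 << 10) module
 * reserve is added on top, giving a PERCPU_ENOUGH_ROOM of roughly
 * 12 KB.
 */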

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
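
/*
 * Usage sketch (illustrative; assumes a hypothetical counter defined
 * elsewhere with DEFINE_PER_CPU(int, hyp_count)):
 *
 *	get_cpu_var(hyp_count)++;
 *	put_cpu_var(hyp_count);
 *
 * get_cpu_var() disables preemption before taking the address of this
 * CPU's copy and put_cpu_var() re-enables it, so the task cannot
 * migrate between taking the per-cpu address and using it.
 */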

#ifdef CONFIG_SMP

#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32. A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const int *pcpu_unit_map;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
typedef void (*pcpu_fc_map_fn_t)(void *ptr, size_t size, void *addr);

extern size_t __init pcpu_setup_first_chunk(
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size, size_t unit_size,
				void *base_addr, const int *unit_map);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern ssize_t __init pcpu_embed_first_chunk(
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern ssize_t __init pcpu_page_first_chunk(
				size_t static_size, size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
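
/*
 * Sketch of the callback contract (illustrative, not from the original
 * header): an arch selecting pcpu_page_first_chunk() passes bootmem
 * helpers roughly like the hypothetical ones below, which the percpu
 * code calls once per cpu to back the first chunk page by page.
 *
 *	static void * __init hyp_pcpu_alloc(unsigned int cpu, size_t size)
 *	{
 *		return alloc_bootmem(size);
 *	}
 *
 *	static void __init hyp_pcpu_free(void *ptr, size_t size)
 *	{
 *		free_bootmem(__pa(ptr), size);
 *	}
 *
 *	pcpu_page_first_chunk(static_size, PERCPU_MODULE_RESERVE,
 *			      hyp_pcpu_alloc, hyp_pcpu_free,
 *			      hyp_populate_pte);
 */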

#ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
extern int __init pcpu_lpage_build_unit_map(
				size_t static_size, size_t reserved_size,
				ssize_t *dyn_sizep, size_t *unit_sizep,
				size_t lpage_size, int *unit_map,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);

extern ssize_t __init pcpu_lpage_first_chunk(
				size_t static_size, size_t reserved_size,
				size_t dyn_size, size_t unit_size,
				size_t lpage_size, const int *unit_map,
				int nr_units,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_map_fn_t map_fn);

extern void *pcpu_lpage_remapped(void *kaddr);
#else
static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}
#endif

/*
 * Use this to get to a cpu's version of the dynamically allocated
 * per-cpu object. Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
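
/*
 * Usage sketch (illustrative; 'struct hyp_stats' and its 'packets'
 * field are hypothetical):
 *
 *	struct hyp_stats *p = alloc_percpu(struct hyp_stats);
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		per_cpu_ptr(p, cpu)->packets = 0;
 *
 *	cpu = get_cpu();
 *	per_cpu_ptr(p, cpu)->packets++;
 *	put_cpu();
 */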

extern void *__alloc_reserved_percpu(size_t size, size_t align);

#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */

struct percpu_data {
	void *ptrs[1];
};

/* pointer disguising messes up the kmemleak objects tracking */
#ifndef CONFIG_DEBUG_KMEMLEAK
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
#else
#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
#endif
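
/*
 * Note (illustrative): the disguise is a plain one's complement of the
 * pointer bits, e.g. ~0xffff880012345678UL == 0x000077ffedcba987UL.
 * Complementing again recovers the original, which is how per_cpu_ptr()
 * below gets back the real struct percpu_data pointer, while the stored
 * value no longer looks like a heap address to a scanner such as
 * kmemleak.
 */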

#define per_cpu_ptr(ptr, cpu)						\
({									\
	struct percpu_data *__p = __percpu_disguise(ptr);		\
	(__typeof__(ptr))__p->ptrs[(cpu)];				\
})

#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#endif

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc. WARN
	 * on it. Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
	kfree(p);
}

static inline void __init setup_per_cpu_areas(void) { }

static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type),	\
						       __alignof__(type))
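
/*
 * Usage sketch (illustrative): alloc_percpu() derives both size and
 * alignment from the type and casts the result, so callers normally
 * never invoke __alloc_percpu() directly.
 *
 *	struct hyp_stats *stats = alloc_percpu(struct hyp_stats);
 *
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	free_percpu(stats);
 */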

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal char, int or long. percpu_read() evaluates to an lvalue and
 * all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var(). Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(per_cpu_var(var)) __tmp_var__;				\
	__tmp_var__ = get_cpu_var(var);					\
	put_cpu_var(var);						\
	__tmp_var__;							\
  })
#endif
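
/*
 * Usage sketch (illustrative; 'hyp_count' is a hypothetical
 * DEFINE_PER_CPU(int, hyp_count) variable):
 *
 *	int snapshot = percpu_read(hyp_count);
 *
 * With the generic fallback this is a preempt-safe copy of this CPU's
 * value; an arch override can turn it into a single segment-relative
 * load (e.g. %gs-based on x86_64).
 */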

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	get_cpu_var(var) op val;					\
	put_cpu_var(var);						\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
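
/*
 * Expansion sketch (illustrative): with the generic fallback,
 * percpu_add(hyp_count, 3) becomes
 *
 *	do {
 *		get_cpu_var(hyp_count) += (3);
 *		put_cpu_var(hyp_count);
 *	} while (0)
 *
 * so the read-modify-write cannot be split across a CPU migration.
 */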

#endif /* __LINUX_PERCPU_H */