#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)
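
/*
 * Illustrative usage (hypothetical counter, not part of the original
 * API): get_cpu_var() disables preemption, so the read-modify-write
 * below cannot race with migration to another CPU:
 *
 *	DEFINE_PER_CPU(int, my_hits);
 *
 *	static void count_hit(void)
 *	{
 *		get_cpu_var(my_hits)++;
 *		put_cpu_var(my_hits);
 *	}
 */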

#ifdef CONFIG_SMP

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
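
/*
 * Illustrative usage (hypothetical names): walking every possible
 * CPU's instance of a dynamically allocated percpu object, e.g. to
 * total up per-cpu counters:
 *
 *	int __percpu *counters;
 *	int cpu, sum = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(counters, cpu);
 */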

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#endif

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); (ptr); })

static inline void __init setup_per_cpu_areas(void) { }

static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}

#endif /* CONFIG_SMP */

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
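
/*
 * Illustrative usage (hypothetical struct and names): one instance of
 * the type is allocated for each possible CPU:
 *
 *	struct foo_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	free_percpu(stats);
 */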

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal that of a char, int or long.  percpu_read() evaluates to an
 * lvalue and all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(var) *pr_ptr__ = &(var);					\
	typeof(var) pr_ret__;						\
	pr_ret__ = get_cpu_var(*pr_ptr__);				\
	put_cpu_var(*pr_ptr__);						\
	pr_ret__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	typeof(var) *pgto_ptr__ = &(var);				\
	get_cpu_var(*pgto_ptr__) op val;				\
	put_cpu_var(*pgto_ptr__);					\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
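
/*
 * Illustrative usage (hypothetical variable): each operation is atomic
 * w.r.t. preemption, so no explicit get/put_cpu_var() pair is needed:
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	percpu_add(nr_events, 1);
 *	pr_debug("events: %lu\n", percpu_read(nr_events));
 */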

/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)
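
/*
 * Illustrative expansion (assuming a 4-byte int): with
 * DEFINE_PER_CPU(int, x), sizeof(x) == 4, so this_cpu_add(x, 3)
 * dispatches to this_cpu_add_4(x, 3).  A size without a case falls
 * into __bad_size_call_parameter(), which is declared but defined
 * nowhere, turning the mistake into a link-time error.
 */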

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access with respect to
 * other operations on the *same* processor. The assumption is that
 * per cpu data is only accessed by a single processor instance (the
 * current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since the context is not known to be preempt
 * safe. Interrupts may still occur; if an interrupt also modifies the
 * variable then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely. E.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes. E.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions. If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */

#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	preempt_disable();						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	preempt_enable();						\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)	__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)	this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)	this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)	this_cpu_sub((pcp), 1)
#endif
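
/*
 * Illustrative usage (hypothetical name): a statistics counter updated
 * from preemptible context; this_cpu_inc() itself provides the
 * preemption protection:
 *
 *	DEFINE_PER_CPU(unsigned long, rx_packets);
 *
 *	static void on_rx(void)
 *	{
 *		this_cpu_inc(rx_packets);
 *	}
 */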

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)	__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)	__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)	__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues. Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)	__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)	__this_cpu_sub((pcp), 1)
#endif
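
/*
 * Illustrative usage (hypothetical names): the caller provides the
 * preemption protection, so several updates can share one
 * preempt_disable()/preempt_enable() pair:
 *
 *	preempt_disable();
 *	__this_cpu_inc(cache_misses);
 *	__this_cpu_add(wait_cycles, delta);
 *	preempt_enable();
 */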

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

/*
 * IRQ safe versions of the per cpu RMW operations. Note that these
 * operations are *not* safe against modification of the same variable
 * from another processor (which is what one gets when using regular
 * atomic operations). They are guaranteed to be atomic vs. local
 * interrupts and preemption only.
 */
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);					\
} while (0)

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
#  define irqsafe_cpu_add_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
#  define irqsafe_cpu_add_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
#  define irqsafe_cpu_add_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
#  define irqsafe_cpu_add_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val)	__pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
#endif
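
/*
 * Illustrative usage (hypothetical name): a counter that is also
 * modified from interrupt context must use the irqsafe variants so a
 * local interrupt cannot tear the read-modify-write:
 *
 *	irqsafe_cpu_inc(irq_events);
 */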

#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
#  define irqsafe_cpu_and_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
#  define irqsafe_cpu_and_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
#  define irqsafe_cpu_and_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
#  define irqsafe_cpu_and_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val)	__pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
#  define irqsafe_cpu_or_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
#  define irqsafe_cpu_or_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
#  define irqsafe_cpu_or_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
#  define irqsafe_cpu_or_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val)	__pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
#  define irqsafe_cpu_xor_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
#  define irqsafe_cpu_xor_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
#  define irqsafe_cpu_xor_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
#  define irqsafe_cpu_xor_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#endif /* __LINUX_PERCPU_H */