Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __LINUX_PERCPU_H |
2 | #define __LINUX_PERCPU_H | |
7ff6f082 | 3 | |
0a3021f4 | 4 | #include <linux/preempt.h> |
1da177e4 LT |
5 | #include <linux/slab.h> /* For kmalloc() */ |
6 | #include <linux/smp.h> | |
7ff6f082 MP |
7 | #include <linux/cpumask.h> |
8 | ||
1da177e4 LT |
9 | #include <asm/percpu.h> |
10 | ||
/*
 * Linker-section plumbing for per-cpu variables.  Architectures may
 * pre-define PER_CPU_BASE_SECTION before including this header to
 * override the default placement.
 */
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
/* UP: per-cpu data needs no dedicated section; ordinary .data suffices */
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
/*
 * Modules get no special sub-sections: their percpu area is laid out
 * by the module loader, so the suffix collapses to the base section.
 */
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

/* UP: all section suffixes collapse to the plain base section */
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif
63cc8c75 | 34 | |
/*
 * Define a per-cpu variable of @type named @name, placed in the given
 * sub-section of the per-cpu base section.  The actual symbol is the
 * mangled per_cpu__##name; accessors reference it via that name.
 */
#define DEFINE_PER_CPU_SECTION(type, name, section)			\
	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

/* Plain per-cpu variable in the base section (empty suffix). */
#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

/*
 * Cacheline-aligned via ____cacheline_aligned_in_smp; both the
 * alignment and the dedicated section are no-ops on UP and in modules
 * (PER_CPU_SHARED_ALIGNED_SECTION is "" there).
 */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

/* Per-cpu variable placed in the ".page_aligned" sub-section. */
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

/* Per-cpu variable placed in the ".first" sub-section (SMP only). */
#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/* Export the mangled per_cpu__ symbol, not @var itself. */
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
54 | ||
/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#ifdef CONFIG_MODULES
/* Extra bytes reserved for per-cpu variables declared by modules. */
#define PERCPU_MODULE_RESERVE	8192
#else
#define PERCPU_MODULE_RESERVE	0
#endif

/*
 * Static per-cpu image size (linker symbols bracketing the section)
 * plus the module reserve.
 */
#define PERCPU_ENOUGH_ROOM						\
	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif	/* PERCPU_ENOUGH_ROOM */
66 | ||
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 *
 * Disables preemption so the caller stays on one CPU until the
 * matching put_cpu_var().
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
/* Pairs with get_cpu_var(): re-enables preemption; @var is unused. */
#define put_cpu_var(var) preempt_enable()
76 | ||
#ifdef CONFIG_SMP

#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		(16UL << PAGE_SHIFT)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk if arch is manually allocating and mapping
 * it for faster access (as a part of large page mapping for example).
 * Note that dynamic percpu allocator covers both static and dynamic
 * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
 *
 * On typical configuration with modules, the following values leave
 * about 8k of free space on the first chunk after boot on both x86_32
 * and 64 when module support is enabled.  When module support is
 * disabled, it's much tighter.
 */
#ifndef PERCPU_DYNAMIC_RESERVE
#  if BITS_PER_LONG > 32
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE	(6 << PAGE_SHIFT)
#    else
#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
#    endif
#  else
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
#    else
#      define PERCPU_DYNAMIC_RESERVE	(2 << PAGE_SHIFT)
#    endif
#  endif
#endif	/* PERCPU_DYNAMIC_RESERVE */

/* base address of the first percpu chunk; set by the percpu allocator */
extern void *pcpu_base_addr;

/*
 * Callback types for pcpu_setup_first_chunk().  NOTE(review): exact
 * contracts live with the allocator implementation (mm/percpu.c);
 * names suggest get_page_fn returns the page backing @pageno of
 * @cpu's unit and populate_pte_fn installs a PTE for @addr — confirm
 * against that file.
 */
typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);

extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				size_t static_size, size_t unit_size,
				size_t free_size, void *base_addr,
				pcpu_populate_pte_fn_t populate_pte_fn);

/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

/* Legacy allocator: an array of per-cpu object pointers, one per CPU. */
struct percpu_data {
	void *ptrs[1];
};

/*
 * Bitwise-complement the pointer so the handle handed to callers is
 * not directly dereferenceable (presumably to catch code bypassing
 * per_cpu_ptr() — the complement is undone below).
 */
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)

#define per_cpu_ptr(ptr, cpu)						\
({									\
        struct percpu_data *__p = __percpu_disguise(ptr);		\
        (__typeof__(ptr))__p->ptrs[(cpu)];				\
})

#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

/* Allocate/free a per-cpu object; implemented by the percpu allocator. */
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
1da177e4 LT |
147 | |
148 | #else /* CONFIG_SMP */ | |
149 | ||
b36128c8 | 150 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) |
7ff6f082 | 151 | |
f2a8205c | 152 | static inline void *__alloc_percpu(size_t size, size_t align) |
7ff6f082 | 153 | { |
f2a8205c TH |
154 | /* |
155 | * Can't easily make larger alignment work with kmalloc. WARN | |
156 | * on it. Larger alignment should only be used for module | |
157 | * percpu sections on SMP for which this path isn't used. | |
158 | */ | |
e3176036 | 159 | WARN_ON_ONCE(align > SMP_CACHE_BYTES); |
d2b02615 | 160 | return kzalloc(size, GFP_KERNEL); |
7ff6f082 MP |
161 | } |
162 | ||
f2a8205c | 163 | static inline void free_percpu(void *p) |
7ff6f082 | 164 | { |
f2a8205c | 165 | kfree(p); |
1da177e4 LT |
166 | } |
167 | ||
168 | #endif /* CONFIG_SMP */ | |
169 | ||
/*
 * Typed front end for __alloc_percpu(): allocate one instance of
 * @type per CPU with the type's natural alignment, returning a
 * (type *) handle for use with per_cpu_ptr().  Release with
 * free_percpu().
 */
#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
						       __alignof__(type))

#endif /* __LINUX_PERCPU_H */