/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files. Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations. It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */

#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument. This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE! The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS						\
	__section(".discard") __attribute__((unused))

/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them. This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition places the following two extra restrictions on
 * defining percpu variables.
 *
 * 1. The symbol must be globally unique, even for static variables.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two
 * restrictions, the weak definition is used for all cases when
 * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope. It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that a hidden weak symbol collision, which would cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
	__PCPU_ATTRS(sec) __weak __typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) __typeof__(type) name
#endif

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

/*
 * Declaration/definition used for per-CPU variables that are mostly
 * read and rarely written.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")

/*
 * Declaration/definition used for per-CPU variables that should be accessed
 * as decrypted when memory encryption is enabled in the guest.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define DECLARE_PER_CPU_DECRYPTED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..decrypted")

#define DEFINE_PER_CPU_DECRYPTED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..decrypted")
#else
#define DEFINE_PER_CPU_DECRYPTED(type, name)	DEFINE_PER_CPU(type, name)
#endif

/*
 * Intermodule exports for per-CPU variables. sparse forgets about the
 * address space across EXPORT_SYMBOL(), so make EXPORT_SYMBOL() a no-op
 * when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif

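/*
 * Usage sketch (illustrative; "pkt_count" is the hypothetical variable
 * from above). The export sits next to the definition so modules can
 * use the DECLARE from the shared header:
 *
 *	DEFINE_PER_CPU(unsigned long, pkt_count);
 *	EXPORT_PER_CPU_SYMBOL(pkt_count);
 */
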
/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

/*
 * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
 * @ptr and is invoked once before a percpu area is accessed by all
 * accessors and operations. This is performed in the generic part of
 * percpu and arch overrides don't need to worry about it; however, if an
 * arch wants to implement an arch-specific percpu accessor or operation,
 * it may use __verify_pcpu_ptr() to verify the parameters.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE()
 * to prevent the compiler from making incorrect assumptions about the
 * pointer value. The weird cast keeps both GCC and sparse happy.
 */
#define SHIFT_PERCPU_PTR(__p, __offset)					\
	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))

#define per_cpu_ptr(ptr, cpu)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
})

#define raw_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	arch_raw_cpu_ptr(ptr);						\
})

#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
})
#else
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
#endif

#else	/* CONFIG_SMP */

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

#endif	/* CONFIG_SMP */

#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))

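/*
 * Usage sketch (illustrative; "pkt_count" is hypothetical): summing a
 * per-CPU counter from some housekeeping context:
 *
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += per_cpu(pkt_count, cpu);
 *
 * per_cpu_ptr(&pkt_count, cpu) yields the address of @cpu's instance.
 */
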
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var)						\
(*({									\
	preempt_disable();						\
	this_cpu_ptr(&var);						\
}))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var)						\
do {									\
	(void)&(var);							\
	preempt_enable();						\
} while (0)

#define get_cpu_ptr(var)						\
({									\
	preempt_disable();						\
	this_cpu_ptr(var);						\
})

#define put_cpu_ptr(var)						\
do {									\
	(void)(var);							\
	preempt_enable();						\
} while (0)

/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static __always_inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)				\
({									\
	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define __pcpu_size_call_return2bool(stem, variable, ...)		\
({									\
	bool pscr2_ret__;						\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)

/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access with respect to
 * other operations on the *same* processor. The assumption is that
 * per cpu data is only accessed by a single processor instance (the
 * current one).
 *
 * The arch code can provide optimized implementations by defining macros
 * for certain scalar sizes, e.g. this_cpu_add_2() to provide per cpu
 * atomic operations for 2 byte sized RMW actions. If arch code does
 * not provide operations for a scalar size then the fallback in the
 * generic code will be used.
 *
 * The try_cmpxchg variants return a truth value to indicate success
 * and, on failure, update the expected old value through @ovalp.
 */

/*
 * Operations for contexts where we do not want to do any checks for
 * preemption. Unless strictly necessary, always use [__]this_cpu_*()
 * instead.
 *
 * If there is no other protection through preempt disable and/or disabling
 * interrupts, then one of these RMW operations can show unexpected behavior
 * because the execution thread was rescheduled on another processor or an
 * interrupt occurred and the same percpu variable was modified from the
 * interrupt context.
 */
#define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
#define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
#define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
#define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
#define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
#define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#define raw_cpu_try_cmpxchg(pcp, ovalp, nval) \
	__pcpu_size_call_return2bool(raw_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)

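/*
 * Sketch of a legitimate raw_cpu_*() use (illustrative; "sample_count"
 * is hypothetical): loosely accurate statistics, where it is acceptable
 * for a rare update to land on another CPU's counter after a reschedule,
 * so neither preemption protection nor the debug check is wanted:
 *
 *	raw_cpu_inc(sample_count);
 */
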
/*
 * Operations for contexts that are safe from preemption/interrupts. These
 * operations verify that preemption is disabled.
 */
#define __this_cpu_read(pcp)						\
({									\
	__this_cpu_preempt_check("read");				\
	raw_cpu_read(pcp);						\
})

#define __this_cpu_write(pcp, val)					\
({									\
	__this_cpu_preempt_check("write");				\
	raw_cpu_write(pcp, val);					\
})

#define __this_cpu_add(pcp, val)					\
({									\
	__this_cpu_preempt_check("add");				\
	raw_cpu_add(pcp, val);						\
})

#define __this_cpu_and(pcp, val)					\
({									\
	__this_cpu_preempt_check("and");				\
	raw_cpu_and(pcp, val);						\
})

#define __this_cpu_or(pcp, val)						\
({									\
	__this_cpu_preempt_check("or");					\
	raw_cpu_or(pcp, val);						\
})

#define __this_cpu_add_return(pcp, val)					\
({									\
	__this_cpu_preempt_check("add_return");				\
	raw_cpu_add_return(pcp, val);					\
})

#define __this_cpu_xchg(pcp, nval)					\
({									\
	__this_cpu_preempt_check("xchg");				\
	raw_cpu_xchg(pcp, nval);					\
})

#define __this_cpu_cmpxchg(pcp, oval, nval)				\
({									\
	__this_cpu_preempt_check("cmpxchg");				\
	raw_cpu_cmpxchg(pcp, oval, nval);				\
})

#define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

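/*
 * Usage sketch (illustrative; "pkt_count" is hypothetical): __this_cpu_*()
 * is for sections that have already disabled preemption, keeping the
 * debug check:
 *
 *	preempt_disable();
 *	__this_cpu_inc(pkt_count);
 *	preempt_enable();
 */
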
/*
 * Operations with implied preemption/interrupt protection. These
 * operations can be used without worrying about preemption or interrupts.
 */
#define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
#define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
#define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
#define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
#define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#define this_cpu_try_cmpxchg(pcp, ovalp, nval) \
	__pcpu_size_call_return2bool(this_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)

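/*
 * Usage sketch (illustrative; names hypothetical): this_cpu ops are safe
 * from any context, including preemptible code:
 *
 *	unsigned long total;
 *
 *	this_cpu_inc(pkt_count);
 *	total = this_cpu_read(pkt_count);
 */
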
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_PERCPU_DEFS_H */