/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
# define __percpu_seg gs
# define __percpu_rel (%rip)
#else
# define __percpu_seg fs
# define __percpu_rel
#endif

#ifdef __ASSEMBLER__

#ifdef CONFIG_SMP
# define __percpu %__percpu_seg:
#else
# define __percpu
#endif

#define PER_CPU_VAR(var) __percpu(var)__percpu_rel
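
/*
 * Illustrative use from assembly (a sketch; "hypothetical_var" is a
 * made-up per-CPU symbol, not one defined by this header):
 *
 *        movq    PER_CPU_VAR(hypothetical_var), %rax
 *
 * On a 64-bit SMP build this expands to roughly
 * "movq %gs:hypothetical_var(%rip), %rax".
 */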

#else /* !__ASSEMBLER__: */

#include <linux/args.h>
#include <linux/build_bug.h>
#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP

#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"

#ifdef CONFIG_CC_HAS_NAMED_AS

#ifdef __CHECKER__
# define __seg_gs __attribute__((address_space(__seg_gs)))
# define __seg_fs __attribute__((address_space(__seg_fs)))
#endif

#define __percpu_prefix
#define __percpu_seg_override CONCATENATE(__seg_, __percpu_seg)

#else /* !CONFIG_CC_HAS_NAMED_AS: */

#define __percpu_prefix __force_percpu_prefix
#define __percpu_seg_override

#endif /* CONFIG_CC_HAS_NAMED_AS */

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define __my_cpu_offset this_cpu_read(this_cpu_off)
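
/*
 * Sketch of the saving (assuming the generic fallback in
 * asm-generic/percpu.h): the generic __my_cpu_offset indexes
 * __per_cpu_offset[] by raw_smp_processor_id(), which needs an extra
 * load and a temporary register, while the definition above is a
 * single %gs- (or %fs-) relative move.
 */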

/*
 * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
 * kernel, because games are played with CONFIG_X86_64 there and
 * sizeof(this_cpu_off) becomes 4.
 */
#ifndef BUILD_VDSO32_64
#define arch_raw_cpu_ptr(_ptr) \
({ \
        unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off); \
 \
        tcp_ptr__ += (__force unsigned long)(_ptr); \
        (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)tcp_ptr__; \
})
#else
#define arch_raw_cpu_ptr(_ptr) \
({ \
        BUILD_BUG(); \
        (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)0; \
})
#endif
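
/*
 * Illustrative expansion (a sketch with a hypothetical variable):
 * given DEFINE_PER_CPU(int, hypo_cnt), arch_raw_cpu_ptr(&hypo_cnt)
 * evaluates to this_cpu_off + (unsigned long)&hypo_cnt, i.e. the
 * linear address of this CPU's instance, which this_cpu_ptr() and
 * friends then dereference as an ordinary pointer.
 */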

#define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel

#else /* !CONFIG_SMP: */

#define __force_percpu_prefix
#define __percpu_prefix
#define __percpu_seg_override

#define PER_CPU_VAR(var) (var)__percpu_rel

#endif /* CONFIG_SMP */

#if defined(CONFIG_USE_X86_SEG_SUPPORT) && defined(USE_TYPEOF_UNQUAL)
# define __my_cpu_type(var) typeof(var)
# define __my_cpu_ptr(ptr) (ptr)
# define __my_cpu_var(var) (var)

# define __percpu_qual __percpu_seg_override
#else
# define __my_cpu_type(var) typeof(var) __percpu_seg_override
# define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr)
# define __my_cpu_var(var) (*__my_cpu_ptr(&(var)))
#endif

#define __force_percpu_arg(x) __force_percpu_prefix "%" #x
#define __percpu_arg(x) __percpu_prefix "%" #x

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */

#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
#define __pcpu_type_4 u32
#define __pcpu_type_8 u64

#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val) ((u64)(val))

#define __pcpu_op_1(op) op "b "
#define __pcpu_op_2(op) op "w "
#define __pcpu_op_4(op) op "l "
#define __pcpu_op_8(op) op "q "

#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
#define __pcpu_reg_4(mod, x) mod "r" (x)
#define __pcpu_reg_8(mod, x) mod "r" (x)

#define __pcpu_reg_imm_1(x) "qi" (x)
#define __pcpu_reg_imm_2(x) "ri" (x)
#define __pcpu_reg_imm_4(x) "ri" (x)
#define __pcpu_reg_imm_8(x) "re" (x)
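
/*
 * Illustrative composition (a sketch): for a 4-byte access the helpers
 * above select __pcpu_type_4 (u32), __pcpu_op_4("mov") ("movl ") and
 * __pcpu_reg_4("=", x) ("=r" (x)), so a read of a hypothetical u32
 * variable compiles to roughly "movl %gs:hypo_var(%rip), %eax".
 */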

#ifdef CONFIG_USE_X86_SEG_SUPPORT

#define __raw_cpu_read(size, qual, pcp) \
({ \
        *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)); \
})

#define __raw_cpu_write(size, qual, pcp, val) \
do { \
        *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val); \
} while (0)

#define __raw_cpu_read_const(pcp) __raw_cpu_read(, , pcp)

#else /* !CONFIG_USE_X86_SEG_SUPPORT: */

#define __raw_cpu_read(size, qual, _var) \
({ \
        __pcpu_type_##size pfo_val__; \
 \
        asm qual (__pcpu_op_##size("mov") \
                  __percpu_arg([var]) ", %[val]" \
                  : [val] __pcpu_reg_##size("=", pfo_val__) \
                  : [var] "m" (__my_cpu_var(_var))); \
 \
        (typeof(_var))(unsigned long) pfo_val__; \
})

#define __raw_cpu_write(size, qual, _var, _val) \
do { \
        __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
 \
        if (0) { \
                TYPEOF_UNQUAL(_var) pto_tmp__; \
                pto_tmp__ = (_val); \
                (void)pto_tmp__; \
        } \
        asm qual (__pcpu_op_##size("mov") "%[val], " \
                  __percpu_arg([var]) \
                  : [var] "=m" (__my_cpu_var(_var)) \
                  : [val] __pcpu_reg_imm_##size(pto_val__)); \
} while (0)

/*
 * The generic per-CPU infrastructure is not suitable for
 * reading const-qualified variables.
 */
#define __raw_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })

#endif /* CONFIG_USE_X86_SEG_SUPPORT */
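
/*
 * Illustrative contrast (a sketch): with CONFIG_USE_X86_SEG_SUPPORT the
 * compiler itself emits the segment-relative access for a plain C
 * load/store through the __seg_gs/__seg_fs address space, so
 * this_cpu_read() needs no inline asm; without it, the explicit "mov"
 * templates above supply the %gs:/%fs: prefix by hand.
 */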

#define __raw_cpu_read_stable(size, _var) \
({ \
        __pcpu_type_##size pfo_val__; \
 \
        asm(__pcpu_op_##size("mov") \
            __force_percpu_arg(a[var]) ", %[val]" \
            : [val] __pcpu_reg_##size("=", pfo_val__) \
            : [var] "i" (&(_var))); \
 \
        (typeof(_var))(unsigned long) pfo_val__; \
})

#define percpu_unary_op(size, qual, op, _var) \
({ \
        asm qual (__pcpu_op_##size(op) __percpu_arg([var]) \
                  : [var] "+m" (__my_cpu_var(_var))); \
})

#define percpu_binary_op(size, qual, op, _var, _val) \
do { \
        __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
 \
        if (0) { \
                TYPEOF_UNQUAL(_var) pto_tmp__; \
                pto_tmp__ = (_val); \
                (void)pto_tmp__; \
        } \
        asm qual (__pcpu_op_##size(op) "%[val], " __percpu_arg([var]) \
                  : [var] "+m" (__my_cpu_var(_var)) \
                  : [val] __pcpu_reg_imm_##size(pto_val__)); \
} while (0)

/*
 * Generate a per-CPU add-to-memory instruction, optimizing the code
 * to use INC/DEC when the value added or subtracted is 1 or -1.
 */
#define percpu_add_op(size, qual, var, val) \
do { \
        const int pao_ID__ = \
                (__builtin_constant_p(val) && \
                 ((val) == 1 || \
                  (val) == (typeof(val))-1)) ? (int)(val) : 0; \
 \
        if (0) { \
                TYPEOF_UNQUAL(var) pao_tmp__; \
                pao_tmp__ = (val); \
                (void)pao_tmp__; \
        } \
        if (pao_ID__ == 1) \
                percpu_unary_op(size, qual, "inc", var); \
        else if (pao_ID__ == -1) \
                percpu_unary_op(size, qual, "dec", var); \
        else \
                percpu_binary_op(size, qual, "add", var, val); \
} while (0)
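
/*
 * Illustrative result (a sketch with a hypothetical variable):
 * this_cpu_inc(hypo_cnt) reaches percpu_add_op() with val == 1 and is
 * emitted as a single "incl %gs:hypo_cnt(%rip)" instead of an "addl".
 */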

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val) \
({ \
        __pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \
 \
        asm qual (__pcpu_op_##size("xadd") "%[tmp], " \
                  __percpu_arg([var]) \
                  : [tmp] __pcpu_reg_##size("+", paro_tmp__), \
                    [var] "+m" (__my_cpu_var(_var)) \
                  : : "memory"); \
        (typeof(_var))(unsigned long) (paro_tmp__ + _val); \
})
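
/*
 * Usage sketch (hypothetical variable): XADD leaves the old value in
 * the register, so
 *
 *        new = this_cpu_add_return(hypo_cnt, 1);
 *
 * returns the incremented value without a separate re-read.
 */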

/*
 * raw_cpu_xchg() can use a load-store since
 * it is not required to be IRQ-safe.
 */
#define raw_percpu_xchg_op(_var, _nval) \
({ \
        TYPEOF_UNQUAL(_var) pxo_old__ = raw_cpu_read(_var); \
 \
        raw_cpu_write(_var, _nval); \
 \
        pxo_old__; \
})

/*
 * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
 * XCHG is expensive due to the implied LOCK prefix. The processor
 * cannot prefetch cachelines if XCHG is used.
 */
#define this_percpu_xchg_op(_var, _nval) \
({ \
        TYPEOF_UNQUAL(_var) pxo_old__ = this_cpu_read(_var); \
 \
        do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \
 \
        pxo_old__; \
})
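
/*
 * Usage sketch (hypothetical variable):
 *
 *        old = this_cpu_xchg(hypo_state, NEW_STATE);
 *
 * which loops on CMPXCHG as above rather than issuing an XCHG.
 */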

/*
 * CMPXCHG has no such implied lock semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \
({ \
        __pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \
        __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
 \
        asm qual (__pcpu_op_##size("cmpxchg") "%[nval], " \
                  __percpu_arg([var]) \
                  : [oval] "+a" (pco_old__), \
                    [var] "+m" (__my_cpu_var(_var)) \
                  : [nval] __pcpu_reg_##size(, pco_new__) \
                  : "memory"); \
 \
        (typeof(_var))(unsigned long) pco_old__; \
})

#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval) \
({ \
        bool success; \
        __pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \
        __pcpu_type_##size pco_old__ = *pco_oval__; \
        __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
 \
        asm qual (__pcpu_op_##size("cmpxchg") "%[nval], " \
                  __percpu_arg([var]) \
                  CC_SET(z) \
                  : CC_OUT(z) (success), \
                    [oval] "+a" (pco_old__), \
                    [var] "+m" (__my_cpu_var(_var)) \
                  : [nval] __pcpu_reg_##size(, pco_new__) \
                  : "memory"); \
        if (unlikely(!success)) \
                *pco_oval__ = pco_old__; \
 \
        likely(success); \
})
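
/*
 * Typical usage sketch (hypothetical variable): a lockless
 * read-modify-write loop that retries only on (rare) failure:
 *
 *        old = this_cpu_read(hypo_val);
 *        do {
 *                new = old + 1;
 *        } while (!this_cpu_try_cmpxchg(hypo_val, &old, new));
 *
 * On failure, "old" has already been refreshed from the variable.
 */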

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)

#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \
({ \
        union { \
                u64 var; \
                struct { \
                        u32 low, high; \
                }; \
        } old__, new__; \
 \
        old__.var = _oval; \
        new__.var = _nval; \
 \
        asm_inline qual ( \
                ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
                            "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
                : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)), \
                                "+a" (old__.low), "+d" (old__.high)) \
                : "b" (new__.low), "c" (new__.high), \
                  "S" (&(_var)) \
                : "memory"); \
 \
        old__.var; \
})

#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval) \
({ \
        bool success; \
        u64 *_oval = (u64 *)(_ovalp); \
        union { \
                u64 var; \
                struct { \
                        u32 low, high; \
                }; \
        } old__, new__; \
 \
        old__.var = *_oval; \
        new__.var = _nval; \
 \
        asm_inline qual ( \
                ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
                            "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
                CC_SET(z) \
                : ALT_OUTPUT_SP(CC_OUT(z) (success), \
                                [var] "+m" (__my_cpu_var(_var)), \
                                "+a" (old__.low), "+d" (old__.high)) \
                : "b" (new__.low), "c" (new__.high), \
                  "S" (&(_var)) \
                : "memory"); \
        if (unlikely(!success)) \
                *_oval = old__.var; \
 \
        likely(success); \
})

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)
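
/*
 * Usage sketch (hypothetical variable): a 64-bit cmpxchg on a 32-bit
 * SMP kernel, where the ALTERNATIVE above falls back to the
 * this_cpu_cmpxchg8b_emu helper on pre-CX8 CPUs:
 *
 *        u64 old = 0, new = 1;
 *        if (this_cpu_try_cmpxchg64(hypo_val64, &old, new))
 *                ...
 */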

#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval) \
({ \
        union { \
                u128 var; \
                struct { \
                        u64 low, high; \
                }; \
        } old__, new__; \
 \
        old__.var = _oval; \
        new__.var = _nval; \
 \
        asm_inline qual ( \
                ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
                            "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
                : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)), \
                                "+a" (old__.low), "+d" (old__.high)) \
                : "b" (new__.low), "c" (new__.high), \
                  "S" (&(_var)) \
                : "memory"); \
 \
        old__.var; \
})

#define raw_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, , pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval) \
({ \
        bool success; \
        u128 *_oval = (u128 *)(_ovalp); \
        union { \
                u128 var; \
                struct { \
                        u64 low, high; \
                }; \
        } old__, new__; \
 \
        old__.var = *_oval; \
        new__.var = _nval; \
 \
        asm_inline qual ( \
                ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
                            "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
                CC_SET(z) \
                : ALT_OUTPUT_SP(CC_OUT(z) (success), \
                                [var] "+m" (__my_cpu_var(_var)), \
                                "+a" (old__.low), "+d" (old__.high)) \
                : "b" (new__.low), "c" (new__.high), \
                  "S" (&(_var)) \
                : "memory"); \
        if (unlikely(!success)) \
                *_oval = old__.var; \
 \
        likely(success); \
})

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
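
/*
 * Usage sketch (hypothetical pair): the 16-byte cmpxchg lets two
 * 64-bit fields (e.g. a pointer plus a generation counter) be swapped
 * together:
 *
 *        u128 old = ..., new = ...;
 *        if (this_cpu_try_cmpxchg128(hypo_pair, &old, new))
 *                ...
 */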

#endif /* CONFIG_X86_64 */

#define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp)
#define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp)
#define raw_cpu_read_4(pcp) __raw_cpu_read(4, , pcp)
#define raw_cpu_write_1(pcp, val) __raw_cpu_write(1, , pcp, val)
#define raw_cpu_write_2(pcp, val) __raw_cpu_write(2, , pcp, val)
#define raw_cpu_write_4(pcp, val) __raw_cpu_write(4, , pcp, val)

#define this_cpu_read_1(pcp) __raw_cpu_read(1, volatile, pcp)
#define this_cpu_read_2(pcp) __raw_cpu_read(2, volatile, pcp)
#define this_cpu_read_4(pcp) __raw_cpu_read(4, volatile, pcp)
#define this_cpu_write_1(pcp, val) __raw_cpu_write(1, volatile, pcp, val)
#define this_cpu_write_2(pcp, val) __raw_cpu_write(2, volatile, pcp, val)
#define this_cpu_write_4(pcp, val) __raw_cpu_write(4, volatile, pcp, val)

#define this_cpu_read_stable_1(pcp) __raw_cpu_read_stable(1, pcp)
#define this_cpu_read_stable_2(pcp) __raw_cpu_read_stable(2, pcp)
#define this_cpu_read_stable_4(pcp) __raw_cpu_read_stable(4, pcp)

#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val) percpu_binary_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val) percpu_binary_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val) percpu_binary_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val) percpu_binary_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val) percpu_binary_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val) percpu_binary_op(4, , "or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val)

#define this_cpu_add_1(pcp, val) percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val) percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val) percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val) percpu_binary_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val) percpu_binary_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val) percpu_binary_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val) percpu_binary_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val) percpu_binary_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val) percpu_binary_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval) this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) this_percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, , pcp, ovalp, nval)

#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

/*
 * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
 * 32-bit kernels must fall back to generic operations.
 */
#ifdef CONFIG_X86_64

#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp)
#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val)

#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp)
#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val)

#define this_cpu_read_stable_8(pcp) __raw_cpu_read_stable(8, pcp)

#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val) percpu_binary_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val) percpu_binary_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)

#define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val) percpu_binary_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val) percpu_binary_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval) this_percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define raw_cpu_read_long(pcp) raw_cpu_read_8(pcp)

#else /* !CONFIG_X86_64: */

/* There is no generic 64-bit read stable operation for 32-bit targets. */
#define this_cpu_read_stable_8(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })

#define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp)

#endif /* CONFIG_X86_64 */

#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)

/*
 * this_cpu_read() makes the compiler load the per-CPU variable every time
 * it is accessed, while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across CPUs. The current users include
 * current_task and cpu_current_top_of_stack, both of which are
 * actually per-thread variables implemented as per-CPU variables and
 * thus stable for the duration of the respective task.
 */
#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
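
/*
 * Illustrative difference (a sketch with a hypothetical variable):
 *
 *        while (!this_cpu_read(hypo_flag))
 *                cpu_relax();
 *
 * re-loads hypo_flag on every iteration; a this_cpu_read_stable() of
 * the same variable could legally be hoisted out of the loop by the
 * compiler, which is why it is reserved for values that never change
 * underneath the current task.
 */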

#define x86_this_cpu_constant_test_bit(_nr, _var) \
({ \
        unsigned long __percpu *addr__ = \
                (unsigned long __percpu *)&(_var) + ((_nr) / BITS_PER_LONG); \
 \
        !!((1UL << ((_nr) % BITS_PER_LONG)) & raw_cpu_read(*addr__)); \
})

#define x86_this_cpu_variable_test_bit(_nr, _var) \
({ \
        bool oldbit; \
 \
        asm volatile("btl %[nr], " __percpu_arg([var]) \
                     CC_SET(c) \
                     : CC_OUT(c) (oldbit) \
                     : [var] "m" (__my_cpu_var(_var)), \
                       [nr] "rI" (_nr)); \
        oldbit; \
})

#define x86_this_cpu_test_bit(_nr, _var) \
        (__builtin_constant_p(_nr) \
         ? x86_this_cpu_constant_test_bit(_nr, _var) \
         : x86_this_cpu_variable_test_bit(_nr, _var))
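
/*
 * Usage sketch (hypothetical bitmap): with a constant bit number the
 * test is folded to a mask-and-read; otherwise a BT instruction is
 * used:
 *
 *        if (x86_this_cpu_test_bit(3, hypo_bitmap))
 *                ...
 */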

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_CACHE_HOT(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLER__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu
 * areas are allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
        DEFINE_PER_CPU(_type, _name) = _initvalue; \
        __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
                                { [0 ... NR_CPUS-1] = _initvalue }; \
        __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
        DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
        __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
                                { [0 ... NR_CPUS-1] = _initvalue }; \
        __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
        DECLARE_PER_CPU(_type, _name); \
        extern __typeof__(_type) *_name##_early_ptr; \
        extern __typeof__(_type) _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
        DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
        extern __typeof__(_type) *_name##_early_ptr; \
        extern __typeof__(_type) _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])

#define early_per_cpu(_name, _cpu) \
        *(early_per_cpu_ptr(_name) ? \
          &early_per_cpu_ptr(_name)[_cpu] : \
          &per_cpu(_name, _cpu))
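
/*
 * Usage sketch (hypothetical variable):
 *
 *        DEFINE_EARLY_PER_CPU(u16, hypo_id, 0xffff);
 *
 * Until setup code clears hypo_id_early_ptr to NULL,
 * early_per_cpu(hypo_id, cpu) reads hypo_id_early_map[cpu]; afterwards
 * it falls through to per_cpu(hypo_id, cpu).
 */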

#else /* !CONFIG_SMP: */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
        DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
        DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
        DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
        DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */