/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <linux/bug.h>

#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
#endif
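
/*
 * Worked example: for a u8 at byte offset 1 within its aligned word,
 * big-endian gives BITOFF_CAL(1, 1) = (4 - 1 - 1) * 8 = 16 and
 * little-endian gives 1 * 8 = 8: the shift that moves that byte to and
 * from the low bits of the containing 32-bit word.
 */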

#define XCHG_GEN(type, sfx, cl) \
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
{ \
	unsigned int prev, prev_mask, tmp, bitoff, off; \
 \
	off = (unsigned long)p % sizeof(u32); \
	bitoff = BITOFF_CAL(sizeof(type), off); \
	p -= off; \
	val <<= bitoff; \
	prev_mask = (u32)(type)-1 << bitoff; \
 \
	__asm__ __volatile__( \
"1:	lwarx	%0,0,%3\n" \
"	andc	%1,%0,%5\n" \
"	or	%1,%1,%4\n" \
"	stwcx.	%1,0,%3\n" \
"	bne-	1b\n" \
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
	: "r" (p), "r" (val), "r" (prev_mask) \
	: "cc", cl); \
 \
	return prev >> bitoff; \
}

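/*
 * For reference, a minimal plain-C sketch of the scheme XCHG_GEN encodes
 * (__xchg_sketch is a hypothetical helper, not used anywhere): a sub-word
 * exchange done as a read-modify-write of the aligned 32-bit word that
 * contains the value. The real macro performs the same mask/merge inside
 * a single lwarx/stwcx. reservation, so the word update is atomic; this
 * sketch is not.
 */
static inline u32 __xchg_sketch(volatile void *p, u32 val, unsigned int size)
{
	unsigned int off = (unsigned long)p % sizeof(u32);
	unsigned int bitoff = BITOFF_CAL(size, off);
	u32 *word = (u32 *)((unsigned long)p - off);
	/* mask covering the size-byte field; valid for size < sizeof(u32) */
	u32 mask = ((1u << (size * BITS_PER_BYTE)) - 1) << bitoff;
	u32 prev = *word;

	*word = (prev & ~mask) | ((val << bitoff) & mask);	/* merge field */
	return (prev & mask) >> bitoff;				/* old value */
}
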
#define CMPXCHG_GEN(type, sfx, br, br2, cl) \
static inline \
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \
{ \
	unsigned int prev, prev_mask, tmp, bitoff, off; \
 \
	off = (unsigned long)p % sizeof(u32); \
	bitoff = BITOFF_CAL(sizeof(type), off); \
	p -= off; \
	old <<= bitoff; \
	new <<= bitoff; \
	prev_mask = (u32)(type)-1 << bitoff; \
 \
	__asm__ __volatile__( \
	br \
"1:	lwarx	%0,0,%3\n" \
"	and	%1,%0,%6\n" \
"	cmpw	0,%1,%4\n" \
"	bne-	2f\n" \
"	andc	%1,%0,%6\n" \
"	or	%1,%1,%5\n" \
"	stwcx.	%1,0,%3\n" \
"	bne-	1b\n" \
	br2 \
	"\n" \
"2:" \
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
	: "r" (p), "r" (old), "r" (new), "r" (prev_mask) \
	: "cc", cl); \
 \
	return prev >> bitoff; \
}

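/*
 * CMPXCHG_GEN above uses the same word-aligned masking scheme as XCHG_GEN,
 * with a compare between the load and the store: the loaded word is masked
 * down to the target field ("and" with prev_mask) and compared against the
 * shifted 'old'; on mismatch it branches out without storing, otherwise the
 * field is merged in and stwcx. retries on reservation loss. The 'br'/'br2'
 * arguments splice in the entry/exit barriers each ordering variant needs.
 */
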
/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */

#ifndef CONFIG_PPC_HAS_LBARX_LHARX
XCHG_GEN(u8, _local, "memory");
XCHG_GEN(u8, _relaxed, "cc");
XCHG_GEN(u16, _local, "memory");
XCHG_GEN(u16, _relaxed, "cc");
#else
static __always_inline unsigned long
__xchg_u8_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lbarx	%0,0,%2		# __xchg_u8_local\n"
"	stbcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned char *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u8_relaxed(u8 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lbarx	%0,0,%2		# __xchg_u8_relaxed\n"
"	stbcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__xchg_u16_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lharx	%0,0,%2		# __xchg_u16_local\n"
"	sthcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned short *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u16_relaxed(u16 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lharx	%0,0,%2		# __xchg_u16_relaxed\n"
"	sthcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif

static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif

static __always_inline unsigned long
__xchg_local(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_local(ptr, x);
	case 2:
		return __xchg_u16_local(ptr, x);
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
	return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_relaxed(ptr, x);
	case 2:
		return __xchg_u16_relaxed(ptr, x);
	case 4:
		return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_relaxed(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
	return x;
}
#define arch_xchg_local(ptr, x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg_local((ptr), \
			(unsigned long)_x_, sizeof(*(ptr))); \
})

#define arch_xchg_relaxed(ptr, x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
			(unsigned long)_x_, sizeof(*(ptr))); \
})

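/*
 * Usage sketch (__example_set_state is a hypothetical helper, for
 * illustration only): swap in a new state byte and act on the previous
 * one. The _local variants provide no barriers and need only be atomic
 * with respect to the local CPU; the _relaxed variants are SMP-atomic
 * but imply no memory barriers either.
 */
static __always_inline u8 __example_set_state(u8 *state, u8 new_state)
{
	return arch_xchg_relaxed(state, new_state);
}
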
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#ifndef CONFIG_PPC_HAS_LBARX_LHARX
CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u8, _local, , , "memory");
CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u8, _relaxed, , , "cc");
CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u16, _local, , , "memory");
CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u16, _relaxed, , , "cc");
#else
static __always_inline unsigned long
__cmpxchg_u8(volatile unsigned char *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lbarx	%0,0,%2		# __cmpxchg_u8\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stbcx.	%4,0,%2\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u8_local(volatile unsigned char *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lbarx	%0,0,%2		# __cmpxchg_u8_local\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stbcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u8_relaxed(u8 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lbarx	%0,0,%2		# __cmpxchg_u8_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stbcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u8_acquire(u8 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lbarx	%0,0,%2		# __cmpxchg_u8_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stbcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u16(volatile unsigned short *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lharx	%0,0,%2		# __cmpxchg_u16\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	sthcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u16_local(volatile unsigned short *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lharx	%0,0,%2		# __cmpxchg_u16_local\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	sthcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u16_relaxed(u16 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lharx	%0,0,%2		# __cmpxchg_u16_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	sthcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u16_acquire(u16 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lharx	%0,0,%2		# __cmpxchg_u16_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	sthcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_local\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}


static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

/*
 * The cmpxchg family provides no ordering guarantee when the compare part
 * fails, so we can avoid superfluous barriers by implementing cmpxchg()
 * and cmpxchg_acquire() in assembly. We don't do the same for
 * cmpxchg_release(), because that would put a barrier in the middle of a
 * ll/sc loop, which is probably a bad idea; for example, it might make the
 * conditional store more likely to fail.
 */
static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

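/*
 * Illustration of the acquire variant (__example_trylock is a hypothetical
 * helper, not part of this API): a test-and-set trylock. The acquire
 * barrier sits after the ll/sc loop and runs only on the success path,
 * ordering the lock acquisition before the critical section; a failed
 * compare branches past it, needing no ordering.
 */
static __always_inline int __example_trylock(u32 *lock)
{
	/* the previous value was 0 iff we took the lock */
	return __cmpxchg_u32_acquire(lock, 0, 1) == 0;
}
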
#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_local\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 2:
		return __cmpxchg_u16(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
		unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_local(ptr, old, new);
	case 2:
		return __cmpxchg_u16_local(ptr, old, new);
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
	return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_relaxed(ptr, old, new);
	case 2:
		return __cmpxchg_u16_relaxed(ptr, old, new);
	case 4:
		return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
	return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_acquire(ptr, old, new);
	case 2:
		return __cmpxchg_u16_acquire(ptr, old, new);
	case 4:
		return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_acquire(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
	return old;
}
#define arch_cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
			(unsigned long)_n_, sizeof(*(ptr))); \
})

#define arch_cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
			(unsigned long)_n_, sizeof(*(ptr))); \
})

#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
			(unsigned long)_o_, (unsigned long)_n_, \
			sizeof(*(ptr))); \
})

#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
			(unsigned long)_o_, (unsigned long)_n_, \
			sizeof(*(ptr))); \
})
#ifdef CONFIG_PPC64
#define arch_cmpxchg64(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	arch_cmpxchg((ptr), (o), (n)); \
})
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	arch_cmpxchg_local((ptr), (o), (n)); \
})
#define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
#define arch_cmpxchg64_acquire(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	arch_cmpxchg_acquire((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */