/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/* Normal writes in our arch don't clear lock reservations */

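/*
 * Hexagon's locked load/store pair is a classic LL/SC:
 * "Rd = memw_locked(Rs)" loads a word and opens a reservation on the
 * address, and "memw_locked(Rs, Pd) = Rt" stores only if that
 * reservation is still intact, setting predicate Pd on success.
 * Every loop below retries until the conditional store succeeds.
 * Since an ordinary store does not clear a reservation (see above),
 * even arch_atomic_set() must use the locked store: a plain write
 * could otherwise race with a concurrent LL/SC sequence on another
 * CPU without making its store-conditional fail.
 */
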
static inline void arch_atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

#define arch_atomic_read(v)		READ_ONCE((v)->counter)

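/*
 * Example (illustrative): callers normally reach these through the
 * generic wrappers in <linux/atomic.h> rather than the arch_ entry
 * points directly:
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *
 *	atomic_set(&cnt, 5);
 *	WARN_ON(atomic_read(&cnt) != 5);
 */
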
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

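/*
 * Note the difference between the two value-returning templates:
 * ATOMIC_OP_RETURN stores and returns the freshly computed value
 * (%0 after the op), while ATOMIC_FETCH_OP computes the new value
 * into a scratch register (%1) and returns the value that was
 * loaded (%0), matching the *_return vs fetch_* semantics of the
 * generic atomic API.
 */
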
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

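/*
 * The expansions above provide arch_atomic_{add,sub}(),
 * arch_atomic_{add,sub}_return() and arch_atomic_fetch_{add,sub}().
 * The self-referential #defines below make the names visible to the
 * preprocessor so the generic fallback layer does not synthesize
 * these operations from cmpxchg().
 */
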
#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

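/*
 * The bitwise ops only come in plain and fetch_ flavours; the
 * kernel's atomic API has no atomic_{and,or,xor}_return().
 */
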
#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

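/**
 * arch_atomic_fetch_add_unless - add to a value unless it equals a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: don't add if the value equals u
 *
 * Returns the old value.
 */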
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
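
/*
 * Example (illustrative, with a hypothetical obj->refs counter):
 * this primitive backs idioms such as atomic_inc_not_zero(), which
 * must not resurrect a refcount that has already hit zero:
 *
 *	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// already dead, don't take a reference
 */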

#endif