/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 * Copyright (C) 2001, 2002  Hitoshi Yamamoto
 * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/assembler.h>
#include <asm/byteorder.h>
#include <asm/dcache_clear.h>
#include <asm/types.h>
#include <asm/barrier.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. The test_and_*() operations return 0 if
 * the bit was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
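
/*
 * Worked example (illustrative only): for nr == 35, the word index is
 * nr >> 5 == 1 and the mask is 1 << (35 & 0x1F) == 1 << 3, so the
 * operation touches bit 3 of the second 32-bit word at addr.
 */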

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"or	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

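/*
 * Minimal usage sketch (the flag word and bit number here are
 * hypothetical, not part of this header):
 *
 *	static volatile unsigned long pending;
 *
 *	set_bit(0, &pending);	// atomically mark slot 0 as pending
 */
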
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"and	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

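/*
 * Unlock-style pairing sketch (hypothetical names; see the barrier
 * note above): order prior stores before releasing the flag.
 *
 *	smp_mb__before_atomic();
 *	clear_bit(MY_LOCK_BIT, &my_flags);
 */
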
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"xor	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

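/*
 * Toggle sketch (hypothetical names): flip a state bit atomically.
 *
 *	change_bit(LED_BIT, &led_state);	// on -> off, or off -> on
 */
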
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %3;		\n\t"
		"or	%1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

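/*
 * Claim-once sketch (hypothetical names): the caller that observes the
 * old value 0 is the one that set the bit and owns the resource.
 *
 *	if (!test_and_set_bit(RES_BIT, &res_map))
 *		;	// we won the race; release later with clear_bit()
 */
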
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		M32R_LOCK" %0, @%3;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %2;		\n\t"
		"not	%2, %2;		\n\t"
		"and	%1, %2;		\n\t"
		M32R_UNLOCK" %1, @%3;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

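/*
 * Consume-event sketch (hypothetical names): atomically check and
 * clear a pending flag so each event is handled exactly once.
 *
 *	if (test_and_clear_bit(EV_BIT, &pending_events))
 *		handle_event();	// hypothetical handler
 */
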
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %3;		\n\t"
		"xor	%1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

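/*
 * Toggle-and-observe sketch (hypothetical names): flip a parity bit
 * and learn its previous value in one atomic step.
 *
 *	int was_odd = test_and_change_bit(PARITY_BIT, &state);
 */
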
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */