#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but it isn't always defined.
 */

#define switch_to(prev, next, last)  do { \
	__asm__ __volatile__ ( \
		"	seth	lr, #high(1f)				\n" \
		"	or3	lr, lr, #low(1f)			\n" \
		"	st	lr, @%4  ; store old LR			\n" \
		"	ld	lr, @%5  ; load new LR			\n" \
		"	st	sp, @%2  ; store old SP			\n" \
		"	ld	sp, @%3  ; load new SP			\n" \
		"	push	%1  ; store `prev' on new stack		\n" \
		"	jmp	lr					\n" \
		"	.fillinsn					\n" \
		"1:							\n" \
		"	pop	%0  ; restore `__last' from new stack	\n" \
		: "=r" (last) \
		: "0" (prev), \
		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
		: "memory", "lr" \
	); \
} while(0)
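
/*
 * Sketch of how the scheduler uses this macro (illustrative only; the
 * real call site lives in the core scheduler, and these names are
 * assumptions rather than code from this file):
 *
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);
 *	// Execution resumes here only when `prev' is scheduled again;
 *	// `last' then names the task that ran immediately before us.
 */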

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
	__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
	__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
static inline void local_irq_enable(void)
{
	unsigned long tmpreg;
	__asm__ __volatile__(
		"mvfc	%0, psw;		\n\t"
		"or3	%0, %0, #0x0040;	\n\t"
		"mvtc	%0, psw;		\n\t"
		: "=&r" (tmpreg) : : "cbit", "memory");
}

static inline void local_irq_disable(void)
{
	unsigned long tmpreg0, tmpreg1;
	__asm__ __volatile__(
		"ld24	%0, #0	; Use 32-bit insn.			\n\t"
		"mvfc	%1, psw	; No interrupt can be accepted here.	\n\t"
		"mvtc	%0, psw						\n\t"
		"and3	%0, %1, #0xffbf					\n\t"
		"mvtc	%0, psw						\n\t"
		: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define local_save_flags(x) \
	__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)

#define local_irq_restore(x) \
	__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
		: "r" (x) : "cbit", "memory")

#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define local_irq_save(x) \
	__asm__ __volatile__( \
		"mvfc	%0, psw;	\n\t" \
		"clrpsw	#0x40 -> nop;	\n\t" \
		: "=r" (x) : /* no input */ : "memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_irq_save(x) \
	({ \
		unsigned long tmpreg; \
		__asm__ __volatile__( \
			"ld24	%1, #0			\n\t" \
			"mvfc	%0, psw			\n\t" \
			"mvtc	%1, psw			\n\t" \
			"and3	%1, %0, #0xffbf		\n\t" \
			"mvtc	%1, psw			\n\t" \
			: "=r" (x), "=&r" (tmpreg) \
			: : "cbit", "memory"); \
	})
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define irqs_disabled() \
	({ \
		unsigned long flags; \
		local_save_flags(flags); \
		!(flags & 0x40); \
	})
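
/*
 * Illustrative helper (a hypothetical example, not part of the original
 * header): the standard save/disable ... restore pattern built from the
 * primitives above.  irqs_disabled() simply tests the IE bit (0x40) of
 * the PSW.
 */
static inline void __example_irq_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable IRQs, remember old PSW */
	/* ... critical section: no interrupts on this CPU ... */
	local_irq_restore(flags);	/* restore PSW, including IE bit */
}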

#define nop()	__asm__ __volatile__ ("nop" : : )

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#ifdef CONFIG_SMP
extern void  __xchg_called_with_bad_pointer(void);
#endif

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr) \
	"seth	"reg1", #high(dcache_dummy);		\n\t" \
	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t" \
	"lock	"reg0", @"reg1";			\n\t" \
	"add3	"reg0", "addr", #0x1000;		\n\t" \
	"ld	"reg0", @"reg0";			\n\t" \
	"add3	"reg0", "addr", #0x2000;		\n\t" \
	"ld	"reg0", @"reg0";			\n\t" \
	"unlock	"reg0", @"reg1";			\n\t"
	/* FIXME: This workaround code cannot handle kernel modules
	 * correctly in an SMP environment.
	 */
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */
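
/*
 * Added note on DCACHE_CLEAR (an interpretation, not from the original
 * source): the sequence holds the LOCK/UNLOCK pair on the dcache_dummy
 * line while loading from addr+0x1000 and addr+0x2000, which presumably
 * evicts/refills the data-cache set that `addr' maps to before the real
 * locked access -- a workaround for a cache erratum in M32700 TS1
 * silicon (see the FIXME above).
 */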

static inline unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
	default:
		__xchg_called_with_bad_pointer();
#endif  /* CONFIG_SMP */
	}

	local_irq_restore(flags);

	return (tmp);
}
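
/*
 * Illustrative helper (hypothetical, not part of the original header):
 * xchg() atomically stores a new value and returns the old one, which is
 * enough to build a crude test-and-set lock on a word-sized lock variable.
 */
static inline void __example_spin_acquire(volatile unsigned long *lock_word)
{
	while (xchg(lock_word, 1) != 0)
		;	/* spin until the previous owner stored 0 */
}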

#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
			M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			M32R_UNLOCK" %0, @%1;	\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif	/* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
	({ \
		__typeof__(*(ptr)) _o_ = (o); \
		__typeof__(*(ptr)) _n_ = (n); \
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
			(unsigned long)_n_, sizeof(*(ptr))); \
	})
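
/*
 * Illustrative helper (hypothetical, not part of the original header):
 * the canonical cmpxchg() retry loop, here used to atomically add to a
 * 32-bit counter.  If another CPU modified *v between the read and the
 * cmpxchg(), the compare fails and we retry with the fresh value.
 */
static inline unsigned int
__example_atomic_add(volatile unsigned int *v, unsigned int inc)
{
	unsigned int old, new;

	do {
		old = *v;
		new = old + inc;
	} while (cmpxchg(v, old, new) != old);

	return new;
}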

#endif	/* __KERNEL__ */

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()	barrier()
#define rmb()	mb()
#define wmb()	mb()
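
/*
 * Illustrative pairing (an added sketch, not from the original source):
 * a producer publishes data and then a ready flag; the consumer reads
 * the flag and then the data.  smp_wmb()/smp_rmb() (defined below) keep
 * the two stores and the two loads ordered on SMP:
 *
 *	producer:			consumer:
 *		data = value;			while (!flag)
 *		smp_wmb();				;
 *		flag = 1;			smp_rmb();
 *						use(data);
 */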

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight
 * than rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define arch_align_stack(x) (x)

#endif	/* _ASM_M32R_SYSTEM_H */