#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>	/* for barrier() */
#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but it isn't always defined
 */

#if defined(CONFIG_FRAME_POINTER) || \
	!defined(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER)
#define M32R_PUSH_FP "	push fp\n"
#define M32R_POP_FP  "	pop  fp\n"
#else
#define M32R_PUSH_FP ""
#define M32R_POP_FP  ""
#endif

#define switch_to(prev, next, last)  do { \
	__asm__ __volatile__ ( \
		"	seth	lr, #high(1f)				\n" \
		"	or3	lr, lr, #low(1f)			\n" \
		"	st	lr, @%4  ; store old LR			\n" \
		"	ld	lr, @%5  ; load new LR			\n" \
			M32R_PUSH_FP \
		"	st	sp, @%2  ; store old SP			\n" \
		"	ld	sp, @%3  ; load new SP			\n" \
		"	push	%1  ; store `prev' on new stack		\n" \
		"	jmp	lr					\n" \
		"	.fillinsn					\n" \
		"1:							\n" \
		"	pop	%0  ; restore `__last' from new stack	\n" \
			M32R_POP_FP \
		: "=r" (last) \
		: "0" (prev), \
		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
		: "memory", "lr" \
	); \
} while(0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
	__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
	__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
static inline void local_irq_enable(void)
{
	unsigned long tmpreg;
	__asm__ __volatile__(
		"mvfc	%0, psw;	\n\t"
		"or3	%0, %0, #0x0040;	\n\t"
		"mvtc	%0, psw;	\n\t"
	: "=&r" (tmpreg) : : "cbit", "memory");
}

static inline void local_irq_disable(void)
{
	unsigned long tmpreg0, tmpreg1;
	__asm__ __volatile__(
		"ld24	%0, #0	; Use 32-bit insn. \n\t"
		"mvfc	%1, psw	; No interrupt can be accepted here. \n\t"
		"mvtc	%0, psw	\n\t"
		"and3	%0, %1, #0xffbf	\n\t"
		"mvtc	%0, psw	\n\t"
	: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define local_save_flags(x) \
	__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)

#define local_irq_restore(x) \
	__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
		: "r" (x) : "cbit", "memory")

#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define local_irq_save(x)				\
	__asm__ __volatile__(				\
		"mvfc	%0, psw;	\n\t"		\
		"clrpsw	#0x40 -> nop;	\n\t"		\
		: "=r" (x) : /* no input */ : "memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_irq_save(x)				\
	({						\
		unsigned long tmpreg;			\
		__asm__ __volatile__(			\
			"ld24	%1, #0 \n\t"		\
			"mvfc	%0, psw \n\t"		\
			"mvtc	%1, psw \n\t"		\
			"and3	%1, %0, #0xffbf \n\t"	\
			"mvtc	%1, psw \n\t"		\
			: "=r" (x), "=&r" (tmpreg)	\
			: : "cbit", "memory");		\
	})
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define irqs_disabled()					\
	({						\
		unsigned long flags;			\
		local_save_flags(flags);		\
		!(flags & 0x40);			\
	})
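
/*
 * Illustrative sketch (not part of the original header): typical use of the
 * interrupt-control primitives above.  The variable and function names are
 * hypothetical; PSW bit 0x40 is the interrupt-enable bit being saved,
 * cleared, and restored.
 */
#if 0
static int example_counter;

static inline void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* save PSW, then disable interrupts */
	example_counter++;		/* work that must not be interrupted */
	local_irq_restore(flags);	/* restore the saved PSW (IE bit included) */
}
#endif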

#define nop()	__asm__ __volatile__ ("nop" : : )

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
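
/*
 * Illustrative sketch (not part of the original header): xchg() atomically
 * stores a new value and returns the previous one, which is the basis of a
 * simple test-and-set flag.  The names below are hypothetical.
 */
#if 0
static unsigned int example_flag;

static inline int example_test_and_set(void)
{
	/* Nonzero return means the flag was already set. */
	return xchg(&example_flag, 1);
}
#endif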

extern void __xchg_called_with_bad_pointer(void);

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr)				\
	"seth	"reg1", #high(dcache_dummy);		\n\t"	\
	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t"	\
	"lock	"reg0", @"reg1";			\n\t"	\
	"add3	"reg0", "addr", #0x1000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"add3	"reg0", "addr", #0x2000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"unlock	"reg0", @"reg1";			\n\t"
	/* FIXME: This workaround code cannot handle kernel modules
	 * correctly under SMP environment.
	 */
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */

static inline unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
	default:
		__xchg_called_with_bad_pointer();
#endif  /* CONFIG_SMP */
	}

	local_irq_restore(flags);

	return (tmp);
}

#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
			M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			M32R_UNLOCK" %0, @%1;	\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
	   (unsigned long)_n_, sizeof(*(ptr)));				 \
  })
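
/*
 * Illustrative sketch (not part of the original header): the usual
 * compare-and-swap retry loop built on top of cmpxchg(), here doing a
 * lock-free increment of a hypothetical counter.
 */
#if 0
static unsigned int example_value;

static inline unsigned int example_atomic_inc(void)
{
	unsigned int old, new;

	do {
		old = example_value;
		new = old + 1;
		/* cmpxchg() returns the value found at the address; retry
		 * if another CPU changed it between the load and the swap. */
	} while (cmpxchg(&example_value, old, new) != old);

	return new;
}
#endif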

#endif  /* __KERNEL__ */

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()   barrier()
#define rmb()  mb()
#define wmb()  mb()

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
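
/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing of smp_wmb() and smp_rmb().  The data and flag
 * variables are hypothetical.
 */
#if 0
static int example_data;
static int example_ready;

static void example_producer(void)
{
	example_data = 42;
	smp_wmb();		/* make the data visible before the flag */
	example_ready = 1;
}

static int example_consumer(void)
{
	if (example_ready) {
		smp_rmb();	/* order the flag read before the data read */
		return example_data;
	}
	return -1;		/* not ready yet */
}
#endif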

#define arch_align_stack(x) (x)

#endif	/* _ASM_M32R_SYSTEM_H */