#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but it isn't always defined.
 */
#define switch_to(prev, next, last)  do { \
	__asm__ __volatile__ ( \
		"	seth	lr, #high(1f)				\n" \
		"	or3	lr, lr, #low(1f)			\n" \
		"	st	lr, @%4  ; store old LR			\n" \
		"	ld	lr, @%5  ; load new LR			\n" \
		"	st	sp, @%2  ; store old SP			\n" \
		"	ld	sp, @%3  ; load new SP			\n" \
		"	push	%1  ; store `prev' on new stack		\n" \
		"	jmp	lr  ; resume `next'			\n" \
		"1:							\n" \
		"	pop	%0  ; restore `__last' from new stack	\n" \
		: "=r" (last) \
		: "0" (prev), \
		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
		: "memory", "lr" \
	); \
} while(0)
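
/*
 * Rough sketch of what the macro above does, read from the asm comments:
 * the address of local label "1:" is saved as `prev->thread.lr' and the
 * current stack pointer as `prev->thread.sp'; `next->thread.lr' and
 * `next->thread.sp' are loaded in their place, `prev' is pushed on the new
 * stack, and execution continues in `next'.  When `prev' is later switched
 * back in, it resumes at "1:" and pops the task pointer left on its stack
 * into `last'.
 */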

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
	__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
	__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
static inline void local_irq_enable(void)
{
	unsigned long tmpreg;
	__asm__ __volatile__ (
		"mvfc	%0, psw;		\n\t"
		"or3	%0, %0, #0x0040;	\n\t"
		"mvtc	%0, psw;		\n\t"
	: "=&r" (tmpreg) : : "cbit", "memory");
}

static inline void local_irq_disable(void)
{
	unsigned long tmpreg0, tmpreg1;
	__asm__ __volatile__ (
		"ld24	%0, #0	; Use 32-bit insn.			\n\t"
		"mvfc	%1, psw	; No interrupt can be accepted here.	\n\t"
		"mvtc	%0, psw						\n\t"
		"and3	%0, %1, #0xffbf					\n\t"
		"mvtc	%0, psw						\n\t"
	: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define local_save_flags(x) \
	__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)

#define local_irq_restore(x) \
	__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
		: "r" (x) : "cbit", "memory")

#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define local_irq_save(x) \
	__asm__ __volatile__( \
		"mvfc	%0, psw;	\n\t" \
		"clrpsw	#0x40 -> nop;	\n\t" \
		: "=r" (x) : /* no input */ : "memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_irq_save(x) \
	({ \
		unsigned long tmpreg; \
		__asm__ __volatile__( \
			"ld24	%1, #0		\n\t" \
			"mvfc	%0, psw		\n\t" \
			"mvtc	%1, psw		\n\t" \
			"and3	%1, %0, #0xffbf	\n\t" \
			"mvtc	%1, psw		\n\t" \
			: "=r" (x), "=&r" (tmpreg) \
			: : "cbit", "memory"); \
	})
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
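
/*
 * Typical pairing of the primitives above (illustrative only; `some_count'
 * is a made-up variable, not something defined in this header):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	some_count++;
 *	local_irq_restore(flags);
 *
 * local_irq_save() disables interrupts on this CPU and remembers the old
 * PSW in `flags'; local_irq_restore() puts the saved PSW back, so the
 * critical section in between cannot be interrupted locally.
 */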

#define irqs_disabled() \
	({ \
		unsigned long flags; \
		local_save_flags(flags); \
		!(flags & 0x40); \
	})

#define nop()	__asm__ __volatile__ ("nop" : : )

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);
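
/*
 * Illustrative (hypothetical) use of xchg(): atomically publish a new
 * buffer pointer and pick up the old one in one indivisible step
 * (`current_buf' and `new_buf' are made up for the example):
 *
 *	static void *current_buf;
 *
 *	void *old = xchg(&current_buf, new_buf);
 *
 * After this, `old' holds whatever pointer was there before, and no other
 * context can have observed `current_buf' holding neither value.
 */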

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr) \
	"seth	"reg1", #high(dcache_dummy);		\n\t" \
	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t" \
	"lock	"reg0", @"reg1";			\n\t" \
	"add3	"reg0", "addr", #0x1000;		\n\t" \
	"ld	"reg0", @"reg0";			\n\t" \
	"add3	"reg0", "addr", #0x2000;		\n\t" \
	"ld	"reg0", @"reg0";			\n\t" \
	"unlock	"reg0", @"reg1";			\n\t"
	/* FIXME: This workaround code cannot handle kernel modules
	 * correctly under SMP environment.
	 */
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */
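
/*
 * Note on the workaround above (read from the asm it expands to): it takes
 * a LOCK on `dcache_dummy', then loads from addr+0x1000 and addr+0x2000,
 * presumably to displace the cache line holding `addr' from the data cache,
 * and finally UNLOCKs the dummy location.  reg0 and reg1 are caller-supplied
 * scratch registers.
 */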

static inline unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);
	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t" "stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t" "sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t" "st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
	default:
		__xchg_called_with_bad_pointer();
#endif  /* CONFIG_SMP */
	}
	local_irq_restore(flags);
	return (tmp);
}

#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
			M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;		\n"
		"1:				\n"
			M32R_UNLOCK" %0, @%1;	\n"
		"2:				\n"
		: "=&r" (retval)
		: "r" (p), "r" (old), "r" (new)
		: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
	return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif	/* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
	({ \
		__typeof__(*(ptr)) _o_ = (o); \
		__typeof__(*(ptr)) _n_ = (n); \
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				(unsigned long)_n_, sizeof(*(ptr))); \
	})
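
/*
 * Illustrative (hypothetical) cmpxchg() usage: the usual read-modify-write
 * retry loop for a lock-free counter (`counter' is made up for the example):
 *
 *	static unsigned int counter;
 *
 *	unsigned int old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * cmpxchg() returns the value that was actually found at the location, so
 * the loop retries whenever another path changed `counter' in between.
 */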

#endif	/* __KERNEL__ */

/*
 * Memory barriers.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()	barrier()
#define rmb()	mb()
#define wmb()	mb()

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif	/* CONFIG_SMP */
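
/*
 * Illustrative pairing of the SMP barriers above (the variables are made
 * up for the example):
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *
 *	data = 42;			while (!ready)
 *	smp_wmb();				cpu_relax();
 *	ready = 1;			smp_rmb();
 *					use(data);
 *
 * smp_wmb() orders the store to `data' before the store to `ready', and
 * smp_rmb() orders the load of `ready' before the load of `data', so the
 * consumer never sees `ready' set while `data' is still stale.  On !SMP
 * builds both collapse to a compiler barrier().
 */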

#define arch_align_stack(x) (x)

#endif	/* _ASM_M32R_SYSTEM_H */