#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 by Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next, last) should switch from task `prev' to task
 * `next'; `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be pointers to struct task_struct, but that
 * type isn't always defined where this header is included.
 */

#ifndef CONFIG_SMP
#define prepare_to_switch()  do { } while(0)
#endif  /* not CONFIG_SMP */

#define switch_to(prev, next, last)  do { \
        register unsigned long arg0 __asm__ ("r0") = (unsigned long)prev; \
        register unsigned long arg1 __asm__ ("r1") = (unsigned long)next; \
        register unsigned long *oldsp __asm__ ("r2") = &(prev->thread.sp); \
        register unsigned long *newsp __asm__ ("r3") = &(next->thread.sp); \
        register unsigned long *oldlr __asm__ ("r4") = &(prev->thread.lr); \
        register unsigned long *newlr __asm__ ("r5") = &(next->thread.lr); \
        register struct task_struct *__last __asm__ ("r6"); \
        __asm__ __volatile__ ( \
                "st     r8, @-r15                               \n\t" \
                "st     r9, @-r15                               \n\t" \
                "st     r10, @-r15                              \n\t" \
                "st     r11, @-r15                              \n\t" \
                "st     r12, @-r15                              \n\t" \
                "st     r13, @-r15                              \n\t" \
                "st     r14, @-r15                              \n\t" \
                "seth   r14, #high(1f)                          \n\t" \
                "or3    r14, r14, #low(1f)                      \n\t" \
                "st     r14, @r4    ; store old LR              \n\t" \
                "st     r15, @r2    ; store old SP              \n\t" \
                "ld     r15, @r3    ; load new SP               \n\t" \
                "st     r0, @-r15   ; store 'prev' onto new stack \n\t" \
                "ld     r14, @r5    ; load new LR               \n\t" \
                "jmp    r14                                     \n\t" \
                ".fillinsn                                      \n  " \
                "1:                                             \n\t" \
                "ld     r6, @r15+   ; load 'prev' from new stack \n\t" \
                "ld     r14, @r15+                              \n\t" \
                "ld     r13, @r15+                              \n\t" \
                "ld     r12, @r15+                              \n\t" \
                "ld     r11, @r15+                              \n\t" \
                "ld     r10, @r15+                              \n\t" \
                "ld     r9, @r15+                               \n\t" \
                "ld     r8, @r15+                               \n\t" \
                : "=&r" (__last) \
                : "r" (arg0), "r" (arg1), "r" (oldsp), "r" (newsp), \
                  "r" (oldlr), "r" (newlr) \
                : "memory" \
        ); \
        last = __last; \
} while(0)
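
/*
 * Illustrative sketch (not part of this header): the scheduler's
 * context-switch path invokes switch_to() roughly as below.
 * my_context_switch() is a hypothetical name, used only to show the
 * calling convention: `last' receives the task this CPU switched away
 * from, as observed when `prev' eventually resumes (which may happen
 * from a third task, not `next').
 *
 *      static struct task_struct *
 *      my_context_switch(struct task_struct *prev, struct task_struct *next)
 *      {
 *              struct task_struct *last;
 *
 *              switch_to(prev, next, last);
 *              return last;    // task that ran just before we resumed
 *      }
 */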

/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102)
#define local_irq_enable() \
        __asm__ __volatile__ ("setpsw #0x40 -> nop" : : : "memory")
#define local_irq_disable() \
        __asm__ __volatile__ ("clrpsw #0x40 -> nop" : : : "memory")
#else   /* CONFIG_CHIP_M32102 */
static inline void local_irq_enable(void)
{
        unsigned long tmpreg;
        __asm__ __volatile__(
                "mvfc   %0, psw;                \n\t"
                "or3    %0, %0, #0x0040;        \n\t"
                "mvtc   %0, psw;                \n\t"
                : "=&r" (tmpreg) : : "cbit", "memory");
}

static inline void local_irq_disable(void)
{
        unsigned long tmpreg0, tmpreg1;
        __asm__ __volatile__(
                "ld24   %0, #0  ; Use 32-bit insn.                      \n\t"
                "mvfc   %1, psw ; No interrupt can be accepted here.    \n\t"
                "mvtc   %0, psw                                         \n\t"
                "and3   %0, %1, #0xffbf                                 \n\t"
                "mvtc   %0, psw                                         \n\t"
                : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif  /* CONFIG_CHIP_M32102 */

#define local_save_flags(x) \
        __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)

#define local_irq_restore(x) \
        __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
                : "r" (x) : "cbit", "memory")

#if !defined(CONFIG_CHIP_M32102)
#define local_irq_save(x) \
        __asm__ __volatile__( \
                "mvfc   %0, psw;        \n\t" \
                "clrpsw #0x40 -> nop;   \n\t" \
                : "=r" (x) : /* no input */ : "memory")
#else   /* CONFIG_CHIP_M32102 */
#define local_irq_save(x) \
        ({ \
                unsigned long tmpreg; \
                __asm__ __volatile__( \
                        "ld24   %1, #0          \n\t" \
                        "mvfc   %0, psw         \n\t" \
                        "mvtc   %1, psw         \n\t" \
                        "and3   %1, %0, #0xffbf \n\t" \
                        "mvtc   %1, psw         \n\t" \
                        : "=r" (x), "=&r" (tmpreg) \
                        : : "cbit", "memory"); \
        })
#endif  /* CONFIG_CHIP_M32102 */

#define irqs_disabled() \
        ({ \
                unsigned long flags; \
                local_save_flags(flags); \
                !(flags & 0x40); \
        })
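
/*
 * Illustrative sketch (not part of this header): the canonical use of
 * the primitives above is a save/disable ... restore pair, which nests
 * safely because the previous PSW value is kept in `flags':
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);          // disable IRQs, remember old PSW
 *      // ... critical section: must not be interrupted ...
 *      local_irq_restore(flags);       // restore the previous IE state
 *
 * irqs_disabled() simply tests the IE bit (0x40) of the current PSW,
 * so it can be used for sanity checks inside such a section.
 */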

#endif  /* __KERNEL__ */

#define nop()   __asm__ __volatile__ ("nop" : : )

#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr)        (xchg((ptr),1))

#ifdef CONFIG_SMP
extern void __xchg_called_with_bad_pointer(void);
#endif

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr) \
        "seth   "reg1", #high(dcache_dummy);            \n\t" \
        "or3    "reg1", "reg1", #low(dcache_dummy);     \n\t" \
        "lock   "reg0", @"reg1";                        \n\t" \
        "add3   "reg0", "addr", #0x1000;                \n\t" \
        "ld     "reg0", @"reg0";                        \n\t" \
        "add3   "reg0", "addr", #0x2000;                \n\t" \
        "ld     "reg0", @"reg0";                        \n\t" \
        "unlock "reg0", @"reg1";                        \n\t"
        /* FIXME: This workaround code cannot handle kernel modules
         * correctly in an SMP environment.
         */
#else   /* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif  /* CONFIG_CHIP_M32700_TS1 */

static __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr,
        int size)
{
        unsigned long flags;
        unsigned long tmp = 0;

        local_irq_save(flags);

        switch (size) {
#ifndef CONFIG_SMP
        case 1:
                __asm__ __volatile__ (
                        "ldb    %0, @%2 \n\t"
                        "stb    %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
        case 2:
                __asm__ __volatile__ (
                        "ldh    %0, @%2 \n\t"
                        "sth    %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
        case 4:
                __asm__ __volatile__ (
                        "ld     %0, @%2 \n\t"
                        "st     %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
#else   /* CONFIG_SMP */
        case 4:
                __asm__ __volatile__ (
                        DCACHE_CLEAR("%0", "r4", "%2")
                        "lock   %0, @%2;        \n\t"
                        "unlock %1, @%2;        \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr)
                        : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                        , "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
                );
                break;
        default:
                __xchg_called_with_bad_pointer();
#endif  /* CONFIG_SMP */
        }

        local_irq_restore(flags);

        return (tmp);
}
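
/*
 * Illustrative sketch (not part of this header): xchg()/tas() provide
 * an atomic read-modify-write, so a minimal busy-wait lock can be
 * built on top of them. my_lock_var, my_lock() and my_unlock() are
 * hypothetical names used only to show the idiom.
 *
 *      static volatile int my_lock_var = 0;
 *
 *      static void my_lock(void)
 *      {
 *              while (tas(&my_lock_var))       // old value 1 => held
 *                      ;                       // spin until we stored the 1
 *      }
 *
 *      static void my_unlock(void)
 *      {
 *              my_lock_var = 0;
 *      }
 */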

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()    barrier()
#define rmb()   mb()
#define wmb()   mb()

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()  do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#endif
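
/*
 * Illustrative sketch (not part of this header): the classic pairing
 * of smp_wmb()/smp_rmb() between a producer and a consumer. The
 * variable names are hypothetical; the point is that the write barrier
 * keeps the `data' store visible before the `ready' store, and the
 * read barrier orders the `ready' load before the `data' load.
 *
 *      // producer                     // consumer
 *      data = 42;                      while (!ready)
 *      smp_wmb();                              ;
 *      ready = 1;                      smp_rmb();
 *                                      x = data;       // sees 42
 */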

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define arch_align_stack(x) (x)

#endif  /* _ASM_M32R_SYSTEM_H */