#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

#include <linux/irqflags.h>
#include <linux/compiler.h>
#include <asm/types.h>

/*
 * switch_to() switches tasks from prev to next; last receives the
 * task we actually switched away from.
 */

#define switch_to(prev, next, last) do {                                \
        struct task_struct *__last;                                     \
        register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
        register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
        register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
        register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
        register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
        register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;  \
        __asm__ __volatile__ (".balign 4\n\t"                           \
                              "stc.l  gbr, @-r15\n\t"                   \
                              "sts.l  pr, @-r15\n\t"                    \
                              "mov.l  r8, @-r15\n\t"                    \
                              "mov.l  r9, @-r15\n\t"                    \
                              "mov.l  r10, @-r15\n\t"                   \
                              "mov.l  r11, @-r15\n\t"                   \
                              "mov.l  r12, @-r15\n\t"                   \
                              "mov.l  r13, @-r15\n\t"                   \
                              "mov.l  r14, @-r15\n\t"                   \
                              "mov.l  r15, @r1   ! save SP\n\t"         \
                              "mov.l  @r6, r15   ! change to new stack\n\t" \
                              "mova   1f, %0\n\t"                       \
                              "mov.l  %0, @r2    ! save PC\n\t"         \
                              "mov.l  2f, %0\n\t"                       \
                              "jmp    @%0        ! call __switch_to\n\t" \
                              " lds   r7, pr     !  with return to new PC\n\t" \
                              ".balign 4\n"                             \
                              "2:\n\t"                                  \
                              ".long  __switch_to\n"                    \
                              "1:\n\t"                                  \
                              "mov.l  @r15+, r14\n\t"                   \
                              "mov.l  @r15+, r13\n\t"                   \
                              "mov.l  @r15+, r12\n\t"                   \
                              "mov.l  @r15+, r11\n\t"                   \
                              "mov.l  @r15+, r10\n\t"                   \
                              "mov.l  @r15+, r9\n\t"                    \
                              "mov.l  @r15+, r8\n\t"                    \
                              "lds.l  @r15+, pr\n\t"                    \
                              "ldc.l  @r15+, gbr\n\t"                   \
                              : "=z" (__last)                           \
                              : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
                                "r" (__ts5), "r" (__ts6), "r" (__ts7)  \
                              : "r3", "t");                             \
        last = __last;                                                  \
} while (0)
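
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * scheduler's context-switch path is the intended user, e.g.
 *
 *      struct task_struct *last;
 *      switch_to(prev, next, last);
 *
 * When prev is eventually resumed, last holds the task that ran
 * immediately before prev was given the CPU back.
 */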

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#ifdef CONFIG_CPU_SH4A
#define __icbi()                                        \
do {                                                    \
        unsigned long __addr;                           \
        __addr = 0xa8000000;    /* any P2 (uncached) address will do */ \
        __asm__ __volatile__(                           \
                "icbi   %0\n\t"                         \
                : /* no output */                       \
                : "m" (__m(__addr)));                   \
} while (0)
#endif

static inline unsigned long tas(volatile int *m)
{
        unsigned long retval;

        __asm__ __volatile__ ("tas.b  @%1\n\t"
                              "movt   %0"
                              : "=r" (retval) : "r" (m) : "t", "memory");
        return retval;
}
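
/*
 * tas.b sets the T bit from a zero test of the byte at m and then sets
 * that byte's MSB, so tas() returns nonzero only to the caller that saw
 * the byte as zero.  A minimal busy-wait lock sketch on top of it
 * (hypothetical lock word, not part of this header):
 *
 *      static volatile int lock_word;
 *
 *      while (!tas(&lock_word))
 *              ;                       // spin until we claim it
 *      // ... critical section ...
 *      lock_word = 0;                  // release
 */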

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#ifdef CONFIG_CPU_SH4A
#define mb()            __asm__ __volatile__ ("synco" : : : "memory")
#define rmb()           mb()
#define wmb()           __asm__ __volatile__ ("synco" : : : "memory")
#define ctrl_barrier()  __icbi()
#define read_barrier_depends()  do { } while (0)
#else
#define mb()            __asm__ __volatile__ ("" : : : "memory")
#define rmb()           mb()
#define wmb()           __asm__ __volatile__ ("" : : : "memory")
#define ctrl_barrier()  __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()  do { } while (0)
#endif

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
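
/*
 * Typical pairing for the barriers above (generic sketch; 'data' and
 * 'flag' are hypothetical shared variables):
 *
 *      writer:  data = value;  smp_wmb();  flag = 1;
 *      reader:  if (flag) { smp_rmb();  use(data); }
 */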

/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from P2 area.
 */
#define jump_to_P2()                            \
do {                                            \
        unsigned long __dummy;                  \
        __asm__ __volatile__(                   \
                "mov.l  1f, %0\n\t"             \
                "or     %1, %0\n\t"             \
                "jmp    @%0\n\t"                \
                " nop\n\t"                      \
                ".balign 4\n"                   \
                "1:     .long 2f\n"             \
                "2:"                            \
                : "=&r" (__dummy)               \
                : "r" (0x20000000));            \
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1()                            \
do {                                            \
        unsigned long __dummy;                  \
        ctrl_barrier();                         \
        __asm__ __volatile__(                   \
                "mov.l  1f, %0\n\t"             \
                "jmp    @%0\n\t"                \
                " nop\n\t"                      \
                ".balign 4\n"                   \
                "1:     .long 2f\n"             \
                "2:"                            \
                : "=&r" (__dummy));             \
} while (0)
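
/*
 * These two macros are meant to bracket cache/TLB manipulation done
 * from uncached P2, e.g. (sketch; the CCR write is only an
 * illustration, ctrl_outl()/CCR are assumed from the cache headers):
 *
 *      jump_to_P2();                   // now executing uncached
 *      ctrl_outl(ccr_value, CCR);      // poke a cache control register
 *      back_to_P1();                   // ctrl_barrier(), back to cached P1
 */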

static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);
        return retval;
}

static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val & 0xff;
        local_irq_restore(flags);
        return retval;
}

extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size)                            \
({                                                      \
        unsigned long __xchg__res;                      \
        volatile void *__xchg_ptr = (ptr);              \
        switch (size) {                                 \
        case 4:                                         \
                __xchg__res = xchg_u32(__xchg_ptr, x);  \
                break;                                  \
        case 1:                                         \
                __xchg__res = xchg_u8(__xchg_ptr, x);   \
                break;                                  \
        default:                                        \
                __xchg_called_with_bad_pointer();       \
                __xchg__res = x;                        \
                break;                                  \
        }                                               \
                                                        \
        __xchg__res;                                    \
})

#define xchg(ptr,x)     \
        ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
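
/*
 * e.g. atomically handing off a one-word flag (sketch; 'pending' is a
 * hypothetical shared variable):
 *
 *      old = xchg(&pending, 0);        // fetch and clear in one step
 */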

static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
                                          unsigned long new)
{
        __u32 retval;
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        if (retval == old)
                *m = new;
        local_irq_restore(flags);       /* implies memory barrier */
        return retval;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)                                                \
({                                                                      \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,       \
                                       (unsigned long)_n_, sizeof(*(ptr))); \
})
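
/*
 * The canonical retry loop built on cmpxchg() (sketch; 'counter' is a
 * hypothetical shared variable):
 *
 *      do {
 *              old = counter;
 *      } while (cmpxchg(&counter, old, old + 1) != old);
 */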

extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
        /* EXPEVT exception codes are spaced 0x20 apart, so shifting the
         * event code right by 5 yields the vector number. */
        return set_exception_table_vec(evt >> 5, handler);
}

/*
 * XXX
 * disable hlt during certain critical I/O operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

#endif /* __ASM_SH_SYSTEM_H */