Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __ASM_ARM_SYSTEM_H |
2 | #define __ASM_ARM_SYSTEM_H | |
3 | ||
4 | #ifdef __KERNEL__ | |
5 | ||
6 | #include <linux/config.h> | |
7 | ||
8 | #define CPU_ARCH_UNKNOWN 0 | |
9 | #define CPU_ARCH_ARMv3 1 | |
10 | #define CPU_ARCH_ARMv4 2 | |
11 | #define CPU_ARCH_ARMv4T 3 | |
12 | #define CPU_ARCH_ARMv5 4 | |
13 | #define CPU_ARCH_ARMv5T 5 | |
14 | #define CPU_ARCH_ARMv5TE 6 | |
15 | #define CPU_ARCH_ARMv5TEJ 7 | |
16 | #define CPU_ARCH_ARMv6 8 | |
17 | ||
18 | /* | |
19 | * CR1 bits (CP#15 CR1) | |
20 | */ | |
21 | #define CR_M (1 << 0) /* MMU enable */ | |
22 | #define CR_A (1 << 1) /* Alignment abort enable */ | |
23 | #define CR_C (1 << 2) /* Dcache enable */ | |
24 | #define CR_W (1 << 3) /* Write buffer enable */ | |
25 | #define CR_P (1 << 4) /* 32-bit exception handler */ | |
26 | #define CR_D (1 << 5) /* 32-bit data address range */ | |
27 | #define CR_L (1 << 6) /* Implementation defined */ | |
28 | #define CR_B (1 << 7) /* Big endian */ | |
29 | #define CR_S (1 << 8) /* System MMU protection */ | |
30 | #define CR_R (1 << 9) /* ROM MMU protection */ | |
31 | #define CR_F (1 << 10) /* Implementation defined */ | |
32 | #define CR_Z (1 << 11) /* Implementation defined */ | |
33 | #define CR_I (1 << 12) /* Icache enable */ | |
34 | #define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ | |
35 | #define CR_RR (1 << 14) /* Round Robin cache replacement */ | |
36 | #define CR_L4 (1 << 15) /* LDR pc can set T bit */ | |
37 | #define CR_DT (1 << 16) | |
38 | #define CR_IT (1 << 18) | |
39 | #define CR_ST (1 << 19) | |
40 | #define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ | |
41 | #define CR_U (1 << 22) /* Unaligned access operation */ | |
42 | #define CR_XP (1 << 23) /* Extended page tables */ | |
43 | #define CR_VE (1 << 24) /* Vectored interrupts */ | |
44 | ||
45 | #define CPUID_ID 0 | |
46 | #define CPUID_CACHETYPE 1 | |
47 | #define CPUID_TCM 2 | |
48 | #define CPUID_TLBTYPE 3 | |
49 | ||
50 | #define read_cpuid(reg) \ | |
51 | ({ \ | |
52 | unsigned int __val; \ | |
53 | asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \ | |
54 | : "=r" (__val) \ | |
55 | : \ | |
56 | : "cc"); \ | |
57 | __val; \ | |
58 | }) | |
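
A quick usage sketch (not part of this header): `read_cpuid()` takes a compile-time register index, and the result can be decoded by hand. The field positions below follow the architected main ID register layout; the variable names are placeholders.

```c
/* Illustrative decode of the main ID register (CP15 c0, c0, 0). */
unsigned int id = read_cpuid(CPUID_ID);
unsigned int implementer = (id >> 24) & 0xff;	/* 0x41 ('A') = ARM Ltd. */
unsigned int part_number = (id >> 4) & 0xfff;	/* primary part number */
```

Note that `reg` must be a literal constant: it is pasted into the mnemonic via `__stringify()`, so a runtime value cannot work.
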
59 | ||
60 | /* | |
61 | * This is used to ensure that the compiler actually allocated the register we | |
62 | * asked for in some inline assembly sequences. Apparently we can't trust | |
63 | * the compiler from one version to the next, so a bit of paranoia won't hurt. | |
64 | * This string is meant to be concatenated with the inline asm string and | |
65 | * will cause compilation to stop on mismatch. | |
66 | * (for details, see gcc PR 15089) | |
67 | */ | |
68 | #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" | |
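
A minimal sketch of the idiom, assuming a hypothetical assembly routine `my_helper` that takes its argument in `r0`: if GCC ever fails to honour the register annotation, the `.ifnc` directive makes the assembler error out instead of silently producing wrong code.

```c
static inline unsigned long call_my_helper(unsigned long x)
{
	register unsigned long r0_arg asm("r0") = x;	/* ask for r0 */

	asm volatile(
		__asmeq("%0", "r0")	/* assemble-time check: %0 really is r0 */
		"bl	my_helper"	/* my_helper is a placeholder symbol */
		: "+r" (r0_arg)
		:
		: "r1", "r2", "r3", "ip", "lr", "cc", "memory");

	return r0_arg;
}
```
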
69 | ||
70 | #ifndef __ASSEMBLY__ | |
71 | ||
72 | #include <linux/linkage.h> | |
73 | ||
74 | struct thread_info; | |
75 | struct task_struct; | |
76 | ||
77 | /* information about the system we're running on */ | |
78 | extern unsigned int system_rev; | |
79 | extern unsigned int system_serial_low; | |
80 | extern unsigned int system_serial_high; | |
81 | extern unsigned int mem_fclk_21285; | |
82 | ||
83 | struct pt_regs; | |
84 | ||
85 | void die(const char *msg, struct pt_regs *regs, int err) | |
86 | __attribute__((noreturn)); | |
87 | ||
cfb0810e RK |
88 | struct siginfo; |
89 | void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, | |
90 | unsigned long err, unsigned long trap); | |
1da177e4 LT |
91 | |
92 | void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, | |
93 | struct pt_regs *), | |
94 | int sig, const char *name); | |
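
As a hedged sketch of the `hook_fault_code()` contract: the handler returns 0 to say the fault was dealt with, and nonzero to fall through to the default action. The fault-status number, handler, and name below are all made up for illustration.

```c
static int example_abort_handler(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	/* Pretend we fixed it up; nonzero would mean "not handled". */
	return 0;
}

static void example_hook_init(void)
{
	/* 16 is an arbitrary example fault-status code. */
	hook_fault_code(16, example_abort_handler, SIGBUS,
			"example external abort");
}
```
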
95 | ||
1da177e4 LT |
96 | #define xchg(ptr,x) \ |
97 | ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) | |
98 | ||
99 | #define tas(ptr) (xchg((ptr),1)) | |
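
`tas()` is just `xchg()` with the constant 1, i.e. an atomic test-and-set. Purely as an illustration of the primitive (real code would use the spinlock API), a toy lock built on it might look like this; the names are placeholders.

```c
static volatile unsigned long toy_lock;

static void toy_acquire(void)
{
	while (tas(&toy_lock))		/* nonzero old value: already held */
		;			/* spin until we observe 0 */
}

static void toy_release(void)
{
	toy_lock = 0;			/* plain store drops the lock */
}
```
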
100 | ||
101 | extern asmlinkage void __backtrace(void); | |
652a12ef | 102 | extern asmlinkage void c_backtrace(unsigned long fp, int pmode); |
5470dc65 RK |
103 | |
104 | struct mm_struct; | |
652a12ef RK |
105 | extern void show_pte(struct mm_struct *mm, unsigned long addr); |
106 | extern void __show_regs(struct pt_regs *); | |
1da177e4 LT |
107 | |
108 | extern int cpu_architecture(void); | |
36c5ed23 | 109 | extern void cpu_init(void); |
1da177e4 LT |
110 | |
111 | #define set_cr(x) \ | |
112 | __asm__ __volatile__( \ | |
113 | "mcr p15, 0, %0, c1, c0, 0 @ set CR" \ | |
114 | : : "r" (x) : "cc") | |
115 | ||
116 | #define get_cr() \ | |
117 | ({ \ | |
118 | unsigned int __val; \ | |
119 | __asm__ __volatile__( \ | |
120 | "mrc p15, 0, %0, c1, c0, 0 @ get CR" \ | |
121 | : "=r" (__val) : : "cc"); \ | |
122 | __val; \ | |
123 | }) | |
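
These two accessors combine into the usual read-modify-write pattern on the control register. A hedged sketch using the `CR_I` bit defined above; actually enabling the Icache this way assumes the CPU is otherwise set up for it.

```c
static inline void icache_enable(void)
{
	unsigned int cr = get_cr();

	if (!(cr & CR_I))
		set_cr(cr | CR_I);	/* flip only the Icache enable bit */
}
```
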
124 | ||
125 | extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ | |
126 | extern unsigned long cr_alignment; /* defined in entry-armv.S */ | |
127 | ||
128 | #define UDBG_UNDEFINED (1 << 0) | |
129 | #define UDBG_SYSCALL (1 << 1) | |
130 | #define UDBG_BADABORT (1 << 2) | |
131 | #define UDBG_SEGV (1 << 3) | |
132 | #define UDBG_BUS (1 << 4) | |
133 | ||
134 | extern unsigned int user_debug; | |
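
The `UDBG_*` bits select which user-space misbehaviour gets reported; the fault and trap paths test `user_debug` before printing. A hedged sketch of the consuming pattern, with a placeholder message:

```c
static void maybe_report_segv(void)
{
	if (user_debug & UDBG_SEGV)		/* opted in via user_debug= */
		printk("user segfault trapped\n");	/* placeholder text */
}
```
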
135 | ||
136 | #if __LINUX_ARM_ARCH__ >= 4 | |
137 | #define vectors_high() (cr_alignment & CR_V) | |
138 | #else | |
139 | #define vectors_high() (0) | |
140 | #endif | |
141 | ||
6d9b37a3 RK |
142 | #if __LINUX_ARM_ARCH__ >= 6 |
143 | #define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ | |
144 | : : "r" (0) : "memory") | |
145 | #else | |
1da177e4 | 146 | #define mb() __asm__ __volatile__ ("" : : : "memory") |
6d9b37a3 | 147 | #endif |
1da177e4 LT |
148 | #define rmb() mb() |
149 | #define wmb() mb() | |
150 | #define read_barrier_depends() do { } while(0) | |
151 | #define set_mb(var, value) do { var = value; mb(); } while (0) | |
152 | #define set_wmb(var, value) do { var = value; wmb(); } while (0) | |
153 | #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); | |
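
A hedged producer-side sketch of what these barriers are for: `wmb()` keeps the payload store visible before the ready flag, and a consumer would pair it with `rmb()`. The two variables are placeholders.

```c
static int payload;
static int ready;

static void publish(int v)
{
	payload = v;
	wmb();		/* order: payload store before ready store */
	ready = 1;	/* consumer: while (!ready) ; rmb(); use payload */
}
```
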
154 | ||
1da177e4 | 155 | /* |
4866cde0 NP |
156 | * switch_mm() may do a full cache flush over the context switch, |
157 | * so enable interrupts over the context switch to avoid high | |
158 | * latency. | |
1da177e4 | 159 | */ |
4866cde0 | 160 | #define __ARCH_WANT_INTERRUPTS_ON_CTXSW |
1da177e4 LT |
161 | |
162 | /* | |
163 | * switch_to(prev, next) should switch from task `prev' to `next'. | |
164 | * `prev' will never be the same as `next'. schedule() itself | |
165 | * contains the memory barrier to tell GCC not to cache `current'. | |
166 | */ | |
167 | extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); | |
168 | ||
169 | #define switch_to(prev,next,last) \ | |
170 | do { \ | |
171 | last = __switch_to(prev,prev->thread_info,next->thread_info); \ | |
172 | } while (0) | |
173 | ||
4dc7a0bb IM |
174 | /* |
175 | * On SMP systems, when the scheduler does migration-cost autodetection, | |
176 | * it needs a way to flush as much of the CPU's caches as possible. | |
177 | * | |
178 | * TODO: fill this in! | |
179 | */ | |
180 | static inline void sched_cacheflush(void) | |
181 | { | |
182 | } | |
183 | ||
1da177e4 LT |
184 | /* |
185 | * CPU interrupt mask handling. | |
186 | */ | |
187 | #if __LINUX_ARM_ARCH__ >= 6 | |
188 | ||
189 | #define local_irq_save(x) \ | |
190 | ({ \ | |
191 | __asm__ __volatile__( \ | |
192 | "mrs %0, cpsr @ local_irq_save\n" \ | |
193 | "cpsid i" \ | |
194 | : "=r" (x) : : "memory", "cc"); \ | |
195 | }) | |
196 | ||
197 | #define local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc") | |
198 | #define local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc") | |
199 | #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") | |
200 | #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") | |
201 | ||
202 | #else | |
203 | ||
204 | /* | |
205 | * Save the current interrupt enable state & disable IRQs | |
206 | */ | |
207 | #define local_irq_save(x) \ | |
208 | ({ \ | |
209 | unsigned long temp; \ | |
210 | (void) (&temp == &x); \ | |
211 | __asm__ __volatile__( \ | |
212 | "mrs %0, cpsr @ local_irq_save\n" \ | |
213 | " orr %1, %0, #128\n" \ | |
214 | " msr cpsr_c, %1" \ | |
215 | : "=r" (x), "=r" (temp) \ | |
216 | : \ | |
217 | : "memory", "cc"); \ | |
218 | }) | |
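
The odd-looking `(void) (&temp == &x)` is a compile-time type check: `temp` is an `unsigned long`, and comparing pointers to distinct types draws a GCC diagnostic, so the macro complains unless the caller's flags variable has the right type. A standalone sketch of what the check catches:

```c
static void bad_caller(void)
{
	unsigned int wrong_flags;	/* should be unsigned long */

	/* Expands to (&temp == &wrong_flags): unsigned long * versus
	 * unsigned int *, so GCC warns and the type bug is caught. */
	local_irq_save(wrong_flags);
	local_irq_restore(wrong_flags);
}
```
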
219 | ||
220 | /* | |
221 | * Enable IRQs | |
222 | */ | |
223 | #define local_irq_enable() \ | |
224 | ({ \ | |
225 | unsigned long temp; \ | |
226 | __asm__ __volatile__( \ | |
227 | "mrs %0, cpsr @ local_irq_enable\n" \ | |
228 | " bic %0, %0, #128\n" \ | |
229 | " msr cpsr_c, %0" \ | |
230 | : "=r" (temp) \ | |
231 | : \ | |
232 | : "memory", "cc"); \ | |
233 | }) | |
234 | ||
235 | /* | |
236 | * Disable IRQs | |
237 | */ | |
238 | #define local_irq_disable() \ | |
239 | ({ \ | |
240 | unsigned long temp; \ | |
241 | __asm__ __volatile__( \ | |
242 | "mrs %0, cpsr @ local_irq_disable\n" \ | |
243 | " orr %0, %0, #128\n" \ | |
244 | " msr cpsr_c, %0" \ | |
245 | : "=r" (temp) \ | |
246 | : \ | |
247 | : "memory", "cc"); \ | |
248 | }) | |
249 | ||
250 | /* | |
251 | * Enable FIQs | |
252 | */ | |
253 | #define local_fiq_enable() \ | |
254 | ({ \ | |
255 | unsigned long temp; \ | |
256 | __asm__ __volatile__( \ | |
257 | "mrs %0, cpsr @ stf\n" \ | |
258 | " bic %0, %0, #64\n" \ | |
259 | " msr cpsr_c, %0" \ | |
260 | : "=r" (temp) \ | |
261 | : \ | |
262 | : "memory", "cc"); \ | |
263 | }) | |
264 | ||
265 | /* | |
266 | * Disable FIQs | |
267 | */ | |
268 | #define local_fiq_disable() \ | |
269 | ({ \ | |
270 | unsigned long temp; \ | |
271 | __asm__ __volatile__( \ | |
272 | "mrs %0, cpsr @ clf\n" \ | |
273 | " orr %0, %0, #64\n" \ | |
274 | " msr cpsr_c, %0" \ | |
275 | : "=r" (temp) \ | |
276 | : \ | |
277 | : "memory", "cc"); \ | |
278 | }) | |
279 | ||
280 | #endif | |
281 | ||
282 | /* | |
283 | * Save the current interrupt enable state. | |
284 | */ | |
285 | #define local_save_flags(x) \ | |
286 | ({ \ | |
287 | __asm__ __volatile__( \ | |
288 | "mrs %0, cpsr @ local_save_flags" \ | |
289 | : "=r" (x) : : "memory", "cc"); \ | |
290 | }) | |
291 | ||
292 | /* | |
293 | * restore saved IRQ & FIQ state | |
294 | */ | |
295 | #define local_irq_restore(x) \ | |
296 | __asm__ __volatile__( \ | |
297 | "msr cpsr_c, %0 @ local_irq_restore\n" \ | |
298 | : \ | |
299 | : "r" (x) \ | |
300 | : "memory", "cc") | |
301 | ||
302 | #define irqs_disabled() \ | |
303 | ({ \ | |
304 | unsigned long flags; \ | |
305 | local_save_flags(flags); \ | |
9a558cb4 | 306 | (int)(flags & PSR_I_BIT); \ |
1da177e4 LT |
307 | }) |
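
Taken together, the canonical critical-section pattern with these macros is save, work, restore; restoring (rather than unconditionally enabling) nests safely inside sections that already masked IRQs. A hedged sketch with a placeholder counter:

```c
static unsigned long event_count;

static void count_event(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask IRQs, remember old state */
	event_count++;			/* non-atomic update kept IRQ-safe */
	local_irq_restore(flags);	/* put the old IRQ state back */
}
```
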
308 | ||
309 | #ifdef CONFIG_SMP | |
1da177e4 LT |
310 | |
311 | #define smp_mb() mb() | |
312 | #define smp_rmb() rmb() | |
313 | #define smp_wmb() wmb() | |
314 | #define smp_read_barrier_depends() read_barrier_depends() | |
315 | ||
316 | #else | |
317 | ||
318 | #define smp_mb() barrier() | |
319 | #define smp_rmb() barrier() | |
320 | #define smp_wmb() barrier() | |
321 | #define smp_read_barrier_depends() do { } while(0) | |
322 | ||
053a7b5b RK |
323 | #endif /* CONFIG_SMP */ |
324 | ||
1da177e4 LT |
325 | #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) |
326 | /* | |
327 | * On the StrongARM, "swp" is terminally broken since it bypasses the | |
328 | * cache totally. This means that the cache becomes inconsistent, and, | |
329 | * since we use normal loads/stores as well, this is really bad. | |
330 | * Typically, this causes oopsen in filp_close, but could have other, | |
331 | * more disastrous effects. There are two work-arounds: | |
332 | * 1. Disable interrupts and emulate the atomic swap | |
333 | * 2. Clean the cache, perform atomic swap, flush the cache | |
334 | * | |
335 | * We choose (1) since it's the "easiest" to achieve here and is not | |
336 | * dependent on the processor type. | |
053a7b5b RK |
337 | * |
338 | * NOTE that this solution won't work on an SMP system, so explicitly | |
339 | * forbid it here. | |
1da177e4 LT |
340 | */ |
341 | #define swp_is_buggy | |
342 | #endif | |
343 | ||
344 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) | |
345 | { | |
346 | extern void __bad_xchg(volatile void *, int); | |
347 | unsigned long ret; | |
348 | #ifdef swp_is_buggy | |
349 | unsigned long flags; | |
350 | #endif | |
9560782f RK |
351 | #if __LINUX_ARM_ARCH__ >= 6 |
352 | unsigned int tmp; | |
353 | #endif | |
1da177e4 LT |
354 | |
355 | switch (size) { | |
9560782f RK |
356 | #if __LINUX_ARM_ARCH__ >= 6 |
357 | case 1: | |
358 | asm volatile("@ __xchg1\n" | |
359 | "1: ldrexb %0, [%3]\n" | |
360 | " strexb %1, %2, [%3]\n" | |
361 | " teq %1, #0\n" | |
362 | " bne 1b" | |
363 | : "=&r" (ret), "=&r" (tmp) | |
364 | : "r" (x), "r" (ptr) | |
365 | : "memory", "cc"); | |
366 | break; | |
367 | case 4: | |
368 | asm volatile("@ __xchg4\n" | |
369 | "1: ldrex %0, [%3]\n" | |
370 | " strex %1, %2, [%3]\n" | |
371 | " teq %1, #0\n" | |
372 | " bne 1b" | |
373 | : "=&r" (ret), "=&r" (tmp) | |
374 | : "r" (x), "r" (ptr) | |
375 | : "memory", "cc"); | |
376 | break; | |
377 | #elif defined(swp_is_buggy) | |
378 | #ifdef CONFIG_SMP | |
379 | #error SMP is not supported on this platform | |
380 | #endif | |
381 | case 1: | |
382 | local_irq_save(flags); | |
383 | ret = *(volatile unsigned char *)ptr; | |
384 | *(volatile unsigned char *)ptr = x; | |
385 | local_irq_restore(flags); | |
386 | break; | |
387 | ||
388 | case 4: | |
389 | local_irq_save(flags); | |
390 | ret = *(volatile unsigned long *)ptr; | |
391 | *(volatile unsigned long *)ptr = x; | |
392 | local_irq_restore(flags); | |
393 | break; | |
1da177e4 | 394 | #else |
9560782f RK |
395 | case 1: |
396 | asm volatile("@ __xchg1\n" | |
397 | " swpb %0, %1, [%2]" | |
398 | : "=&r" (ret) | |
399 | : "r" (x), "r" (ptr) | |
400 | : "memory", "cc"); | |
401 | break; | |
402 | case 4: | |
403 | asm volatile("@ __xchg4\n" | |
404 | " swp %0, %1, [%2]" | |
405 | : "=&r" (ret) | |
406 | : "r" (x), "r" (ptr) | |
407 | : "memory", "cc"); | |
408 | break; | |
1da177e4 | 409 | #endif |
9560782f RK |
410 | default: |
411 | __bad_xchg(ptr, size), ret = 0; | |
412 | break; | |
1da177e4 LT |
413 | } |
414 | ||
415 | return ret; | |
416 | } | |
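
A usage sketch of the `xchg()` wrapper defined earlier: the `sizeof` of the pointee picks the 1-byte or 4-byte case at compile time, and any other size reaches the undefined `__bad_xchg()` and fails at link time. The variables here are placeholders.

```c
static unsigned long word_val = 1;
static unsigned char byte_val = 2;

static void xchg_demo(void)
{
	unsigned long old_word = xchg(&word_val, 5);	/* 4-byte path */
	unsigned char old_byte = xchg(&byte_val, 7);	/* 1-byte path */

	(void)old_word;		/* previous contents come back atomically */
	(void)old_byte;
}
```
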
417 | ||
1da177e4 LT |
418 | #endif /* __ASSEMBLY__ */ |
419 | ||
420 | #define arch_align_stack(x) (x) | |
421 | ||
422 | #endif /* __KERNEL__ */ | |
423 | ||
424 | #endif |