Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __ASM_ARM_SYSTEM_H |
2 | #define __ASM_ARM_SYSTEM_H |
3 | ||
4 | #ifdef __KERNEL__ | |
5 | ||
6 | #include <linux/config.h> | |
7 | ||
8 | /* | |
9 | * This is used to ensure the compiler did actually allocate the register we | |
10 | * asked it for in some inline assembly sequences. Apparently we can't trust |
11 | * the compiler from one version to another so a bit of paranoia won't hurt. | |
12 | * This string is meant to be concatenated with the inline asm string and | |
13 | * will cause compilation to stop on mismatch. (From ARM32 - may come in handy) | |
14 | */ | |
15 | #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" | |
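/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): __asmeq() prepends an assembler .ifnc/.err check, so the
 * build stops if GCC allocated an operand to a register other than the
 * one the code demands. A hypothetical caller that must pass its
 * argument in r0 could write:
 *
 *	register int r0 asm("r0") = arg;
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"swi	0x900001	@ hypothetical trap number"
 *		: "+r" (r0) : : "memory");
 *
 * If %0 were ever allocated elsewhere, the expanded .ifnc directive
 * hits .err and assembly fails instead of the argument silently being
 * passed in the wrong register.
 */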
16 | ||
17 | #ifndef __ASSEMBLY__ | |
18 | ||
19 | #include <linux/linkage.h> | |
20 | ||
21 | struct thread_info; | |
22 | struct task_struct; | |
23 | ||
24 | #if 0 | |
25 | /* information about the system we're running on */ | |
26 | extern unsigned int system_rev; | |
27 | extern unsigned int system_serial_low; | |
28 | extern unsigned int system_serial_high; | |
29 | extern unsigned int mem_fclk_21285; | |
30 | ||
31 | FIXME - sort this | |
32 | /* | |
33 | * We need to turn the caches off before calling the reset vector - RiscOS | |
34 | * messes up if we don't | |
35 | */ | |
36 | #define proc_hard_reset() cpu_proc_fin() | |
37 | ||
38 | #endif | |
39 | ||
40 | struct pt_regs; | |
41 | ||
42 | void die(const char *msg, struct pt_regs *regs, int err) | |
43 | __attribute__((noreturn)); | |
44 | ||
45 | void die_if_kernel(const char *str, struct pt_regs *regs, int err); | |
46 | ||
47 | void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, | |
48 | struct pt_regs *), | |
49 | int sig, const char *name); | |
50 | ||
51 | #include <asm/proc-fns.h> | |
52 | ||
53 | #define xchg(ptr,x) \ | |
54 | ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) | |
55 | ||
56 | #define tas(ptr) (xchg((ptr),1)) | |
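/*
 * Illustrative sketch (editorial addition): xchg() atomically swaps in
 * a new value and returns the old one, dispatching on operand size via
 * __xchg() further down. tas() is test-and-set built on top of it; a
 * minimal busy-wait lock over a hypothetical lock byte might look like:
 *
 *	static volatile unsigned char lock_byte;
 *
 *	while (tas(&lock_byte) != 0)
 *		barrier();
 *	... critical section ...
 *	lock_byte = 0;
 */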
57 | ||
58 | extern asmlinkage void __backtrace(void); | |
59 | ||
60 | #define set_cr(x) \ | |
61 | __asm__ __volatile__( \ | |
62 | "mcr p15, 0, %0, c1, c0, 0 @ set CR" \ | |
63 | : : "r" (x) : "cc") | |
64 | ||
65 | #define get_cr() \ | |
66 | ({ \ | |
67 | unsigned int __val; \ | |
68 | __asm__ __volatile__( \ | |
69 | "mrc p15, 0, %0, c1, c0, 0 @ get CR" \ | |
70 | : "=r" (__val) : : "cc"); \ | |
71 | __val; \ | |
72 | }) | |
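/*
 * Illustrative sketch (editorial addition): get_cr()/set_cr() access
 * the CP15 control register, so individual feature bits are changed
 * with a read-modify-write. With a hypothetical mask CR_SOME_BIT:
 *
 *	unsigned int cr = get_cr();
 *	set_cr(cr | CR_SOME_BIT);
 */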
73 | ||
74 | extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ | |
75 | extern unsigned long cr_alignment; /* defined in entry-armv.S */ | |
76 | ||
77 | #define UDBG_UNDEFINED (1 << 0) | |
78 | #define UDBG_SYSCALL (1 << 1) | |
79 | #define UDBG_BADABORT (1 << 2) | |
80 | #define UDBG_SEGV (1 << 3) | |
81 | #define UDBG_BUS (1 << 4) | |
82 | ||
83 | extern unsigned int user_debug; | |
84 | ||
85 | #define vectors_base() (0) | |
86 | ||
87 | #define mb() __asm__ __volatile__ ("" : : : "memory") | |
88 | #define rmb() mb() | |
89 | #define wmb() mb() | |
90 | #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t") |
91 | ||
92 | #define read_barrier_depends() do { } while(0) | |
93 | #define set_mb(var, value) do { var = value; mb(); } while (0) | |
94 | #define set_wmb(var, value) do { var = value; wmb(); } while (0) | |
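/*
 * Illustrative sketch (editorial addition): on this UP-only port the
 * barriers reduce to compiler barriers. set_mb() stores a value and
 * then fences, e.g. publishing a hypothetical ready flag after the
 * data it guards:
 *
 *	shared_data = value;
 *	set_mb(data_ready, 1);
 */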
95 | ||
96 | /* | |
97 | * We assume knowledge of how spin_unlock_irq() and friends are |
98 | * implemented. This avoids us needlessly decrementing and |
99 | * incrementing the preempt count. |
100 | */ | |
407c57b8 AD |
101 | #define prepare_arch_switch(next) local_irq_enable() |
102 | #define finish_arch_switch(prev) spin_unlock(&(rq)->lock) | |
1da177e4 LT |
103 | |
104 | /* | |
105 | * switch_to(prev, next) should switch from task `prev' to `next'; |
106 | * `prev' will never be the same as `next'. schedule() itself | |
107 | * contains the memory barrier to tell GCC not to cache `current'. | |
108 | */ | |
109 | extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); | |
110 | ||
111 | #define switch_to(prev,next,last) \ | |
112 | do { \ | |
697102cd | 113 | last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \ |
1da177e4 LT |
114 | } while (0) |
115 | ||
4dc7a0bb IM |
116 | /* |
117 | * On SMP systems, when the scheduler does migration-cost autodetection, | |
118 | * it needs a way to flush as much of the CPU's caches as possible. | |
119 | * | |
120 | * TODO: fill this in! | |
121 | */ | |
122 | static inline void sched_cacheflush(void) | |
123 | { | |
124 | } | |
125 | ||
1da177e4 LT |
126 | /* |
127 | * Save the current interrupt enable state & disable IRQs | |
128 | */ | |
129 | #define local_irq_save(x) \ | |
130 | do { \ | |
131 | unsigned long temp; \ | |
132 | __asm__ __volatile__( \ | |
133 | " mov %0, pc @ save_flags_cli\n" \ | |
134 | " orr %1, %0, #0x08000000\n" \ | |
135 | " and %0, %0, #0x0c000000\n" \ | |
136 | " teqp %1, #0\n" \ | |
137 | : "=r" (x), "=r" (temp) \ | |
138 | : \ | |
139 | : "memory"); \ | |
140 | } while (0) | |
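/*
 * Editorial note: on 26-bit ARM the PSR lives in the top bits of r15
 * alongside the PC, so "mov %0, pc" captures the current flags and
 * "teqp" (TEQ with the P suffix) writes the computed value back into
 * the PSR. 0x08000000 is the I (IRQ disable) flag, 0x04000000 is the
 * F (FIQ disable) flag, and 0x0c000000 masks both; every flag macro
 * here relies on that layout.
 */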
141 | ||
142 | /* | |
143 | * Enable IRQs (sti) | |
144 | */ | |
145 | #define local_irq_enable() \ | |
146 | do { \ | |
147 | unsigned long temp; \ | |
148 | __asm__ __volatile__( \ | |
149 | " mov %0, pc @ sti\n" \ | |
150 | " bic %0, %0, #0x08000000\n" \ | |
151 | " teqp %0, #0\n" \ | |
152 | : "=r" (temp) \ | |
153 | : \ | |
154 | : "memory"); \ | |
155 | } while(0) | |
156 | ||
157 | /* | |
158 | * Disable IRQs (cli) | |
159 | */ | |
160 | #define local_irq_disable() \ | |
161 | do { \ | |
162 | unsigned long temp; \ | |
163 | __asm__ __volatile__( \ | |
164 | " mov %0, pc @ cli\n" \ | |
165 | " orr %0, %0, #0x08000000\n" \ | |
166 | " teqp %0, #0\n" \ | |
167 | : "=r" (temp) \ | |
168 | : \ | |
169 | : "memory"); \ | |
170 | } while(0) | |
171 | ||
172 | /* Enable FIQs (stf) */ | |
173 | ||
174 | #define __stf() do { \ | |
175 | unsigned long temp; \ | |
176 | __asm__ __volatile__( \ | |
177 | " mov %0, pc @ stf\n" \ | |
178 | " bic %0, %0, #0x04000000\n" \ | |
179 | " teqp %0, #0\n" \ | |
180 | : "=r" (temp)); \ | |
181 | } while(0) | |
182 | ||
183 | /* Disable FIQs (clf) */ | |
184 | ||
185 | #define __clf() do { \ | |
186 | unsigned long temp; \ | |
187 | __asm__ __volatile__( \ | |
188 | " mov %0, pc @ clf\n" \ | |
189 | " orr %0, %0, #0x04000000\n" \ | |
190 | " teqp %0, #0\n" \ | |
191 | : "=r" (temp)); \ | |
192 | } while(0) | |
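/*
 * Illustrative sketch (editorial addition): __stf()/__clf() flip the
 * F flag exactly as the IRQ macros above flip I. Code touching state
 * shared with a FIQ handler might bracket the update:
 *
 *	clf();
 *	... update FIQ-shared state ...
 *	stf();
 *
 * Note that stf() unconditionally re-enables FIQs; there is no
 * saved-state variant as there is for IRQs.
 */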
193 | ||
194 | ||
195 | /* | |
196 | * Save the current interrupt enable state. | |
197 | */ | |
198 | #define local_save_flags(x) \ | |
199 | do { \ | |
200 | __asm__ __volatile__( \ | |
201 | " mov %0, pc @ save_flags\n" \ | |
202 | " and %0, %0, #0x0c000000\n" \ | |
203 | : "=r" (x)); \ | |
204 | } while (0) | |
205 | ||
206 | ||
207 | /* | |
208 | * restore saved IRQ & FIQ state | |
209 | */ | |
210 | #define local_irq_restore(x) \ | |
211 | do { \ | |
212 | unsigned long temp; \ | |
213 | __asm__ __volatile__( \ | |
214 | " mov %0, pc @ restore_flags\n" \ | |
215 | " bic %0, %0, #0x0c000000\n" \ | |
216 | " orr %0, %0, %1\n" \ | |
217 | " teqp %0, #0\n" \ | |
218 | : "=&r" (temp) \ | |
219 | : "r" (x) \ | |
220 | : "memory"); \ | |
221 | } while (0) | |
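/*
 * Illustrative sketch (editorial addition): the canonical pattern is
 * save-and-disable around a critical section, then restore, so callers
 * that already run with IRQs off stay that way:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... code that must not be interrupted ...
 *	local_irq_restore(flags);
 */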
222 | ||
223 | ||
224 | #ifdef CONFIG_SMP | |
225 | #error SMP not supported | |
226 | #endif | |
227 | ||
228 | #define smp_mb() barrier() | |
229 | #define smp_rmb() barrier() | |
230 | #define smp_wmb() barrier() | |
231 | #define smp_read_barrier_depends() do { } while(0) | |
232 | ||
233 | #define clf() __clf() | |
234 | #define stf() __stf() | |
235 | ||
236 | #define irqs_disabled() \ | |
237 | ({ \ | |
238 | unsigned long flags; \ | |
239 | local_save_flags(flags); \ | |
240 | flags & PSR_I_BIT; \ | |
241 | }) | |
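/*
 * Illustrative sketch (editorial addition): irqs_disabled() extracts
 * the I flag from the saved PSR, which makes it handy for sanity
 * checks, e.g.:
 *
 *	if (irqs_disabled())
 *		printk("hypothetical: called with IRQs off\n");
 */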
242 | ||
243 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) | |
244 | { | |
245 | extern void __bad_xchg(volatile void *, int); | |
246 | ||
247 | switch (size) { | |
248 | case 1: return cpu_xchg_1(x, ptr); | |
249 | case 4: return cpu_xchg_4(x, ptr); | |
250 | default: __bad_xchg(ptr, size); | |
251 | } | |
252 | return 0; | |
253 | } | |
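/*
 * Editorial note: __bad_xchg() is declared but deliberately never
 * defined, so an xchg() on an unsupported operand size (anything other
 * than 1 or 4 bytes here) becomes a link-time error instead of quietly
 * doing the wrong thing.
 */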
254 | ||
255 | #endif /* __ASSEMBLY__ */ | |
256 | ||
257 | #define arch_align_stack(x) (x) | |
258 | ||
259 | #endif /* __KERNEL__ */ | |
260 | ||
261 | #endif |