#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

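/*
 * Usage sketch (illustrative, not part of the original header): pin a
 * value to a register with a register asm variable and let __asmeq
 * verify the binding before the instruction that depends on it:
 *
 *	register unsigned long ret asm("r0");
 *	asm volatile(__asmeq("%0", "r0")
 *		     "mov	%0, #0"
 *		     : "=r" (ret));
 */
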
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#include <asm/outercache.h>

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

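/*
 * Usage sketch (illustrative, not from the original source): xchg()
 * atomically stores a new value and returns the old one, e.g. a
 * hand-rolled test-and-set:
 *
 *	static unsigned long flag;
 *	...
 *	if (xchg(&flag, 1) == 0) {
 *		... we were first, do the one-time work ...
 *	}
 */
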
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		do { dsb(); outer_sync(); } while (0)
#define rmb()		dmb()
#define wmb()		mb()
#else
#define mb()		do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()		do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()		do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#endif

#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)
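
/*
 * Pairing sketch (illustrative, not from the original source): a writer
 * publishes data and then a flag; the reader waits on the flag before
 * touching the data.  smp_wmb()/smp_rmb() keep the two stores and the
 * two loads ordered on SMP:
 *
 *	writer				reader
 *	data = compute();		while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					use(data);
 */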

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

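/*
 * Usage sketch (illustrative): flip CR1 bits through the helpers above.
 * For example, enabling alignment aborts on a UP kernel (adjust_cr()
 * also keeps the cached control register values in step):
 *
 *	adjust_cr(CR_A, CR_A);
 *
 * or by a direct read-modify-write:
 *
 *	set_cr(get_cr() | CR_A);
 */
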
#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
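
/*
 * Usage sketch (illustrative, mirroring what VFP initialisation does):
 * grant full (user and kernel) access to coprocessors 10 and 11:
 *
 *	set_copro_access(get_copro_access()
 *			 | CPACC_FULL(10) | CPACC_FULL(11));
 */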

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif


static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	/* ARMv6 and later: retry loop built on exclusive load/store */
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	/* StrongARM: emulate the swap with interrupts disabled */
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	/* everything else: the swp/swpb instructions do the job */
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* __LINUX_ARM_ARCH__ >= 6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifdef CONFIG_CPU_32v6K
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif /* CONFIG_CPU_32v6K */
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))

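/*
 * Usage sketch (illustrative, not from the original source): a
 * lock-free increment built on the fully-ordered cmpxchg():
 *
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */
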
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifndef CONFIG_CPU_32v6K
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif	/* !CONFIG_CPU_32v6K */
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))

#ifdef CONFIG_CPU_32v6K

/*
 * Note : ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd.  If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))

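/*
 * Usage sketch (illustrative): atomically replace a 64-bit value only
 * if it still holds what we last read:
 *
 *	u64 seen = cmpxchg64(&val64, expected, replacement);
 *	if (seen == expected) {
 *		... the update happened ...
 *	}
 */
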
#else /* !CONFIG_CPU_32v6K */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif /* CONFIG_CPU_32v6K */

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif