/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define pull            lsr
#define push            lsl
#define get_byte_0      lsl #0
#define get_byte_1      lsr #8
#define get_byte_2      lsr #16
#define get_byte_3      lsr #24
#define put_byte_0      lsl #0
#define put_byte_1      lsl #8
#define put_byte_2      lsl #16
#define put_byte_3      lsl #24
#else
#define pull            lsl
#define push            lsr
#define get_byte_0      lsr #24
#define get_byte_1      lsr #16
#define get_byte_2      lsr #8
#define get_byte_3      lsl #0
#define put_byte_0      lsl #24
#define put_byte_1      lsl #16
#define put_byte_2      lsl #8
#define put_byte_3      lsl #0
#endif

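/*
 * Illustrative sketch (not part of the original file) of how the macros
 * above are typically used.  The register names and the one-byte source
 * misalignment are assumptions made for the example only; r1 is taken to
 * be already rounded down to a word boundary.
 *
 * Forwarding a misaligned source word from two aligned loads:
 *
 *	ldr	r4, [r1], #4		@ aligned word holding the first bytes
 *	ldr	r5, [r1], #4		@ next aligned word
 *	mov	r3, r4, pull #8		@ drop the byte(s) before the data
 *	orr	r3, r3, r5, push #24	@ splice in the byte(s) of the next word
 *
 * Extracting the byte at the lowest address of a loaded word:
 *
 *	mov	r3, r2, get_byte_0
 *	strb	r3, [r0], #1
 *
 * Because pull/push and get_byte_N/put_byte_N swap meaning between the
 * two #ifdef branches, the same source assembles correctly for both
 * little- and big-endian kernels.
 */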
/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)    code
#else
#define PLD(code...)
#endif

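/*
 * Illustrative sketch (not part of the original file): wrapping a
 * prefetch in PLD() lets the same copy loop build for pre-ARMv5 cores,
 * where the macro expands to nothing.  The registers and the prefetch
 * offset are assumptions for the example only.
 *
 *	PLD(	pld	[r1, #32]	)	@ prefetch one cache line ahead
 *	ldmia	r1!, {r3 - r6}			@ then load the current block
 *	stmia	r0!, {r3 - r6}
 */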
/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be a worthwhile thing to do when the cache
 * is not set to write-allocate (this would need further testing on
 * XScale when WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

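/*
 * Illustrative sketch (not part of the original file): each instruction
 * of the optional alignment preamble is wrapped in CALGN() so that the
 * whole preamble disappears on CPUs where it is not worthwhile.  The
 * register use and the 32-byte cache line size are assumptions for the
 * example only.
 *
 *	CALGN(	ands	ip, r0, #31	)	@ offset of dest within a line
 *	CALGN(	rsb	ip, ip, #32	)	@ bytes to the next line boundary
 *	CALGN(	beq	2f		)	@ already aligned, skip
 *	@ ... copy "ip" leading bytes here, then fall into the aligned loop ...
 * 2:
 */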
/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq
	cpsid	i
	.endm

	.macro	enable_irq
	cpsie	i
	.endm
#else
	.macro	disable_irq
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

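/*
 * Illustrative sketch (not part of the original file): a critical
 * section built from the two macros above.  The code in between is a
 * placeholder.
 *
 *	disable_irq			@ mask IRQs
 *	@ ... code that must not be interrupted ...
 *	enable_irq			@ unmask IRQs again
 *
 * Note that enable_irq unconditionally re-enables IRQs; if the previous
 * state must be preserved, use save_and_disable_irqs/restore_irqs below.
 */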
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm

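/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * of the two macros above.  The choice of r2 to hold the saved state is
 * an assumption for the example only.
 *
 *	save_and_disable_irqs r2	@ r2 := CPSR, then IRQs masked
 *	@ ... code that must not be interrupted ...
 *	restore_irqs r2			@ put the interrupt state back as it was
 */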
#define USER(x...)				\
9999:	x;					\
	.section __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.previous

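/*
 * Illustrative sketch (not part of the original file): USER() emits an
 * exception-table entry for a single user-space access, pointing the
 * fault fixup at a local label 9001 that the caller must provide later
 * in the same file.  The registers and the error handling shown are
 * assumptions for the example only.
 *
 * USER(	ldrbt	r3, [r0], #1	)	@ may fault on the user pointer
 *	...
 * 9001:	mov	r0, #-EFAULT		@ reached via __ex_table on a fault
 *	mov	pc, lr
 */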
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	dmb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
#endif
	.endm
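/*
 * Illustrative sketch (not part of the original file): smp_dmb used to
 * bracket an exclusive-access read-modify-write sequence, so the update
 * is ordered against other CPUs only when CONFIG_SMP is set.  The
 * register use and the OR operation are assumptions for the example only.
 *
 *	smp_dmb				@ order prior accesses before the update
 * 1:	ldrex	r2, [r1]		@ load-exclusive the current value
 *	orr	r2, r2, r3		@ modify it
 *	strex	r0, r2, [r1]		@ try to store it back
 *	cmp	r0, #0
 *	bne	1b			@ lost the exclusive monitor, retry
 *	smp_dmb				@ order the update before later accesses
 */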