[ARM SMP] Add Realview MPcore SMP support
[linux-block.git] arch/arm/mm/proc-v6.S
/*
 *  linux/arch/arm/mm/proc-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/arm_scu.h>
#include <asm/procinfo.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#define D_CACHE_LINE_SIZE	32

	.macro	cpsie, flags
	.ifc	\flags, f
	.long	0xf1080040
	.exitm
	.endif
	.ifc	\flags, i
	.long	0xf1080080
	.exitm
	.endif
	.ifc	\flags, if
	.long	0xf10800c0
	.exitm
	.endif
	.err
	.endm

	.macro	cpsid, flags
	.ifc	\flags, f
	.long	0xf10c0040
	.exitm
	.endif
	.ifc	\flags, i
	.long	0xf10c0080
	.exitm
	.endif
	.ifc	\flags, if
	.long	0xf10c00c0
	.exitm
	.endif
	.err
	.endm

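The two macros above hand-assemble the ARMv6 CPSIE/CPSID instructions as literal .long words, because assemblers of the time did not accept the mnemonics. As a rough, host-runnable sketch (not part of the kernel source; the helper name is invented for illustration), the opcode constants can be reconstructed from the instruction's bit layout:

#include <stdint.h>
#include <stdio.h>

/* CPS uses the unconditional (0xF) encoding.  The "imod" field at
 * bits [19:18] selects interrupt enable (0b10) or disable (0b11);
 * bit 7 is the IRQ mask bit, bit 6 the FIQ mask bit. */
uint32_t cps_opcode(int disable, int irq, int fiq)
{
	uint32_t insn = 0xf1000000;

	insn |= (disable ? 0x3u : 0x2u) << 18;	/* CPSID or CPSIE */
	if (irq)
		insn |= 1u << 7;		/* affect the I flag */
	if (fiq)
		insn |= 1u << 6;		/* affect the F flag */
	return insn;
}

int main(void)
{
	printf("cpsie i  = 0x%08x\n", (unsigned)cps_opcode(0, 1, 0)); /* 0xf1080080 */
	printf("cpsid if = 0x%08x\n", (unsigned)cps_opcode(1, 1, 1)); /* 0xf10c00c0 */
	return 0;
}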
ENTRY(cpu_v6_proc_init)
	mov	pc, lr

ENTRY(cpu_v6_proc_fin)
	stmfd	sp!, {lr}
	cpsid	if				@ disable interrupts
	bl	v6_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 *	cpu_v6_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc   - location to jump to for soft reset
 *
 *	It is assumed that:
 */
	.align	5
ENTRY(cpu_v6_reset)
	mov	pc, r0

/*
 *	cpu_v6_do_idle()
 *
 *	Idle the processor (eg, wait for interrupt).
 *
 *	IRQs are already disabled.
 */
ENTRY(cpu_v6_do_idle)
	mcr	p15, 0, r1, c7, c0, 4		@ wait for interrupt
	mov	pc, lr

ENTRY(cpu_v6_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #D_CACHE_LINE_SIZE
	subs	r1, r1, #D_CACHE_LINE_SIZE
	bhi	1b
#endif
	mov	pc, lr

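In C terms, cpu_v6_dcache_clean_area walks the region one D-cache line at a time and cleans each line by virtual address, so the data becomes visible to the hardware page-table walker. A minimal model, assuming a hypothetical clean_dcache_line() in place of the MCR:

#define D_CACHE_LINE_SIZE	32

/* Hypothetical stand-in for "mcr p15, 0, addr, c7, c10, 1" (clean D line by MVA). */
void clean_dcache_line(unsigned long mva);

/* Model of cpu_v6_dcache_clean_area(addr, size). */
void v6_dcache_clean_area(unsigned long addr, long size)
{
	while (size > 0) {
		clean_dcache_line(addr);
		addr += D_CACHE_LINE_SIZE;
		size -= D_CACHE_LINE_SIZE;
	}
}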
/*
 *	cpu_v6_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys
 *
 *	- pgd_phys - physical address of new TTB
 *
 *	It is assumed that:
 *	- we are not using split page tables
 */
ENTRY(cpu_v6_switch_mm)
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
#ifdef CONFIG_SMP
	orr	r0, r0, #2			@ set shared pgtable
#endif
	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
	mov	pc, lr

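A loose C-level model of the sequence above (the cp15_* helpers are hypothetical placeholders for the coprocessor writes; context_id is the mm->context.id value loaded into r1):

/* Hypothetical wrappers for the CP15 operations used above. */
void cp15_flush_btb(void);			/* mcr p15, 0, Rd, c7, c5, 6  */
void cp15_drain_write_buffer(void);		/* mcr p15, 0, Rd, c7, c10, 4 */
void cp15_set_ttb0(unsigned long ttb);		/* mcr p15, 0, Rd, c2, c0, 0  */
void cp15_set_context_id(unsigned int id);	/* mcr p15, 0, Rd, c13, c0, 1 */

/* Model of cpu_v6_switch_mm(pgd_phys, tsk). */
void v6_switch_mm(unsigned long pgd_phys, unsigned int context_id)
{
#ifdef CONFIG_SMP
	pgd_phys |= 2;			/* walk this page table as shared */
#endif
	cp15_flush_btb();		/* stale predictions belong to the old mm */
	cp15_drain_write_buffer();
	cp15_set_ttb0(pgd_phys);	/* new translation table base */
	cp15_set_context_id(context_id);/* new ASID/context number */
}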
/*
 *	cpu_v6_set_pte(ptep, pte)
 *
 *	Set a level 2 translation table entry.
 *
 *	- ptep  - pointer to level 2 translation table entry
 *		  (hardware version is stored at -1024 bytes)
 *	- pte   - PTE value to store
 *
 *	Permissions:
 *	  YUWD  APX AP1 AP0	SVC	User
 *	  0xxx   0   0   0	no acc	no acc
 *	  100x   1   0   1	r/o	no acc
 *	  10x0   1   0   1	r/o	no acc
 *	  1011   0   0   1	r/w	no acc
 *	  110x   0   1   0	r/w	r/o
 *	  11x0   0   1   0	r/w	r/o
 *	  1111   0   1   1	r/w	r/w
 */
ENTRY(cpu_v6_set_pte)
	str	r1, [r0], #-2048		@ linux version

	bic	r2, r1, #0x000003f0
	bic	r2, r2, #0x00000003
	orr	r2, r2, #PTE_EXT_AP0 | 2

	tst	r1, #L_PTE_WRITE
	tstne	r1, #L_PTE_DIRTY
	orreq	r2, r2, #PTE_EXT_APX

	tst	r1, #L_PTE_USER
	orrne	r2, r2, #PTE_EXT_AP1
	tstne	r2, #PTE_EXT_APX
	bicne	r2, r2, #PTE_EXT_APX | PTE_EXT_AP0

	tst	r1, #L_PTE_YOUNG
	biceq	r2, r2, #PTE_EXT_APX | PTE_EXT_AP_MASK

@	tst	r1, #L_PTE_EXEC
@	orreq	r2, r2, #PTE_EXT_XN

	tst	r1, #L_PTE_PRESENT
	moveq	r2, #0

	str	r2, [r0]
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
	mov	pc, lr

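The permission table above is what the bit manipulation in cpu_v6_set_pte implements: it converts the Linux view of a PTE into the ARMv6 hardware small-page descriptor stored 1024 bytes below it. A C sketch of that translation follows; the helper name is invented, and the L_PTE_*/PTE_EXT_* numeric values are assumptions for illustration (the real definitions live in the ARM pgtable headers):

/* Assumed bit values, mirroring the kernel's L_PTE_* / PTE_EXT_* names. */
#define L_PTE_PRESENT	(1u << 0)
#define L_PTE_YOUNG	(1u << 1)
#define L_PTE_USER	(1u << 4)
#define L_PTE_WRITE	(1u << 5)
#define L_PTE_DIRTY	(1u << 7)

#define PTE_EXT_AP0	(1u << 4)
#define PTE_EXT_AP1	(2u << 4)
#define PTE_EXT_AP_MASK	(3u << 4)
#define PTE_EXT_APX	(1u << 9)

/* Rough model of the Linux-PTE to hardware-PTE translation above. */
unsigned int v6_mk_hw_pte(unsigned int linux_pte)
{
	/* clear the type and AP/TEX/APX fields, keep C/B and the frame
	 * address, and mark the entry as an extended small page */
	unsigned int hw = (linux_pte & ~0x3f3u) | PTE_EXT_AP0 | 2;

	/* read-only unless the page is both writable and dirty */
	if (!((linux_pte & L_PTE_WRITE) && (linux_pte & L_PTE_DIRTY)))
		hw |= PTE_EXT_APX;

	/* user mapping: set AP1; a user-writable page drops APX/AP0 */
	if (linux_pte & L_PTE_USER) {
		hw |= PTE_EXT_AP1;
		if (hw & PTE_EXT_APX)
			hw &= ~(PTE_EXT_APX | PTE_EXT_AP0);
	}

	/* not young: remove all access so the first use faults */
	if (!(linux_pte & L_PTE_YOUNG))
		hw &= ~(PTE_EXT_APX | PTE_EXT_AP_MASK);

	/* not present: no hardware mapping at all */
	if (!(linux_pte & L_PTE_PRESENT))
		hw = 0;

	return hw;
}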
cpu_v6_name:
	.asciz	"Some Random V6 Processor"
	.align

	.section ".text.init", #alloc, #execinstr

/*
 *	__v6_setup
 *
 *	Initialise TLB, Caches, and MMU state ready to switch the MMU
 *	on.  Return in r0 the new CP15 C1 control register setting.
 *
 *	We automatically detect if we have a Harvard cache, and use the
 *	Harvard cache control instructions instead of the unified cache
 *	control instructions.
 *
 *	This should be able to cover all ARMv6 cores.
 *
 *	It is assumed that:
 *	- cache type register is implemented
 */
__v6_setup:
#ifdef CONFIG_SMP
	/* Set up the SCU on core 0 only */
	mrc	p15, 0, r0, c0, c0, 5		@ CPU core number
	ands	r0, r0, #15
	moveq	r0, #0x10000000			@ SCU_BASE
	orreq	r0, r0, #0x00100000
	ldreq	r5, [r0, #SCU_CTRL]
	orreq	r5, r5, #1
	streq	r5, [r0, #SCU_CTRL]

#ifndef CONFIG_CPU_DCACHE_DISABLE
	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
	orr	r0, r0, #0x20
	mcr	p15, 0, r0, c1, c0, 1
#endif
#endif

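For readers unfamiliar with the Snoop Control Unit, here is a loose C sketch of the core-0-only enable sequence above. The SCU base 0x10100000 comes from the constants in the code; the SCU_CTRL offset, the MMIO accessors and read_cpu_id() are assumptions standing in for the platform header and the raw loads/stores:

#include <stdint.h>

#define SCU_BASE	0x10100000UL	/* Realview MPCore SCU, as built above */
#define SCU_CTRL	0x00		/* control register offset (assumption) */

/* Hypothetical MMIO accessors standing in for the ldr/str above. */
uint32_t mmio_read32(unsigned long addr);
void mmio_write32(unsigned long addr, uint32_t val);

/* Hypothetical wrapper for "mrc p15, 0, Rd, c0, c0, 5" (CPU ID register). */
uint32_t read_cpu_id(void);

/* Only the boot CPU (core 0) turns on the Snoop Control Unit;
 * secondary cores find it already enabled when they come up. */
void v6_smp_setup_scu(void)
{
	if ((read_cpu_id() & 15) == 0)
		mmio_write32(SCU_BASE + SCU_CTRL,
			     mmio_read32(SCU_BASE + SCU_CTRL) | 1);
}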
	mov	r0, #0
	mcr	p15, 0, r0, c7, c14, 0		@ clean+invalidate D cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c15, 0		@ clean+invalidate cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
#ifdef CONFIG_SMP
	orr	r4, r4, #2			@ set shared pgtable
#endif
	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
#ifdef CONFIG_VFP
	mrc	p15, 0, r0, c1, c0, 2
	orr	r0, r0, #(0xf << 20)
	mcr	p15, 0, r0, c1, c0, 2		@ Enable full access to VFP
#endif
	mrc	p15, 0, r0, c1, c0, 0		@ read control register
	ldr	r5, v6_cr1_clear		@ get mask for bits to clear
	bic	r0, r0, r5			@ clear the bits
	ldr	r5, v6_cr1_set			@ get mask for bits to set
	orr	r0, r0, r5			@ set them
	mov	pc, lr				@ return to head.S:__ret

	/*
	 *         V X F   I D LR
	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
	 *         0 110       0011 1.00 .111 1101 < we want
	 */
	.type	v6_cr1_clear, #object
	.type	v6_cr1_set, #object
v6_cr1_clear:
	.word	0x01e0fb7f
v6_cr1_set:
	.word	0x00c0387d

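In C terms, the last four instructions of __v6_setup combine the current control register value with these two masks before returning it to head.S, which writes it to CP15 c1 when it turns the MMU on. A sketch (not kernel code):

/* Sketch of how __v6_setup computes the value returned in r0. */
unsigned int v6_compute_cr1(unsigned int cr)
{
	cr &= ~0x01e0fb7fu;	/* v6_cr1_clear: force these bits to a known state */
	cr |=  0x00c0387du;	/* v6_cr1_set: MMU, caches, write buffer, branch
				 * prediction, high vectors, U and XP bits */
	return cr;
}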
	.type	v6_processor_functions, #object
ENTRY(v6_processor_functions)
	.word	v6_early_abort
	.word	cpu_v6_proc_init
	.word	cpu_v6_proc_fin
	.word	cpu_v6_reset
	.word	cpu_v6_do_idle
	.word	cpu_v6_dcache_clean_area
	.word	cpu_v6_switch_mm
	.word	cpu_v6_set_pte
	.size	v6_processor_functions, . - v6_processor_functions

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv6"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v6"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

	/*
	 * Match any ARMv6 processor core.
	 */
	.type	__v6_proc_info, #object
__v6_proc_info:
	.long	0x0007b000
	.long	0x0007f000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v6_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
	.long	cpu_v6_name
	.long	v6_processor_functions
	.long	v6wbi_tlb_fns
	.long	v6_user_fns
	.long	v6_cache_fns
	.size	__v6_proc_info, . - __v6_proc_info
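The first two words of the entry are the CPU ID value and mask used by the boot code's processor lookup: a core matches when its masked Main ID register equals the value, which for this entry accepts any core advertising the ARMv6 architecture/part-number pattern. Conceptually (a sketch, with a hypothetical read_midr() standing in for the CP15 c0 read done in head.S):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical wrapper for "mrc p15, 0, Rd, c0, c0, 0" (Main ID register). */
uint32_t read_midr(void);

/* Model of the value/mask match performed on each proc.info.init entry. */
bool v6_proc_info_matches(void)
{
	const uint32_t cpu_val  = 0x0007b000;
	const uint32_t cpu_mask = 0x0007f000;

	return (read_midr() & cpu_mask) == cpu_val;
}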