/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/stacktrace/frame.h>
#include <asm/thread_info.h>
#include <asm/virt.h>
#include "efi-header.S"

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	efi_signature_nop			// special NOP to identify as PE/COFF executable
	b	primary_entry			// branch to kernel start, magic
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
	.long	.Lpe_header_offset		// Offset to the PE header.
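	/*
	 * For reference, the directives above form the 64-byte image header
	 * that boot-loaders parse; a sketch using the field names from
	 * Documentation/arch/arm64/booting.rst:
	 *
	 *	u32 code0;		// executable code (the NOP above)
	 *	u32 code1;		// executable code (the branch above)
	 *	u64 text_offset;	// image load offset, little-endian
	 *	u64 image_size;		// effective image size, little-endian
	 *	u64 flags;		// kernel flags, little-endian
	 *	u64 res2, res3, res4;	// reserved
	 *	u32 magic;		// "ARM\x64"
	 *	u32 res5;		// offset to the PE header
	 */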
	__EFI_PE_HEADER

	.section ".idmap.text","a"
	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                                   Purpose
	 *  x19        primary_entry() .. start_kernel()       whether we entered with the MMU on
	 *  x20        primary_entry() .. __primary_switch()   CPU boot mode
	 *  x21        primary_entry() .. start_kernel()       FDT pointer passed at boot in x0
	 */
SYM_CODE_START(primary_entry)
	bl	record_mmu_state
	bl	preserve_boot_args

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	adrp	x0, __pi_init_idmap_pg_dir
	mov	x1, xzr
	bl	__pi_create_init_idmap

	/*
	 * If the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	cbnz	x19, 0f
	dmb	sy
	mov	x1, x0				// end of used region
	adrp	x0, __pi_init_idmap_pg_dir
	adr_l	x2, dcache_inval_poc
	blr	x2
	b	1f

	/*
	 * If we entered with the MMU and caches on, clean the ID mapped part
	 * of the primary boot code to the PoC so we can safely execute it with
	 * the MMU off.
	 */
0:	adrp	x0, __idmap_text_start
	adr_l	x1, __idmap_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

1:	mov	x0, x19
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0

	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)
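	/*
	 * Primary boot flow, in rough outline: record whether we entered with
	 * the MMU on, stash x0..x3, build the initial ID map, drop to the
	 * kernel's chosen exception level, initialise the processor, and hand
	 * over to __primary_switch to enable the MMU and enter C code.
	 */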
	__INIT
SYM_CODE_START_LOCAL(record_mmu_state)
	mrs	x19, CurrentEL
	cmp	x19, #CurrentEL_EL2
	mrs	x19, sctlr_el1
	b.ne	0f
	mrs	x19, sctlr_el2
0:
CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
	csel	x19, xzr, x19, eq		// clear x19 if Z
	ret

	/*
	 * Set the correct endianness early so all memory accesses issued
	 * before init_kernel_el() occur in the correct byte order. Note that
	 * this means the MMU must be disabled, or the active ID map will end
	 * up getting interpreted with the wrong byte order.
	 */
1:	eor	x19, x19, #SCTLR_ELx_EE
	bic	x19, x19, #SCTLR_ELx_M
	b.ne	2f
	pre_disable_mmu_workaround
	msr	sctlr_el2, x19
	b	3f
2:	pre_disable_mmu_workaround
	msr	sctlr_el1, x19
3:	isb
	mov	x19, xzr
	ret
SYM_CODE_END(record_mmu_state)
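	/*
	 * Net effect (informal summary): x19 is nonzero (SCTLR_ELx.M) on
	 * return only if we entered with the MMU on, the D-cache enabled and
	 * the correct endianness already selected; in every other case the
	 * endianness is fixed up, the MMU is switched off, and x19 is zero.
	 */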
	/*
	 * Preserve the arguments passed by the bootloader in x0 .. x3
	 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	dcache_inval_poc		// tail call
0:	str_l	x19, mmu_enabled_at_boot, x0
	ret
SYM_CODE_END(preserve_boot_args)
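	/*
	 * The saved boot_args are inspected later: the boot protocol requires
	 * x1..x3 to be zero at kernel entry, and setup_arch() warns if the
	 * recorded values say otherwise (see arch/arm64/kernel/setup.c).
	 */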
	/*
	 * Initialize CPU registers with task-specific and cpu-specific context.
	 *
	 * Create a final frame record at task_pt_regs(current)->stackframe, so
	 * that the unwinder can identify the final frame record of any task by
	 * its location in the task stack. We reserve the entire pt_regs space
	 * for consistency with user tasks and kthreads.
	 */
	.macro	init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk

	ldr	\tmp1, [\tsk, #TSK_STACK]
	add	sp, \tmp1, #THREAD_SIZE
	sub	sp, sp, #PT_REGS_SIZE

	stp	xzr, xzr, [sp, #S_STACKFRAME]
	mov	\tmp1, #FRAME_META_TYPE_FINAL
	str	\tmp1, [sp, #S_STACKFRAME_TYPE]
	add	x29, sp, #S_STACKFRAME

	scs_load_current

	adr_l	\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]
	set_this_cpu_offset \tmp1
	.endm
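	/*
	 * Roughly equivalent C, for orientation only (helper names are
	 * approximations, not the real implementation):
	 *
	 *	current = tsk;				// via SP_EL0
	 *	sp = task_stack_page(tsk) + THREAD_SIZE
	 *	     - sizeof(struct pt_regs);
	 *	task_pt_regs(tsk)->stackframe.record.fp = 0;
	 *	task_pt_regs(tsk)->stackframe.record.lr = 0;
	 *	task_pt_regs(tsk)->stackframe.type = FRAME_META_TYPE_FINAL;
	 *	fp = &task_pt_regs(tsk)->stackframe;	// x29
	 *	set_this_cpu_offset(__per_cpu_offset[task_cpu(tsk)]);
	 */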
	/*
	 * The following fragment of code is executed with the MMU enabled.
	 *
	 *   x0 = __pa(KERNEL_START)
	 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adr_l	x4, init_task
	init_cpu_task x4, x5, x6

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	adrp	x4, _text			// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x0, x20
	bl	set_cpu_boot_mode_flag

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x20
	bl	finalise_el2			// Prefer VHE if possible
	ldp	x29, x30, [sp], #16
	bl	start_kernel
	ASM_BUG()
SYM_FUNC_END(__primary_switched)
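	/*
	 * start_kernel() is not expected to return; the ASM_BUG() above turns
	 * an unexpected return into a diagnosable BUG rather than a fall
	 * through into whatever happens to follow in the text section.
	 */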
/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","a"

	/*
	 * Starting from EL2 or EL1, configure the CPU to execute at the highest
	 * reachable EL supported by the kernel in a chosen default state. If
	 * dropping from EL2 to EL1, configure EL2 before configuring EL1.
	 *
	 * Since we cannot always rely on ERET synchronizing writes to sysregs
	 * (e.g. if SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
	 *
	 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
	 * booted in EL1 or EL2 respectively, with the top 32 bits containing
	 * potential context flags. These flags are *not* stored in __boot_cpu_mode.
	 *
	 * x0: whether we are being called from the primary boot path with the MMU on
	 */
SYM_FUNC_START(init_kernel_el)
	mrs	x1, CurrentEL
	cmp	x1, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, x0
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	msr	elr_el2, lr

	// clean all HYP code to the PoC if we booted at EL2 with the MMU on
	cbz	x0, 0f
	adrp	x0, __hyp_idmap_text_start
	adr_l	x1, __hyp_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x0
	isb
0:

	init_el2_hcr	HCR_HOST_NVHE_FLAGS
	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF

	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 2f

	/* Set a sane SCTLR_EL1, the VHE way */
	msr_s	SYS_SCTLR_EL12, x1
	mov	x2, #BOOT_CPU_FLAG_E2H
	b	3f

2:
	msr	sctlr_el1, x1
	mov	x2, xzr
3:
	mov	x0, #INIT_PSTATE_EL1
	msr	spsr_el2, x0

	mov	w0, #BOOT_CPU_MODE_EL2
	orr	x0, x0, x2
	eret
SYM_FUNC_END(init_kernel_el)
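	/*
	 * Worked example of the return value: a CPU entering at EL2 with
	 * HCR_EL2.E2H set (VHE) returns x0 = BOOT_CPU_FLAG_E2H |
	 * BOOT_CPU_MODE_EL2, a non-VHE EL2 boot returns plain
	 * BOOT_CPU_MODE_EL2, and an EL1 boot returns BOOT_CPU_MODE_EL1.
	 */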
	/*
	 * This provides a "holding pen" for platforms, in which all secondary
	 * cores are held until we're ready for them to initialise.
	 */
SYM_FUNC_START(secondary_holding_pen)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	mrs	x2, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x2, x2, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x2
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)
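	/*
	 * Each waiting core compares its own MPIDR against
	 * secondary_holding_pen_release and leaves the pen only on a match;
	 * the spin-table boot code releases a core by writing its hwid there
	 * and issuing an SEV to wake the WFE (see
	 * arch/arm64/kernel/smp_spin_table.c).
	 */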
	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
SYM_FUNC_START(secondary_entry)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mov	x20, x0				// preserve boot mode

#ifdef CONFIG_ARM64_VA_BITS_52
alternative_if ARM64_HAS_VA52
	bl	__cpu_secondary_check52bitva
alternative_else_nop_endif
#endif

	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	adrp	x2, idmap_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)
	.text
SYM_FUNC_START_LOCAL(__secondary_switched)
	mov	x0, x20
	bl	set_cpu_boot_mode_flag

	mov	x0, x20
	bl	finalise_el2

	str_l	xzr, __early_cpu_boot_status, x3
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

	init_cpu_task x2, x1, x3

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu	x2, x3, x4, x5
#endif

	bl	secondary_start_kernel
	ASM_BUG()
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)
	/*
	 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
	 * in w0. See arch/arm64/include/asm/virt.h for more info.
	 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Save CPU boot mode
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)
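	/*
	 * __boot_cpu_mode is a two-word array (see asm/virt.h): CPUs that
	 * booted at EL2 record that fact in word 1, everything else lands in
	 * word 0. is_hyp_mode_available() later requires both words to read
	 * BOOT_CPU_MODE_EL2, i.e. every CPU must have entered at EL2 for the
	 * kernel to make use of the hypervisor mode.
	 */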
	/*
	 * The booting CPU updates the failed status @__early_cpu_boot_status,
	 * with MMU turned off.
	 *
	 * update_early_cpu_boot_status status, tmp1, tmp2
	 *  - Corrupts tmp1, tmp2
	 *  - Writes 'status' to __early_cpu_boot_status and makes sure
	 *    it is committed to memory.
	 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm
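	/*
	 * The dc ivac matters because the writer runs with the MMU (and hence
	 * the D-cache) off, while the primary CPU polls __early_cpu_boot_status
	 * through a cacheable mapping; invalidating the line to the PoC keeps
	 * the two views coherent.
	 */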
	/*
	 * Enable the MMU.
	 *
	 *  x0  = SCTLR_EL1 value for turning on the MMU.
	 *  x1  = TTBR1_EL1 value
	 *  x2  = ID map root table address
	 *
	 * Returns to the caller via x30/lr. This requires the caller to be
	 * covered by the .idmap.text section.
	 *
	 * Checks if the selected granule size is supported by the CPU; if it
	 * isn't, parks the CPU.
	 */
	.section ".idmap.text","a"
SYM_FUNC_START(__enable_mmu)
	mrs	x3, ID_AA64MMFR0_EL1
	ubfx	x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
	b.lt	__no_granule_support
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
	b.gt	__no_granule_support
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	load_ttbr1 x1, x1, x3

	set_sctlr_el1	x0

	ret
SYM_FUNC_END(__enable_mmu)
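	/*
	 * ID_AA64MMFR0_EL1_TGRAN_SHIFT resolves to the TGRAN4/TGRAN16/TGRAN64
	 * field matching the configured page size, so the range check above
	 * rejects CPUs that do not implement the granule this kernel was
	 * built for, before the MMU is ever enabled with it.
	 */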
#ifdef CONFIG_ARM64_VA_BITS_52
SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifndef CONFIG_ARM64_LPA2
	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
	cbnz	x0, 2f
#else
	mrs	x0, id_aa64mmfr0_el1
	sbfx	x0, x0, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x0, #ID_AA64MMFR0_EL1_TGRAN_LPA2
	b.ge	2f
#endif

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)
#endif
SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)
SYM_FUNC_START_LOCAL(__primary_switch)
	adrp	x1, reserved_pg_dir
	adrp	x2, __pi_init_idmap_pg_dir
	bl	__enable_mmu

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	mov	x0, x20				// pass the full boot status
	mov	x1, x21				// pass the FDT
	bl	__pi_early_map_kernel		// Map and relocate the kernel

	ldr	x8, =__primary_switched
	adrp	x0, KERNEL_START		// __pa(KERNEL_START)
	br	x8
SYM_FUNC_END(__primary_switch)
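	/*
	 * Note that the MMU is enabled here with reserved_pg_dir (an empty
	 * page table) in TTBR1 and only the ID map in TTBR0: the kernel's own
	 * mapping does not exist yet, and is created and relocated by
	 * __pi_early_map_kernel() before the br above jumps to
	 * __primary_switched at its virtual address.
	 */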