/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

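/*
 * A quick worked check of the constraints above (added commentary, assuming
 * the default TEXT_OFFSET of 0x80000 from the arm64 Makefile):
 * 0x80000 & 0xfff == 0, so the default offset is 4KB aligned, and
 * 0x80000 < 0x200000, so it stays under the 2MB limit. The 2MB requirements
 * exist because, with 4K pages, the kernel is mapped using 2MB swapper
 * block entries rather than individual pages.
 */
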
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent, so you can call it at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
	b	stext
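	/*
	 * Worked example (added commentary): "add x13, x18, #0x16"
	 * assembles to the opcode 0x91005a4d. Stored little-endian, the
	 * first two bytes of the image are therefore 0x4d 0x5a, i.e.
	 * ASCII "MZ", the DOS/PE signature UEFI firmware looks for.
	 * Neither x13 nor x18 is live here, so executing the add on a
	 * non-EFI boot path is harmless.
	 */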
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	"ARM\x64"			// Magic number
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.

pe_header:
	__EFI_PE_HEADER
#else
	.long	0				// reserved
#endif

	__INIT

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary low-level boot path:
	 *
	 *  Register   Scope                      Purpose
	 *  x21        stext() .. start_kernel()  FDT pointer passed at boot in x0
	 *  x23        stext() .. start_kernel()  physical misalignment/KASLR offset
	 *  x28        __create_page_tables()     callee preserved temp register
	 *  x19/x20    __primary_switch()         callee preserved temp registers
	 */
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
ENDPROC(stext)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
ENDPROC(preserve_boot_args)

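/*
 * In rough C terms (a sketch, assuming the u64 boot_args[4] array defined
 * in arch/arm64/kernel/setup.c), preserve_boot_args does:
 *
 *	boot_args[0] = x0;	// FDT pointer, also kept in x21
 *	boot_args[1] = x1;	// x1..x3 should be zero per the boot protocol
 *	boot_args[2] = x2;
 *	boot_args[3] = x3;
 *	__inval_dcache_area(boot_args, 4 * 8);
 *
 * The invalidation matters because these stores bypass the D-cache while
 * the MMU is off, and setup_arch() later reads boot_args with caching on.
 */
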
/*
 * Macro to arrange a physical address in a page table entry, taking care of
 * 52-bit addresses.
 *
 * Preserves:	phys
 * Returns:	pte
 */
	.macro	phys_to_pte, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

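/*
 * Worked example of the 52-bit case (added commentary): PA bits [51:48]
 * are kept in pte bits [15:12], and "lsr #36" is exactly the shift that
 * brings bit 48 down to bit 12. For phys = 0x0008000012340000:
 *
 *	phys >> 36		= 0x8000		// PA[51:48] now at bits [15:12]
 *	phys | (phys >> 36)	= 0x0008000012348000
 *	...  & PTE_ADDR_MASK	= 0x0000000012348000
 *
 * leaving PA[47:16] in place and PA[51:48] encoded at pte[15:12].
 */
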
/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp1, \tmp2
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	and	\tmp1, \tmp1, #\ptrs - 1	// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm

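/*
 * In rough C terms (a sketch; the early tables are allocated as
 * consecutive pages, so "the next page" is the next level table):
 *
 *	u64 *tbl = table;
 *	tbl[(virt >> shift) & (ptrs - 1)] =
 *		phys_to_pte((u64)tbl + PAGE_SIZE) | PMD_TYPE_TABLE;
 *	tbl += PAGE_SIZE / sizeof(u64);		// descend to next level
 */
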
/*
 * Macro to populate the PGD (and possibly PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
 *
 * Preserves:	tbl, next, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#if SWAPPER_PGTABLE_LEVELS > 3
	create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
	create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
	.endm

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, tmp, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end, tmp
	lsr	\start, \start, #SWAPPER_BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	bic	\phys, \phys, #SWAPPER_BLOCK_SIZE - 1
	lsr	\end, \end, #SWAPPER_BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	phys_to_pte \phys, \tmp
	orr	\tmp, \tmp, \flags			// table entry
	str	\tmp, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #SWAPPER_BLOCK_SIZE	// next block
	cmp	\start, \end
	b.ls	9999b
	.endm

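/*
 * In rough C terms (a sketch), the macro above performs:
 *
 *	idx = (start >> SWAPPER_BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
 *	end = (end   >> SWAPPER_BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
 *	phys &= ~(SWAPPER_BLOCK_SIZE - 1);
 *	do {
 *		tbl[idx] = phys_to_pte(phys) | flags;
 *		idx++;
 *		phys += SWAPPER_BLOCK_SIZE;
 *	} while (idx <= end);
 *
 * The range is inclusive ("b.ls"), so start == end still maps one block.
 */
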
/*
 * Set up the initial page tables. We only set up the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	mov	x28, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	adrp	x0, idmap_pg_dir
	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	bl	__inval_dcache_area

	/*
	 * Clear the idmap and swapper page tables.
	 */
	adrp	x0, idmap_pg_dir
	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	subs	x1, x1, #64
	b.ne	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
	 * created that covers system RAM if that is located sufficiently high
	 * in the physical address space. So for the ID map, use an extended
	 * virtual range in that case, by configuring an additional translation
	 * level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	/*
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip additional level

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif
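
	/*
	 * Worked example (added commentary, assuming 4K pages and
	 * VA_BITS == 39): PGDIR_SHIFT is 30, so EXTRA_SHIFT = 30 + 12 - 3
	 * = 39 == VA_BITS, i.e. the extra level extends the VA range by
	 * PAGE_SHIFT - 3 = 9 bits, and EXTRA_PTRS = 1 << (48 - 39) = 512.
	 * If __idmap_text_end sits at physical 0x80_0000_0000 (bit 39
	 * set), clz yields 24, which is below TCR_T0SZ(39) == 25, so the
	 * extra level is installed and idmap_t0sz drops to 24, covering
	 * 40 bits of VA.
	 */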

	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
	create_block_map x0, x7, x3, x5, x6, x4

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, swapper_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	create_pgd_entry x0, x5, x3, x6
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)
	create_block_map x0, x7, x3, x5, x6, x4

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	adrp	x0, idmap_pg_dir
	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	dmb	sy
	bl	__inval_dcache_area

	ret	x28
ENDPROC(__create_page_tables)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
__primary_switched:
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	adr_l	x5, init_task
	msr	sp_el0, x5			// Save thread_info

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
	add	sp, sp, #16
	mov	x29, #0
	mov	x30, #0
	b	start_kernel
ENDPROC(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","ax"

ENTRY(kimage_vaddr)
	.quad	_text - TEXT_OFFSET

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	msr	SPsel, #1			// We want to use SP_EL{1,2}
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	1f
	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w0, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

1:	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0

#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #8, #4
#else
	mov	x2, xzr
#endif

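	/*
	 * In rough C terms (added commentary), the VHE check above is:
	 *
	 *	vhe = (read_sysreg(id_aa64mmfr1_el1) >> 8) & 0xf;
	 *
	 * i.e. it extracts ID_AA64MMFR1_EL1.VH (bits [11:8]); non-zero
	 * means the Virtualization Host Extensions are implemented and
	 * the kernel may run at EL2.
	 */
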
	/* Hyp configuration. */
	mov	x0, #HCR_RW			// 64-bit EL1
	cbz	x2, set_hcr
	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
	orr	x0, x0, #HCR_E2H
set_hcr:
	msr	hcr_el2, x0
	isb

	/*
	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
	 * This is not necessary for VHE, since the host kernel runs in EL2,
	 * and EL0 accesses are configured in the later stage of boot process.
	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
	 * EL2.
	 */
	cbnz	x2, 1f
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
1:
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
	cmp	x0, #1
	b.ne	3f

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x1, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	x0, x1, #8, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
4:
	csel	x3, xzr, x0, lt			// all PMU counters from EL1
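	/*
	 * Note (added commentary): neither mrs nor ubfx alters the NZCV
	 * flags, so "lt" in the csel above still reflects the PMUVer
	 * comparison. Without a PMU, x3 (the future MDCR_EL2 value)
	 * becomes zero; otherwise it holds PMCR_EL0.N, the number of
	 * implemented counters, destined for MDCR_EL2.HPMN.
	 */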

	/* Statistical profiling */
	ubfx	x0, x1, #32, #4			// Check ID_AA64DFR0_EL1 PMSVer
	cbz	x0, 7f				// Skip if SPE not present
	cbnz	x2, 6f				// VHE?
	mrs_s	x4, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x4, 5f				// then permit sampling of physical
	mov	x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x4		// addresses and physical counter
5:
	mov	x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x3, x3, x1			// If we don't have VHE, then
	b	7f				// use EL1&0 translation.
6:						// For VHE, use EL2 translation
	orr	x3, x3, #MDCR_EL2_TPMS		// and disable access from EL1
7:
	msr	mdcr_el2, x3			// Configure debug traps

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

install_el2_stub:
	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

	/* SVE register access */
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	cbz	x1, 7f

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

	/* Hypervisor stub */
7:	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
set_cpu_boot_mode_flag:
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Record the CPU boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

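/*
 * In rough C terms (a sketch; see is_hyp_mode_available() in
 * asm/virt.h for the reader side):
 *
 *	if (mode == BOOT_CPU_MODE_EL2)
 *		__boot_cpu_mode[1] = mode;	// EL2 boot leaves slot 0 alone
 *	else
 *		__boot_cpu_mode[0] = mode;	// EL1 boot clobbers slot 0
 *
 * Given the initial values {EL2, EL1} below, both slots read back as EL2
 * only if every CPU entered the kernel at EL2.
 */
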
/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
ENTRY(__early_cpu_boot_status)
	.long	0

	.popsection

/*
 * This provides a "holding pen" in which platforms hold all secondary
 * cores until we're ready for them to initialise.
 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

secondary_startup:
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_setup			// initialise processor
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
ENDPROC(secondary_startup)

__secondary_switched:
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	mov	sp, x1
	ldr	x2, [x0, #CPU_BOOT_TASK]
	msr	sp_el0, x2
	mov	x29, #0
	mov	x30, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
ENTRY(__enable_mmu)
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x1, x2
	adrp	x1, idmap_pg_dir
	adrp	x2, swapper_pg_dir
	phys_to_ttbr x1, x3
	phys_to_ttbr x2, x4
	msr	ttbr0_el1, x3			// load TTBR0
	msr	ttbr1_el1, x4			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
ENDPROC(__enable_mmu)

__no_granule_support:
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
1:
	wfe
	wfi
	b	1b
ENDPROC(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
__relocate_kernel:
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	1f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x13, x13, x23			// relocate
	str	x13, [x11, x23]
	b	0b
1:	ret
ENDPROC(__relocate_kernel)
#endif
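
/*
 * In rough C terms (a sketch; x23 holds the KASLR displacement, and
 * r_offset/r_addend are link-time virtual addresses):
 *
 *	for (Elf64_Rela *r = rela; r < rela_end; r++) {
 *		if (ELF64_R_TYPE(r->r_info) != R_AARCH64_RELATIVE)
 *			continue;
 *		*(u64 *)(r->r_offset + x23) = r->r_addend + x23;
 *	}
 *
 * Every absolute 64-bit address baked into the image is thus patched to
 * its randomized virtual address.
 */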

__primary_switch:
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	blr	x8

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries
	dsb	nsh

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb

	bl	__relocate_kernel
#endif
#endif
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	br	x8
ENDPROC(__primary_switch)