/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
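
/*
 * TCR_EL1.TG0/TG1 select the translation granule for the TTBR0 and TTBR1
 * tables respectively; both use the kernel's configured page size.
 */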
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS 0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS 0
#endif
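
/*
 * TCR_TBI1 makes the MMU ignore the top byte of kernel (TTBR1) addresses
 * so it can carry a memory tag; TCR_TBID1 confines that to data accesses,
 * keeping instruction fetches untagged.
 */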

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
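
/*
 * MAIR_ATTRIDX(attr, idx) shifts an 8-bit attribute encoding into byte
 * 'idx' of MAIR_EL1; the AttrIndx field of each PTE then picks one of
 * these eight attribute bytes.
 */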

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU registers context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9

	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
#endif /* CONFIG_CPU_PM */

	.pushsection ".idmap.text", "a"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm
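
/*
 * The macro above parks TTBR1 on reserved_pg_dir, an always-invalid pgdir,
 * and flushes the TLB, so callers can install a new swapper table without
 * ever having two sets of live translations.
 */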

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)

	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS | PTE_WRITE)

	.pushsection ".idmap.text", "a"

	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p]		// Load the entry
	tbz	\type, #0, .Lnext_\type		// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p]		// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	add	cur_\type\()p, cur_\type\()p, #8
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 1)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 1)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 1)]
	dsb	nshst
	.endm
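
/*
 * Each level gets its own slot in the temporary PTE page: entry (level + 1)
 * maps the table currently being rewritten at temp_pte + PAGE_SIZE *
 * (level + 1), so remapping one level never disturbs the windows of the
 * levels above it.
 */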

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
	.pushsection	".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection
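
/*
 * __idmap_kpti_flag implements the rendezvous: it starts at 1, each
 * secondary atomically increments it on arrival, the boot CPU spins until
 * it equals the number of online CPUs, and the boot CPU's final store of
 * zero releases the secondaries once swapper has been rewritten.
 */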

SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov_q	pte_flags, KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, 0
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1	x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
	ret

.Lderef_pgd:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, pgd
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)

	.popsection
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 * Input:
 *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "a"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	msr	cpacr_el1, xzr			// Reset cpacr_el1
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
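
	/*
	 * Note: TCR_TBI0 enables top-byte-ignore for user (TTBR0) addresses,
	 * TCR_A1 selects the ASID from TTBR1, and TCR_ASID16 asks for 16-bit
	 * ASIDs where the hardware supports them.
	 */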
	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	sub		x9, xzr, x0
	add		x9, x9, #64
	tcr_set_t1sz	tcr, x9
#else
	idmap_get_t0sz	x9
#endif
	tcr_set_t0sz	tcr, x9

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_MASK
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr

	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_indirection

	mov_q	x0, PIE_E0
	msr	REG_PIRE0_EL1, x0
	mov_q	x0, PIE_E1
	msr	REG_PIR_EL1, x0

	mov	x0, TCR2_EL1x_PIE
	msr	REG_TCR2_EL1, x0

.Lskip_indirection:
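
	/*
	 * With FEAT_S1PIE enabled through TCR2_EL1.PIE, stage-1 permissions
	 * are looked up in the PIRE0_EL1 (unprivileged) and PIR_EL1
	 * (privileged) indirection registers instead of being decoded
	 * directly from the PTE permission bits.
	 */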

	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
SYM_FUNC_END(__cpu_setup)