// SPDX-License-Identifier: GPL-2.0-only
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 */

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/kmemleak.h>
#include <linux/screen_info.h>
#include <linux/vmalloc.h>

#include <asm/efi.h>
#include <asm/stacktrace.h>
#include <asm/vmap_stack.h>

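/*
 * The UEFI spec describes memory regions in units of 4 KiB EFI pages, so
 * with 16K or 64K kernel pages a runtime region's start address or size
 * may not be aligned to the kernel's page size.
 */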
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
	if (PAGE_SIZE == EFI_PAGE_SIZE)
		return false;
	return !PAGE_ALIGNED(md->phys_addr) ||
	       !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}

/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable, everything else can be mapped with the XN bits
 * set. Also take the new (optional) RO/XP bits into account.
 */
static __init ptdesc_t create_mapping_protection(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;

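	/*
	 * MMIO regions are mapped with Device-nGnRE attributes. Whether the
	 * encrypted or the decrypted variant of the protection is used
	 * depends on whether the range is reported as protected MMIO
	 * (presumably a confidential computing guest detail).
	 */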
	if (type == EFI_MEMORY_MAPPED_IO) {
		pgprot_t prot = __pgprot(PROT_DEVICE_nGnRE);

		if (arm64_is_protected_mmio(md->phys_addr,
					    md->num_pages << EFI_PAGE_SHIFT))
			prot = pgprot_encrypted(prot);
		else
			prot = pgprot_decrypted(prot);
		return pgprot_val(prot);
	}

	if (region_is_misaligned(md)) {
		static bool __initdata code_is_misaligned;

		/*
		 * Regions that are not aligned to the OS page size cannot be
		 * mapped with strict permissions, as those might interfere
		 * with the permissions that are needed by the adjacent
		 * region's mapping. However, if we haven't encountered any
		 * misaligned runtime code regions so far, we can safely use
		 * non-executable permissions for non-code regions.
		 */
		code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);

		return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
					  : pgprot_val(PAGE_KERNEL);
	}

	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return pgprot_val(PAGE_KERNEL_RO);

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return pgprot_val(PAGE_KERNEL_ROX);

	/* RW- */
	if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
	     EFI_MEMORY_XP) ||
	    type != EFI_RUNTIME_SERVICES_CODE)
		return pgprot_val(PAGE_KERNEL);

	/* RWX */
	return pgprot_val(PAGE_KERNEL_EXEC);
}

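/*
 * Map a single EFI runtime region into the EFI mm, using the protections
 * computed above. Code and data regions are always mapped down to pages so
 * that efi_set_mapping_permissions() can tighten individual PTEs later on.
 */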
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	ptdesc_t prot_val = create_mapping_protection(md);
	bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
				   md->type == EFI_RUNTIME_SERVICES_DATA);

	/*
	 * If this region is not aligned to the page size used by the OS, the
	 * mapping will be rounded outwards, and may end up sharing a page
	 * frame with an adjacent runtime memory region. Given that the page
	 * table descriptor covering the shared page will be rewritten when the
	 * adjacent region gets mapped, we must avoid block mappings here so we
	 * don't have to worry about splitting them when that happens.
	 */
	if (region_is_misaligned(md))
		page_mappings_only = true;

	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT,
			   __pgprot(prot_val | PTE_NG), page_mappings_only);
	return 0;
}

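/*
 * apply_to_page_range() callback: tighten each PTE according to the RO/XP
 * attributes from the Memory Attributes table, and mark executable pages as
 * guarded (BTI) when both the CPU and the firmware image support it.
 */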
struct set_perm_data {
	const efi_memory_desc_t *md;
	bool has_bti;
};

static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	struct set_perm_data *spd = data;
	const efi_memory_desc_t *md = spd->md;
	pte_t pte = __ptep_get(ptep);

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
	else if (system_supports_bti_kernel() && spd->has_bti)
		pte = set_pte_bit(pte, __pgprot(PTE_GP));
	__set_pte(ptep, pte);
	return 0;
}

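/*
 * Apply the stricter permissions described by the Memory Attributes table
 * to a code or data region that was already mapped by efi_create_mapping().
 * Misaligned regions were not mapped with strict permissions to begin with,
 * so they are simply skipped here.
 */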
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md,
				       bool has_bti)
{
	struct set_perm_data data = { md, has_bti };

	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	if (region_is_misaligned(md))
		return 0;

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, &data);
}

/*
 * UpdateCapsule() depends on the system being shutdown via
 * ResetSystem().
 */
bool efi_poweroff_required(void)
{
	return efi_enabled(EFI_RUNTIME_SERVICES);
}

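/*
 * Called from the runtime service wrapper when the firmware has clobbered
 * x18, the shadow call stack / platform register: log the firmware bug and
 * pass the service's status code through unchanged.
 */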
asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
{
	pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
	return s;
}

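/*
 * Runtime service calls are serialized by efi_rt_lock. The setup/teardown
 * pair below also switches to the EFI page tables around the call and
 * preserves the task's FP/SIMD state across the firmware's use of those
 * registers.
 */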
static DEFINE_RAW_SPINLOCK(efi_rt_lock);

void arch_efi_call_virt_setup(void)
{
	efi_virtmap_load();
	raw_spin_lock(&efi_rt_lock);
	__efi_fpsimd_begin();
}

void arch_efi_call_virt_teardown(void)
{
	__efi_fpsimd_end();
	raw_spin_unlock(&efi_rt_lock);
	efi_virtmap_unload();
}

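/*
 * Top of the dedicated EFI runtime stack allocated in arm64_efi_rt_init().
 * The words just below the top hold the return address of the interrupted
 * runtime service call (and, with shadow call stacks, its x18 value), which
 * the recovery path below uses to get back into the kernel.
 */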
asmlinkage u64 *efi_rt_stack_top __ro_after_init;

asmlinkage efi_status_t __efi_rt_asm_recover(void);

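/*
 * Called when a synchronous exception is taken while a runtime service is
 * executing. Rather than crashing, taint the kernel, disable EFI runtime
 * services, and rewrite the exception return state so that execution
 * resumes in __efi_rt_asm_recover(), which returns EFI_ABORTED to the
 * caller of the runtime service.
 */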
bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
{
	/* Check whether the exception occurred while running the firmware */
	if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
		return false;

	pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	regs->regs[0] = EFI_ABORTED;
	regs->regs[30] = efi_rt_stack_top[-1];
	regs->pc = (u64)__efi_rt_asm_recover;

	if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
		regs->regs[18] = efi_rt_stack_top[-2];

	return true;
}

/* EFI requires 8 KiB of stack space for runtime services */
static_assert(THREAD_SIZE >= SZ_8K);

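/*
 * Allocate the dedicated, vmap'd stack that EFI runtime services run on.
 * If that is not possible, disable EFI runtime services altogether.
 */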
static int __init arm64_efi_rt_init(void)
{
	void *p;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return 0;

	if (!IS_ENABLED(CONFIG_VMAP_STACK)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return -ENOMEM;
	}

	p = arch_alloc_vmap_stack(THREAD_SIZE, NUMA_NO_NODE);
	if (!p) {
		pr_warn("Failed to allocate EFI runtime stack\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return -ENOMEM;
	}

	kmemleak_not_leak(p);
	efi_rt_stack_top = p + THREAD_SIZE;
	return 0;
}
core_initcall(arm64_efi_rt_init);