/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#include <asm/debugreg.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PERF_EVENTS

DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

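/*
 * Example (a sketch for illustration, not part of this header): the
 * point of CR4.PCE is to let userspace read counters directly with the
 * rdpmc instruction.  A perf self-monitoring loop, with @idx assumed to
 * come from the event's mmapped page, boils down to:
 *
 *	u32 lo, hi;
 *	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
 *	count = ((u64)hi << 32) | lo;
 */
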
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate and map a new LDT and then enable it, without
	 * invalidating the mapping of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

static inline void *ldt_slot_va(int slot)
{
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}
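
/*
 * Worked example (assuming the usual LDT_ENTRIES == 8192 and
 * LDT_ENTRY_SIZE == 8, so LDT_SLOT_STRIDE == 64 KiB):
 *
 *	ldt_slot_va(0) == LDT_BASE_ADDR
 *	ldt_slot_va(1) == LDT_BASE_ADDR + 64 * 1024
 *
 * which is why a valid alias slot below may only ever be 0 or 1.
 */
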
/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
#else
	clear_LDT();
#endif
}
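
/*
 * For reference, the writer side (a sketch of install_ldt() in
 * arch/x86/kernel/ldt.c; exact details may differ): the new table is
 * published with a release store and then reloaded everywhere via IPI,
 * which is what pairs with the READ_ONCE() above:
 *
 *	smp_store_release(&mm->context.ldt, ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 */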

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}
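
/*
 * Worked example: pkey_allocation_map is a bitmap with bit k set when
 * pkey k is allocated.  Right after init_new_context() it is 0x1 (only
 * pkey 0, the default key implicitly attached to every PTE).  If a
 * later pkey_alloc() hands out pkey 1, the map becomes 0x3.
 */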

static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, start, end);
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
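
/*
 * Worked example of the PKRU layout this relies on: each pkey k gets
 * two PKRU bits, AD ("access disable") at bit 2k and WD ("write
 * disable") at bit 2k + 1.  So with PKRU == 0x0c, pkey 1 has both bits
 * set: accesses through VMAs tagged with pkey 1 fault, and
 * __pkru_allows_pkey(1, write) is false for reads and writes alike.
 */
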
/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
				      this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}
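
/*
 * Sketch of what build_cr3() produces (an assumption drawn from the
 * tlbflush.h/tlb.c code of this era, not defined in this header): the
 * physical address of @pgd OR'd with the hardware PCID chosen for
 * @asid, roughly:
 *
 *	cr3 = __sme_pa(pgd) | kern_pcid(asid);
 *
 * where kern_pcid(asid) is asid + 1 when X86_FEATURE_PCID is available
 * and 0 otherwise.
 */
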
typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows us to set temporary mappings that are not
 * accessible by other CPUs.  Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs.  Using a temporary mm also allows
 * us to avoid TLB shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core.
 *          To harden security, IRQs must be disabled while the temporary mm
 *          is loaded, thereby preventing interrupt handler bugs from
 *          overriding the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();
	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used.  Userspace might set up watchpoints on addresses that are
	 * used in the temporary mm, which would lead to wrong signals being
	 * sent or crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also
	 * causes kernel breakpoints (e.g., perf's) to be disabled.  This
	 * might be undesirable, but still seems reasonable as the code that
	 * runs in the temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary
	 * mm was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}
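
/*
 * Usage sketch (an illustration modeled on the text_poke() machinery
 * in arch/x86/kernel/alternative.c; "poking_mm" is not declared in
 * this header):
 *
 *	temp_mm_state_t prev;
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	prev = use_temporary_mm(poking_mm);
 *	... write through the mapping that only this mm contains ...
 *	unuse_temporary_mm(prev);
 *	local_irq_restore(flags);
 */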

#endif /* _ASM_X86_MMU_CONTEXT_H */