arch/x86/include/asm/tlbflush.h:
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>
/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on the RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 */
/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS	12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid. -1 below to account
 * for them being zero-based. Another -1 is because PCID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6
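To make the PCID budget concrete: X86_CR3_PCID_BITS is 12, so with PAGE_TABLE_ISOLATION one bit goes to the user/kernel switch and MAX_ASID_AVAILABLE works out to 2046; without PTI it is 4094. A standalone sketch of that arithmetic (max_asid_available() is a made-up helper mirroring the macros above, not a kernel function):

```c
/* Standalone sketch: PCID budget with and without page-table isolation. */
#include <assert.h>
#include <stdio.h>

#define CR3_HW_ASID_BITS 12 /* hardware PCID field width in CR3 */

static unsigned int max_asid_available(int pti_enabled)
{
        /* PTI steals one bit of the PCID field for the user/kernel switch. */
        unsigned int avail_bits = CR3_HW_ASID_BITS - (pti_enabled ? 1 : 0);

        /* -1 for zero-based ASIDs, -1 because PCID 0 stays reserved. */
        return (1u << avail_bits) - 2;
}

int main(void)
{
        assert(max_asid_available(1) == 2046); /* CONFIG_PAGE_TABLE_ISOLATION=y */
        assert(max_asid_available(0) == 4094); /* PTI disabled */
        printf("with PTI: %u, without PTI: %u\n",
               max_asid_available(1), max_asid_available(0));
        return 0;
}
```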
/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
        /*
         * Make sure that the dynamic ASID space does not conflict with the
         * bit we are using to switch between user and kernel ASIDs.
         */
        BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

        /*
         * The ASID being passed in here should have respected the
         * MAX_ASID_AVAILABLE and thus never have the switch bit set.
         */
        VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
        /*
         * The dynamically-assigned ASIDs that get passed in are small
         * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
         * so do not bother to clear it.
         *
         * If PCID is on, ASID-aware code paths put the ASID+1 into the
         * PCID bits. This serves two purposes. It prevents a nasty
         * situation in which PCID-unaware code saves CR3, loads some other
         * value (with PCID == 0), and then restores CR3, thus corrupting
         * the TLB for ASID 0 if the saved ASID was nonzero. It also means
         * that any bugs involving loading a PCID-enabled CR3 with
         * CR4.PCIDE off will trigger deterministically.
         */
        return asid + 1;
}

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
        u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
        return ret;
}
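A hedged illustration of the ASID/kPCID/uPCID namespaces described at the top of the file; sketch_kern_pcid()/sketch_user_pcid() are simplified stand-ins for the helpers above (no warnings, PTI assumed on), and the 2048 offset is bit 11, X86_CR3_PTI_PCID_USER_BIT:

```c
/* Standalone sketch of the ASID -> kPCID/uPCID mapping (simplified). */
#include <assert.h>
#include <stdint.h>

#define PTI_PCID_USER_BIT 11 /* mirrors X86_CR3_PTI_PCID_USER_BIT */

static uint16_t sketch_kern_pcid(uint16_t asid)
{
        return asid + 1; /* PCID 0 is left to PCID-unaware code */
}

static uint16_t sketch_user_pcid(uint16_t asid)
{
        return sketch_kern_pcid(asid) | (1u << PTI_PCID_USER_BIT);
}

int main(void)
{
        /* ASID 0 (e.g. init_mm) uses kPCID 1 and uPCID 2049. */
        assert(sketch_kern_pcid(0) == 1 && sketch_user_pcid(0) == 2049);
        /* ASID 3 -> kPCID 4 -> uPCID 4 + 2048 = 2052. */
        assert(sketch_kern_pcid(3) == 4 && sketch_user_pcid(3) == 2052);
        return 0;
}
```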
struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
        if (static_cpu_has(X86_FEATURE_PCID)) {
                return __sme_pa(pgd) | kern_pcid(asid);
        } else {
                VM_WARN_ON_ONCE(asid != 0);
                return __sme_pa(pgd);
        }
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
        /*
         * Use boot_cpu_has() instead of this_cpu_has() as this function
         * might be called during early boot. This should work even after
         * boot because all CPUs have the same capabilities:
         */
        VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
        return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
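The CR3 values built here are the (possibly SME-encrypted) physical address of the PGD, which is page aligned and therefore leaves bits 0-11 free for the kPCID; build_cr3_noflush() additionally sets the no-flush bit, which on x86-64 is bit 63 (X86_CR3_PCID_NOFLUSH). A standalone sketch of that layout, with a made-up PGD address:

```c
/* Standalone sketch: how a CR3 value decomposes (addresses are made up). */
#include <assert.h>
#include <stdint.h>

#define CR3_NOFLUSH_BIT (1ULL << 63) /* mirrors X86_CR3_PCID_NOFLUSH */
#define CR3_PCID_MASK 0xFFFULL /* low 12 bits carry the PCID */

int main(void)
{
        uint64_t pgd_pa = 0x1234000; /* hypothetical page-aligned PGD address */
        uint16_t kpcid = 4; /* kern_pcid(3) */

        uint64_t cr3 = pgd_pa | kpcid;
        uint64_t cr3_noflush = cr3 | CR3_NOFLUSH_BIT;

        assert((cr3 & CR3_PCID_MASK) == kpcid); /* PCID lives in bits 0-11 */
        assert((cr3 & ~CR3_PCID_MASK) == pgd_pa); /* the rest is the PGD address */
        assert(cr3_noflush >> 63); /* bit 63 set: keep this PCID's TLB entries */
        return 0;
}
```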
struct flush_tlb_info;

void __flush_tlb_all(void);
void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_others(const struct cpumask *cpumask,
                      const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
};
struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on. This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         *
         * During switch_mm_irqs_off(), loaded_mm will be set to
         * LOADED_MM_SWITCHING during the brief interrupts-off window
         * when CR3 and loaded_mm would otherwise be inconsistent. This
         * is for nmi_uaccess_okay()'s benefit.
         */
        struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

        /* Last user mm for optimizing IBPB */
        union {
                struct mm_struct *last_user_mm;
                unsigned long last_user_mm_ibpb;
        };

        u16 loaded_mm_asid;
        u16 next_asid;

        /*
         * We can be in one of several states:
         *
         * - Actively using an mm. Our CPU's bit will be set in
         *   mm_cpumask(loaded_mm) and is_lazy == false;
         *
         * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
         *   will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         * - Lazily using a real mm. loaded_mm != &init_mm, our bit
         *   is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *   We're heuristically guessing that the CR3 load we
         *   skipped more than makes up for the overhead added by
         *   lazy mode.
         */
        bool is_lazy;

        /*
         * If set we changed the page tables in such a way that we
         * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
         * This tells us to go invalidate all the non-loaded ctxs[]
         * on the next context switch.
         *
         * The current ctx was kept up-to-date as it ran and does not
         * need to be invalidated.
         */
        bool invalidate_other;

        /*
         * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
         * the corresponding user PCID needs a flush next time we
         * switch to it; see SWITCH_TO_USER_CR3.
         */
        unsigned short user_pcid_flush_mask;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;

        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs[].
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from. As an invariant, the TLB will never
         * contain entries that are out-of-date relative to when that mm
         * reached the tlb_gen in the list.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen. This can happen
         * (for now, at least) due to paravirt remote flushes.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code. This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
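What consumes ctxs[]: on a context switch the kernel looks for the incoming mm's ctx_id among the TLB_NR_DYN_ASIDS cached contexts and reuses that ASID if its tlb_gen is current; otherwise it recycles a slot via next_asid and flushes it. The model below is a simplified standalone sketch of that lookup, not the kernel's actual implementation; struct tlb_state_model and choose_asid() are invented names:

```c
/* Simplified model of the per-CPU ASID cache lookup (not kernel code). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TLB_NR_DYN_ASIDS 6

struct tlb_context_model {
        uint64_t ctx_id; /* which mm these TLB entries belong to */
        uint64_t tlb_gen; /* how up to date they are */
};

struct tlb_state_model {
        uint16_t next_asid;
        struct tlb_context_model ctxs[TLB_NR_DYN_ASIDS];
};

/* Pick an ASID for @ctx_id; *need_flush says whether stale entries remain. */
static uint16_t choose_asid(struct tlb_state_model *ts, uint64_t ctx_id,
                            uint64_t next_tlb_gen, bool *need_flush)
{
        uint16_t asid;

        for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
                if (ts->ctxs[asid].ctx_id != ctx_id)
                        continue;
                /* Hit: flush only if this slot's entries are behind. */
                *need_flush = ts->ctxs[asid].tlb_gen < next_tlb_gen;
                return asid;
        }

        /* Miss: recycle a slot round-robin; its old contents must be flushed. */
        asid = ts->next_asid;
        ts->next_asid = (asid + 1) % TLB_NR_DYN_ASIDS;
        *need_flush = true;
        return asid;
}

int main(void)
{
        struct tlb_state_model ts = { 0 };
        bool flush;

        ts.ctxs[2].ctx_id = 77; /* pretend mm 77 used ASID 2 recently */
        ts.ctxs[2].tlb_gen = 5;

        assert(choose_asid(&ts, 77, 5, &flush) == 2 && !flush); /* hit, current */
        assert(choose_asid(&ts, 99, 1, &flush) == 0 && flush);  /* miss, evict */
        return 0;
}
```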
/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or
 * switching the loaded mm. It can also be dangerous if we
 * interrupted some kernel code that was temporarily using a
 * different mm.
 */
static inline bool nmi_uaccess_okay(void)
{
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        struct mm_struct *current_mm = current->mm;

        VM_WARN_ON_ONCE(!loaded_mm);

        /*
         * The condition we want to check is
         * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
         * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
         * is supposed to be reasonably fast.
         *
         * Instead, we check the almost equivalent but somewhat conservative
         * condition below, and we rely on the fact that switch_mm_irqs_off()
         * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
         */
        if (loaded_mm != current_mm)
                return false;

        VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));

        return true;
}

#define nmi_uaccess_okay nmi_uaccess_okay
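The trick is that LOADED_MM_SWITCHING is a sentinel pointer that can never equal a real mm, so the single loaded_mm == current->mm comparison also rejects the window in which CR3 is being rewritten. A standalone model of just that comparison (model_nmi_uaccess_okay() and struct mm_model are illustrative, not kernel types):

```c
/* Standalone model of the nmi_uaccess_okay() check (not kernel code). */
#include <assert.h>
#include <stdbool.h>

struct mm_model { int dummy; };

#define LOADED_MM_SWITCHING ((struct mm_model *)1UL)

/* Per-CPU "loaded mm" vs. the interrupted task's mm. */
static bool model_nmi_uaccess_okay(struct mm_model *loaded_mm,
                                   struct mm_model *current_mm)
{
        /*
         * While switch_mm_irqs_off() is rewriting CR3 it sets loaded_mm to
         * LOADED_MM_SWITCHING, so this comparison also fails mid-switch.
         */
        return loaded_mm == current_mm;
}

int main(void)
{
        struct mm_model a, b;

        assert(model_nmi_uaccess_okay(&a, &a));                  /* same mm: safe */
        assert(!model_nmi_uaccess_okay(&b, &a));                 /* borrowed mm */
        assert(!model_nmi_uaccess_okay(LOADED_MM_SWITCHING, &a)); /* mid-switch */
        return 0;
}
```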
void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
        cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
        cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long flags;

        local_irq_save(flags);
        cr4_set_bits_irqsoff(mask);
        local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long flags;

        local_irq_save(flags);
        cr4_clear_bits_irqsoff(mask);
        local_irq_restore(flags);
}
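Every CR4 change funnels through cr4_update_irqsoff() so the per-CPU shadow and the hardware register stay in sync, letting readers use the cheap shadow instead of a real (and possibly VM-exiting) CR4 read. A standalone model of the shadowed read-modify-write; the in-kernel function, defined outside this header, additionally asserts that interrupts are off:

```c
/* Standalone model of a shadowed CR4 update (not the kernel implementation). */
#include <assert.h>

static unsigned long hw_cr4;     /* stands in for the real %cr4 register */
static unsigned long cr4_shadow; /* stands in for cpu_tlbstate.cr4 */

static void model_cr4_update(unsigned long set, unsigned long clear)
{
        unsigned long newval = (cr4_shadow & ~clear) | set;

        /* Only touch the (expensive) register when something changed. */
        if (newval != cr4_shadow) {
                cr4_shadow = newval;
                hw_cr4 = newval;
        }
}

int main(void)
{
        cr4_shadow = hw_cr4 = 0x2000; /* pretend some feature bit is already set */

        model_cr4_update(0x4, 0);     /* like cr4_set_bits(0x4) */
        model_cr4_update(0, 0x2000);  /* like cr4_clear_bits(0x2000) */

        assert(hw_cr4 == 0x4 && cr4_shadow == hw_cr4);
        return 0;
}
```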
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 * - flush_tlb_all() flushes all processes' TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 * - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm. .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm. .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated. .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        u64 new_tlb_gen;
        unsigned int stride_shift;
        bool freed_tables;
};
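In the kernel this structure is filled in by the flush_tlb_* implementations rather than by callers; the standalone sketch below just shows how the three cases in the comment map onto field values (struct flush_tlb_info_model and the addresses are made up, and a 64-bit build is assumed):

```c
/* Illustrative encodings of the three flush kinds (types simplified). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TLB_FLUSH_ALL (~0UL)
#define PAGE_SHIFT 12

struct flush_tlb_info_model {
        void *mm; /* stands in for struct mm_struct * */
        unsigned long start, end;
        uint64_t new_tlb_gen;
        unsigned int stride_shift;
        bool freed_tables;
};

int main(void)
{
        int fake_mm; /* placeholder for a real mm_struct */

        /* Partial flush of one mm: a 16 KiB range of 4 KiB pages. */
        struct flush_tlb_info_model partial = {
                .mm = &fake_mm,
                .start = 0x7f0000000000UL,
                .end = 0x7f0000004000UL,
                .new_tlb_gen = 42, /* generation this flush catches us up to */
                .stride_shift = PAGE_SHIFT,
        };

        /* Full flush of one mm: the range degenerates to 0..TLB_FLUSH_ALL. */
        struct flush_tlb_info_model full_mm = {
                .mm = &fake_mm, .end = TLB_FLUSH_ALL, .new_tlb_gen = 42,
        };

        /* Flush all mms whose tlb_gens moved: no mm, no generation target. */
        struct flush_tlb_info_model full_all = {
                .mm = NULL, .end = TLB_FLUSH_ALL, .new_tlb_gen = 0,
        };

        assert(partial.end - partial.start == 4 * 4096UL);
        assert(full_mm.mm && !full_all.mm);
        return 0;
}
```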
#define flush_tlb_mm(mm) \
	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end) \
	flush_tlb_mm_range((vma)->vm_mm, start, end, \
			   ((vma)->vm_flags & VM_HUGETLB) \
				? huge_page_shift(hstate_vma(vma)) \
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
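flush_tlb_range() mostly exists to pick the stride: an ordinary VMA is walked in PAGE_SHIFT steps, while a hugetlb VMA passes huge_page_shift() so far fewer per-page invalidations are needed. A worked example of that arithmetic, assuming 4 KiB base pages and 2 MiB huge pages:

```c
/* Worked example: number of per-page invalidations for a 16 MiB range. */
#include <assert.h>

#define PAGE_SHIFT 12 /* 4 KiB base pages */
#define PMD_SHIFT 21  /* 2 MiB huge pages */

int main(void)
{
        unsigned long start = 0x40000000UL;
        unsigned long end = start + (16UL << 20); /* 16 MiB */

        unsigned long base_steps = (end - start) >> PAGE_SHIFT;
        unsigned long huge_steps = (end - start) >> PMD_SHIFT;

        assert(base_steps == 4096); /* stride_shift = PAGE_SHIFT */
        assert(huge_steps == 8);    /* stride_shift = huge_page_shift() */
        return 0;
}
```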
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        /*
         * Bump the generation count. This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#endif /* _ASM_X86_TLBFLUSH_H */
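inc_mm_tlb_gen() plus the arch_tlbbatch_*() helpers form the unmap-side batching interface: each mm being unmapped gets its generation bumped and its mm_cpumask() OR-ed into the batch, and a single arch_tlbbatch_flush() at the end replaces one IPI round per page. A standalone model of that flow (struct mm_model, struct flush_batch_model and the CPU masks are invented for illustration):

```c
/* Standalone model of batched remote TLB flushing (not the kernel code). */
#include <assert.h>
#include <stdint.h>

struct mm_model {
        uint64_t tlb_gen; /* like mm->context.tlb_gen */
        uint32_t cpumask; /* like mm_cpumask(mm), one bit per CPU */
};

struct flush_batch_model {
        uint32_t cpumask; /* union of all CPUs that need an IPI */
};

static void batch_add_mm(struct flush_batch_model *batch, struct mm_model *mm)
{
        mm->tlb_gen++;                 /* publish that the page tables changed */
        batch->cpumask |= mm->cpumask; /* remember every CPU that ran this mm */
}

static unsigned int batch_flush(struct flush_batch_model *batch)
{
        /* One IPI round for the whole batch; returns how many CPUs we hit. */
        unsigned int cpus = __builtin_popcount(batch->cpumask);

        batch->cpumask = 0;
        return cpus;
}

int main(void)
{
        struct mm_model a = { .tlb_gen = 1, .cpumask = 0x3 }; /* CPUs 0,1 */
        struct mm_model b = { .tlb_gen = 7, .cpumask = 0x6 }; /* CPUs 1,2 */
        struct flush_batch_model batch = { 0 };

        batch_add_mm(&batch, &a);
        batch_add_mm(&batch, &b);

        assert(a.tlb_gen == 2 && b.tlb_gen == 8);
        assert(batch_flush(&batch) == 3); /* CPUs 0,1,2 flushed in one round */
        return 0;
}
```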