/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and we flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 */
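
/*
 * Worked example (illustrative, not part of the original header), assuming
 * TLB_NR_DYN_ASIDS == 6 and the PTI user bit being bit 11 (2048 == 1 << 11):
 *
 *	ASID 0 -> kPCID 1, uPCID 2049
 *	ASID 5 -> kPCID 6, uPCID 2054
 */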

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS		12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches.
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0 -> MAX_ASID_AVAILABLE are valid. One -1 below
 * accounts for them being zero-based; the other -1 is because PCID 0 is
 * reserved for use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)

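/*
 * Illustrative arithmetic (not part of the original header): with PTI
 * enabled, CR3_AVAIL_PCID_BITS == 12 - 1 == 11, so MAX_ASID_AVAILABLE ==
 * (1 << 11) - 2 == 2046.
 */
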
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * Make sure that the dynamic ASID space does not conflict with the
	 * bit we are using to switch between user and kernel ASIDs.
	 */
	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

	/*
	 * The ASID being passed in here should have respected the
	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
	 */
	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
	/*
	 * The dynamically-assigned ASIDs that get passed in are small
	 * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
	 * so do not bother to clear it.
	 *
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits.  This serves two purposes.  It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
	u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
	return ret;
}

struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __sme_pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(pgd);
	}
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	/*
	 * Use boot_cpu_has() instead of this_cpu_has() as this function
	 * might be called during early boot.  This should work even after
	 * boot because all CPUs have the same capabilities:
	 */
	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}

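/*
 * Illustrative sketch (not part of the original header): for ASID 2,
 * kern_pcid(2) == 3, so:
 *
 *	build_cr3(pgd, 2)         == __sme_pa(pgd) | 3
 *	build_cr3_noflush(pgd, 2) == __sme_pa(pgd) | 3 | CR3_NOFLUSH
 *
 * CR3_NOFLUSH asks the CPU not to flush the TLB on the CR3 write
 * (assumed here to be the high no-flush bit, bit 63, of CR3).
 */
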
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_ibpb;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * tlb_gen recorded in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

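/*
 * Illustrative sketch (not part of the original header): reading the
 * per-cpu state, e.g. the ASID loaded on this CPU and its kPCID:
 *
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 *	u16 pcid = kern_pcid(asid);
 */
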
/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or
 * switching the loaded mm.  It can also be dangerous if we
 * interrupted some kernel code that was temporarily using a
 * different mm.
 */
static inline bool nmi_uaccess_okay(void)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	struct mm_struct *current_mm = current->mm;

	VM_WARN_ON_ONCE(!loaded_mm);

	/*
	 * The condition we want to check is
	 * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
	 * is supposed to be reasonably fast.
	 *
	 * Instead, we check the almost equivalent but somewhat conservative
	 * condition below, and we rely on the fact that switch_mm_irqs_off()
	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
	 */
	if (loaded_mm != current_mm)
		return false;

	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));

	return true;
}

#define nmi_uaccess_okay nmi_uaccess_okay

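/*
 * Illustrative sketch (not part of the original header): NMI-context
 * code that touches user memory should bail out when the check fails:
 *
 *	if (!nmi_uaccess_okay())
 *		return;
 *	... the current page tables are now known to map current->mm ...
 */
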
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

static inline void __cr4_set(unsigned long cr4)
{
	lockdep_assert_irqs_disabled();
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4)
		__cr4_set(cr4 | mask);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4)
		__cr4_set(cr4 & ~mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	__cr4_set(cr4 ^ mask);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

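/*
 * Illustrative sketch (not part of the original header): from process
 * context, set or clear a CR4 bit through the IRQ-safe wrappers, e.g.:
 *
 *	cr4_set_bits(X86_CR4_PGE);
 *	cr4_clear_bits(X86_CR4_PGE);
 *
 * Code that already runs with interrupts disabled can call the
 * *_irqsoff() variants directly and skip the flags save/restore.
 */
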
/*
 * Mark all other ASIDs as invalid; the current one is preserved.
 */
static inline void invalidate_other_asid(void)
{
	this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

/*
 * Given an ASID, flush the corresponding user ASID.  We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
	/* There is no user ASID if address space separation is off */
	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		return;

	/*
	 * We only have a single ASID if PCID is off and the CR3
	 * write will have flushed it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_PCID))
		return;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	__set_bit(kern_pcid(asid),
		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}

/*
 * Flush the entire current user mapping.
 */
static inline void __native_flush_tlb(void)
{
	/*
	 * Preemption or interrupts must be disabled to protect the access
	 * to the per CPU variable and to prevent being preempted between
	 * read_cr3() and write_cr3().
	 */
	WARN_ON_ONCE(preemptible());

	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
	native_write_cr3(__native_read_cr3());
}

/*
 * Flush everything.
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4, flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* toggle PGE */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

/*
 * Flush one page in the user mapping.
 */
static inline void __native_flush_tlb_one_user(unsigned long addr)
{
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
	 * Just use invalidate_user_asid() in case we are called early.
	 */
	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
		invalidate_user_asid(loaded_mm_asid);
	else
		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}

/*
 * Flush everything.
 */
static inline void __flush_tlb_all(void)
{
	/*
	 * This is to catch users with enabled preemption and the PGE feature
	 * so that they don't trigger the warning in __native_flush_tlb().
	 */
	VM_WARN_ON_ONCE(preemptible());

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/*
		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
		 */
		__flush_tlb();
	}
}

/*
 * Flush one page in the kernel mapping.
 */
static inline void __flush_tlb_one_kernel(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);

	/*
	 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
	 * paravirt equivalent.  Even with PCID, this is sufficient: we only
	 * use PCID if we also use global PTEs for the kernel mapping, and
	 * INVLPG flushes global translations across all address spaces.
	 *
	 * If PTI is on, then the kernel is mapped with non-global PTEs, and
	 * __flush_tlb_one_user() will flush the given address for the current
	 * kernel address space and for its usermode counterpart, but it does
	 * not flush it for other address spaces.
	 */
	__flush_tlb_one_user(addr);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * See above.  We need to propagate the flush to all other address
	 * spaces.  In principle, we only need to propagate it to kernelmode
	 * address spaces, but the extra bookkeeping we would need is not
	 * worth it.
	 */
	invalidate_other_asid();
}

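/*
 * Illustrative sketch (not part of the original header): a caller that
 * rewrites a single kernel PTE (the update site below is hypothetical)
 * would follow up with a one-page kernel flush:
 *
 *	set_pte(ptep, pte);
 *	__flush_tlb_one_kernel(vaddr);
 */
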
#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
	unsigned int		stride_shift;
	bool			freed_tables;
};

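/*
 * Illustrative sketch (not part of the original header): a partial flush
 * of one page of @mm could be described as:
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= addr,
 *		.end		= addr + PAGE_SIZE,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *		.stride_shift	= PAGE_SHIFT,
 *		.freed_tables	= false,
 *	};
 *
 * and then passed to flush_tlb_others(mm_cpumask(mm), &info) to reach
 * the other CPUs.
 */
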
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

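/*
 * Illustrative sketch (not part of the original header): flushing one
 * 2MB huge page worth of mappings with a 2MB stride might look like
 * (PMD_SHIFT/PMD_SIZE assumed from the usual x86 paging headers):
 *
 *	flush_tlb_mm_range(mm, addr, addr + PMD_SIZE, PMD_SHIFT, false);
 */
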
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)

#define paravirt_tlb_remove_table(tlb, page) \
	tlb_remove_page(tlb, (void *)(page))
#endif

#endif /* _ASM_X86_TLBFLUSH_H */