/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>
#include <asm/pgtable.h>

DECLARE_PER_CPU(u64, tlbstate_untag_mask);

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL			-1UL
#define TLB_GENERATION_INVALID		0
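/*
 * A reading of the two constants above: TLB_FLUSH_ALL is a sentinel for
 * flush ranges -- passing it as 'end' requests a full flush rather than
 * a ranged one.  TLB_GENERATION_INVALID works because inc_mm_tlb_gen()
 * below only ever returns values >= 1, so 0 can never name a real
 * generation.
 */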

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}
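
/*
 * Illustrative usage of the helpers above (a sketch, not taken from this
 * file): code running with interrupts enabled would write, for example,
 *
 *	cr4_set_bits(X86_CR4_SMEP);
 *	cr4_clear_bits(X86_CR4_PCE);
 *
 * while callers that already hold interrupts off use the _irqsoff
 * variants directly and avoid the redundant save/restore.
 */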

#ifndef MODULE
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};
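
/*
 * In each tlb_context, ctx_id identifies the mm whose translations may be
 * cached (it mirrors mm->context.ctx_id, which is never reused), and
 * tlb_gen is the last generation of that mm that this TLB is known to be
 * caught up with.
 */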

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_spec;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

#ifdef CONFIG_ADDRESS_MASKING
	/*
	 * Active LAM mode.
	 *
	 * X86_CR3_LAM_U57/U48 shifted right by X86_CR3_LAM_U57_BIT, or 0 if
	 * LAM is disabled.
	 */
	u8 lam;
#endif

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out of date with respect to that
	 * mm's recorded tlb_gen.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);
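
/*
 * cpu_tlbstate is per-CPU and, with TLB_NR_DYN_ASIDS == 6, sized to fit
 * in two cache lines (see the comment above).  A minimal access sketch,
 * assuming preemption is already disabled:
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 */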

struct tlb_state_shared {
	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
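
/*
 * cr4_init_shadow() deliberately bypasses cr4_update_irqsoff(): it only
 * seeds the shadow from the live register, so it is presumably expected
 * to run early in each CPU's bringup (and resume) path, before anything
 * modifies CR4 through the helpers above.
 */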

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
	unsigned int		initiating_cpu;
	u8			stride_shift;
	u8			freed_tables;
};

void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
		     const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)
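
/*
 * A hedged usage sketch (illustrative, not from this header): after
 * write-protecting part of a VMA, generic mm code flushes the affected
 * range, which on x86 expands to flush_tlb_mm_range() with a stride
 * derived from the VMA's page size -- huge_page_shift() for hugetlb
 * mappings, PAGE_SHIFT otherwise:
 *
 *	flush_tlb_range(vma, start, end);
 */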

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
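
/*
 * Batched flush flow, as suggested by the two helpers above: during
 * reclaim/unmap, arch_tlbbatch_add_mm() bumps each mm's tlb_gen and
 * accumulates that mm's CPUs into batch->cpumask; a single
 * arch_tlbbatch_flush() then flushes all of them at once rather than
 * flushing per page.
 */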

static inline bool pte_flags_need_flush(unsigned long oldflags,
					unsigned long newflags,
					bool ignore_access)
{
	/*
	 * Flags that require a flush when cleared but not when they are set.
	 * Only include flags that would not trigger spurious page-faults.
	 * Non-present entries are not cached.  Hardware would set the
	 * dirty/access bit if needed without a fault.
	 */
	const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
					_PAGE_ACCESSED;
	const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
					_PAGE_SOFTW3 | _PAGE_SOFTW4;
	const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
			_PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
			_PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
			_PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX;
	unsigned long diff = oldflags ^ newflags;

	BUILD_BUG_ON(flush_on_clear & software_flags);
	BUILD_BUG_ON(flush_on_clear & flush_on_change);
	BUILD_BUG_ON(flush_on_change & software_flags);

	/* Ignore software flags */
	diff &= ~software_flags;

	if (ignore_access)
		diff &= ~_PAGE_ACCESSED;

	/*
	 * Were any of the 'flush_on_clear' flags cleared between 'oldflags'
	 * and 'newflags'?
	 */
	if (diff & oldflags & flush_on_clear)
		return true;

	/* Flush on modified flags. */
	if (diff & flush_on_change)
		return true;

	/* Ensure there are no flags that were left behind */
	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
	    (diff & ~(flush_on_clear | software_flags | flush_on_change))) {
		VM_WARN_ON_ONCE(1);
		return true;
	}

	return false;
}
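
/*
 * Two worked examples of the logic above: clearing _PAGE_RW (a
 * write-protect) lands in 'diff & flush_on_change' and returns true,
 * while setting _PAGE_DIRTY on a previously-clean PTE matches neither
 * test -- the bit is newly set, not cleared -- so no flush is required.
 */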

/*
 * pte_needs_flush() checks whether permissions were demoted and require a
 * flush.  It should only be used for userspace PTEs.
 */
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pte_flags(oldpte) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pte_pfn(oldpte) != pte_pfn(newpte))
		return true;

	/*
	 * Check PTE flags; ignore the access bit; see the comment in
	 * ptep_clear_flush_young().
	 */
	return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte),
				    true);
}
#define pte_needs_flush pte_needs_flush

/*
 * huge_pmd_needs_flush() checks whether permissions were demoted and require a
 * flush.  It should only be used for userspace huge PMDs.
 */
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
		return true;

	/*
	 * Check PMD flags; do not ignore the access bit; see
	 * pmdp_clear_flush_young().
	 */
	return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd),
				    false);
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

#ifdef CONFIG_ADDRESS_MASKING
static inline u64 tlbstate_lam_cr3_mask(void)
{
	u64 lam = this_cpu_read(cpu_tlbstate.lam);

	return lam << X86_CR3_LAM_U57_BIT;
}
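
/*
 * The value returned above is meant to be OR'ed into the CR3 image when
 * switching mms, so the LAM configuration travels with the page-table
 * base; cpu_tlbstate.lam stores it pre-shifted to keep the field one
 * byte wide.
 */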

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
	this_cpu_write(cpu_tlbstate.lam,
		       mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
	this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask);
}

#else

static inline u64 tlbstate_lam_cr3_mask(void)
{
	return 0;
}

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
}
#endif
#endif /* !MODULE */

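/*
 * Toggling CR4.PGE is the architectural way to flush global TLB entries
 * as well as non-global ones: any CR4 write that changes PGE invalidates
 * the entire TLB, which a plain CR3 write would not do for global pages.
 */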
static inline void __native_tlb_flush_global(unsigned long cr4)
{
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	native_write_cr4(cr4);
}
#endif /* _ASM_X86_TLBFLUSH_H */